code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
# This script merges and consolidates the beacons and sessions datasets into 1 dataset
from pandarallel import pandarallel
import pandas as pd
import datetime
import sys
import os
pandarallel.initialize(progress_bar=False, nb_workers=4)  # enables .parallel_apply() on 4 worker processes
base_path = os.path.dirname(os.path.realpath(__file__))  # all relative data paths below are resolved against this script's directory
def df_info(df):
    """Summarize a dataframe's columns and approximate memory footprint.

    Parameters
    ----------
    df : pd.DataFrame
        Dataframe to inspect.

    Returns
    -------
    tuple
        ``(summary, total_mb)`` where ``summary`` is a DataFrame with one
        row per column (col_name, col_type, null_count, nunique) and
        ``total_mb`` is the approximate deep memory usage in megabytes.
    """
    col_name_list = list(df.columns)
    # Infer each column's runtime type from the first row of data. An empty
    # frame has no first row (the original raised IndexError here), so fall
    # back to a None placeholder per column.
    if len(df):
        col_type_list = [type(val) for val in df.iloc[0, :]]
    else:
        col_type_list = [None] * len(col_name_list)
    col_null_count_list = [df[col].isnull().sum() for col in col_name_list]
    col_unique_count_list = [df[col].nunique() for col in col_name_list]
    # index=False: without it every per-column Series would include the
    # frame's index memory, so summing over columns counted the index once
    # per column and overstated the total.
    col_memory_usage_list = [df[col].memory_usage(index=False, deep=True) for col in col_name_list]
    df_total_memory_usage = sum(col_memory_usage_list) / 1048576  # bytes -> MB
    return pd.DataFrame({'col_name': col_name_list, 'col_type': col_type_list, 'null_count': col_null_count_list, 'nunique': col_unique_count_list}), df_total_memory_usage
######################
# Due to memory constraint, we split this script using an argument
# We are simply going to check if an argument was supplied or not
# If not supplied, we shall execute processing stages 1, 2 & 3
# If supplied, we shall execute only stage 4
# Stage selection: no CLI argument -> stages 1-3 (clean, consolidate by
# (date, uuid), merge with sessions); any argument -> stage 4 only (reload
# the stage-3 output and consolidate by (date, email)).
if len(sys.argv) < 2:
    # Reading the dataset b_3m and printing its basic summary
    print('\n{}\tReading raw data: b_3m.csv ...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    in_filename = '../data/sanitized/subset/b_3m.csv'
    df_b = pd.read_csv(os.path.join(base_path, in_filename))
    df_b_info = df_info(df_b)
    print('\n{}\t"b_3m" dataset summary:'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    print('\t{} rows x {} columns | {:.2f} MB approx memory usage'.format(df_b.shape[0], df_b.shape[1], df_b_info[1]))
    print(df_b_info[0].to_string())
    print('\n"b_3m" dataset head:')
    print(df_b.head().to_string())
    # Reading the dataset s and printing its basic summary
    print('\n{}\tReading raw data: s.csv ...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    in_filename = '../data/sanitized/s.csv'
    df_s = pd.read_csv(os.path.join(base_path, in_filename))
    df_s_info = df_info(df_s)
    print('\n{}\t"s" dataset summary:'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    print('\t{} rows x {} columns | {:.2f} MB approx memory usage'.format(df_s.shape[0], df_s.shape[1], df_s_info[1]))
    print(df_s_info[0].to_string())
    print('\n"s" dataset head:')
    print(df_s.head().to_string())
    ######################
    # Dropping null values and converting column types for dataset b
    print('\n{}\tProcessing stage 1: "b_3m" dataset: dropping rows with na, converting column types ...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    df_b.dropna(inplace=True)
    # uuid is read as float (NaNs upcast the column); round-trip through int
    # to get a clean string id without a trailing '.0'.
    df_b.uuid = df_b.uuid.parallel_apply(lambda x: str(int(x)))
    df_b.beacon_value = df_b.beacon_value.parallel_apply(lambda x: int(x))
    df_b_info = df_info(df_b)
    print('\n{}\t"b_3m" dataset summary:'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    print('\t{} rows x {} columns | {:.2f} MB approx memory usage'.format(df_b.shape[0], df_b.shape[1], df_b_info[1]))
    print(df_b_info[0].to_string())
    print('\n"b_3m" dataset head:')
    print(df_b.head().to_string())
    # Dropping null values and converting column types for dataset s
    print('\n{}\tProcessing stage 1: "s" dataset: dropping rows with na, dropping columns, converting column types ...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    df_s.dropna(inplace=True)
    # 'status' is constant in this dataset (single unique value) and carries
    # no signal, so it is dropped.
    df_s.drop(columns=['status'], inplace=True)
    df_s.uuid = df_s.uuid.parallel_apply(lambda x: str(int(x)))
    df_s.phone = df_s.phone.parallel_apply(lambda x: str(int(x)))
    df_s.email = df_s.email.parallel_apply(lambda x: str(int(x)))
    # Keep only the date part of the session timestamp so it can align with
    # the daily granularity of the beacons data.
    df_s.log_date = df_s.log_date.parallel_apply(lambda x: datetime.datetime.strptime(x[:10], '%Y-%m-%d').date())
    df_s_info = df_info(df_s)
    print('\n{}\t"s" dataset summary:'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    print('\t{} rows x {} columns | {:.2f} MB approx memory usage'.format(df_s.shape[0], df_s.shape[1], df_s_info[1]))
    print(df_s_info[0].to_string())
    print('\n"s" dataset head:')
    print(df_s.head().to_string())
    ######################
    print('\n{}\tProcessing stage 2: consolidating "b_3m" dataset by date and uuid ...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    # Preparing to consolidate the dataset b_3m
    df_b_gb_date_uuid = df_b.groupby(['log_date', 'uuid'])
    # .groups.keys() yields the (log_date, uuid) key tuples; they become the
    # rows of the consolidated frame.
    df_b_cb_date_uuid = pd.DataFrame(df_b_gb_date_uuid.groups.keys(), columns=['date', 'uuid'])
    # Feature consolidation
    # NOTE(review): x[0]/x[1] is positional indexing into the row Series;
    # newer pandas deprecates integer-position Series __getitem__ — verify
    # against the pinned pandas version.
    print('{}\t\tConsolidating sum_beacon_value ...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    df_b_cb_date_uuid['sum_beacon_value'] = df_b_cb_date_uuid.parallel_apply(lambda x: df_b_gb_date_uuid.get_group((x[0], x[1]))['beacon_value'].values.sum(), axis=1)
    print('{}\t\tConsolidating nunique_beacon_type ...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    df_b_cb_date_uuid['nunique_beacon_type'] = df_b_cb_date_uuid.parallel_apply(lambda x: df_b_gb_date_uuid.get_group((x[0], x[1]))['beacon_type'].nunique(), axis=1)
    print('{}\t\tConsolidating count_user_stay ...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    df_b_cb_date_uuid['count_user_stay'] = df_b_cb_date_uuid.parallel_apply(lambda x: (df_b_gb_date_uuid.get_group((x[0], x[1]))['beacon_type'].values == 'user_stay').sum(), axis=1)
    print('{}\t\tConsolidating count_pay_attempt ...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    df_b_cb_date_uuid['count_pay_attempt'] = df_b_cb_date_uuid.parallel_apply(lambda x: df_b_gb_date_uuid.get_group((x[0], x[1]))['beacon_type'].str.contains('pay').sum(), axis=1)
    print('{}\t\tConsolidating count_buy_click ...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    df_b_cb_date_uuid['count_buy_click'] = df_b_cb_date_uuid.parallel_apply(lambda x: df_b_gb_date_uuid.get_group((x[0], x[1]))['beacon_type'].str.contains('buy|bottom').sum(), axis=1)
    # Printing the summary of the consolidated beacons table
    df_b_cb_date_uuid_info = df_info(df_b_cb_date_uuid)
    print('\n{}\t"b_3m_cb_date_uuid" dataset summary:'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    print('\t{} rows x {} columns | {:.2f} MB approx memory usage'.format(df_b_cb_date_uuid.shape[0], df_b_cb_date_uuid.shape[1], df_b_cb_date_uuid_info[1]))
    print(df_b_cb_date_uuid_info[0].to_string())
    print('\n"b_3m_cb_date_uuid" dataset head:')
    print(df_b_cb_date_uuid.head().to_string())
    ######################
    # Merging the consolidated dataset b with s
    # Inner join on uuid: beacon rows whose uuid has no session record are
    # dropped. The session-side log_date is removed first so the beacon-side
    # 'date' is the only date column in the result.
    print('\n{}\tProcessing stage 3: merging "s" with "b_3m_cb_date_uuid" ...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    df_bs_merged = df_b_cb_date_uuid.merge(df_s.drop(columns=['log_date']), on='uuid', how='inner')
    # NOTE(review): assumes the output directory already exists — confirm, or
    # the to_csv below fails with FileNotFoundError.
    out_filename = '../data/sanitized/processed_base/bs_merged_3m.csv'
    df_bs_merged.to_csv(os.path.join(base_path, out_filename), index=False)
    df_bs_merged_info = df_info(df_bs_merged)
    print('\n{}\t"bs_merged" dataset summary:'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    print('\t{} rows x {} columns | {:.2f} MB approx memory usage'.format(df_bs_merged.shape[0], df_bs_merged.shape[1], df_bs_merged_info[1]))
    print(df_bs_merged_info[0].to_string())
    print('\n"bs_merged" dataset head:')
    print(df_bs_merged.head().to_string())
else:
    # Reading the consolidated dataset and printing its summary
    print('\n{}\tReading dataset: bs_merged_3m.csv ...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    in_filename = '../data/sanitized/processed_base/bs_merged_3m.csv'
    df_bs_merged = pd.read_csv(os.path.join(base_path, in_filename))
    df_bs_merged_info = df_info(df_bs_merged)
    print('\n{}\t"bs_merged" dataset summary:'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    print('\t{} rows x {} columns | {:.2f} MB approx memory usage'.format(df_bs_merged.shape[0], df_bs_merged.shape[1], df_bs_merged_info[1]))
    print(df_bs_merged_info[0].to_string())
    print('\n"bs_merged" dataset head:')
    print(df_bs_merged.head().to_string())
    ######################
    # Consolidating the merged dataset by date and email
    print('\n{}\tProcessing stage 4: consolidating "bs_merged" dataset by date and email ...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    df_bs_merged_gb_date_email = df_bs_merged.groupby(['date', 'email'])
    df_bs_merged_cb_date_email = pd.DataFrame(df_bs_merged_gb_date_email.groups.keys(), columns=['date', 'email'])
    # Each (date, email) row aggregates over the per-uuid rows of that group.
    print('{}\t\tConsolidating count_sessions ...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    df_bs_merged_cb_date_email['count_sessions'] = df_bs_merged_cb_date_email.parallel_apply(lambda x: len(df_bs_merged_gb_date_email.get_group((x[0], x[1]))), axis=1)
    print('{}\t\tConsolidating sum_beacon_value ...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    df_bs_merged_cb_date_email['sum_beacon_value'] = df_bs_merged_cb_date_email.parallel_apply(lambda x: df_bs_merged_gb_date_email.get_group((x[0], x[1]))['sum_beacon_value'].values.sum(), axis=1)
    # NOTE(review): this sums the per-uuid nunique values, so it is not a
    # distinct count of beacon types across the group — confirm intended.
    print('{}\t\tConsolidating nunique_beacon_type ...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    df_bs_merged_cb_date_email['nunique_beacon_type'] = df_bs_merged_cb_date_email.parallel_apply(lambda x: df_bs_merged_gb_date_email.get_group((x[0], x[1]))['nunique_beacon_type'].values.sum(), axis=1)
    print('{}\t\tConsolidating count_user_stay ...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    df_bs_merged_cb_date_email['count_user_stay'] = df_bs_merged_cb_date_email.parallel_apply(lambda x: df_bs_merged_gb_date_email.get_group((x[0], x[1]))['count_user_stay'].values.sum(), axis=1)
    print('{}\t\tConsolidating count_pay_attempt ...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    df_bs_merged_cb_date_email['count_pay_attempt'] = df_bs_merged_cb_date_email.parallel_apply(lambda x: df_bs_merged_gb_date_email.get_group((x[0], x[1]))['count_pay_attempt'].values.sum(), axis=1)
    print('{}\t\tConsolidating count_buy_click ...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    df_bs_merged_cb_date_email['count_buy_click'] = df_bs_merged_cb_date_email.parallel_apply(lambda x: df_bs_merged_gb_date_email.get_group((x[0], x[1]))['count_buy_click'].values.sum(), axis=1)
    print('{}\t\tConsolidating nunique_gender ...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    df_bs_merged_cb_date_email['nunique_gender'] = df_bs_merged_cb_date_email.parallel_apply(lambda x: df_bs_merged_gb_date_email.get_group((x[0], x[1]))['gender'].nunique(), axis=1)
    print('{}\t\tConsolidating nunique_dob ...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    df_bs_merged_cb_date_email['nunique_dob'] = df_bs_merged_cb_date_email.parallel_apply(lambda x: df_bs_merged_gb_date_email.get_group((x[0], x[1]))['dob'].nunique(), axis=1)
    print('{}\t\tConsolidating nunique_language ...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    df_bs_merged_cb_date_email['nunique_language'] = df_bs_merged_cb_date_email.parallel_apply(lambda x: df_bs_merged_gb_date_email.get_group((x[0], x[1]))['language'].nunique(), axis=1)
    print('{}\t\tConsolidating nunique_report_type ...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    df_bs_merged_cb_date_email['nunique_report_type'] = df_bs_merged_cb_date_email.parallel_apply(lambda x: df_bs_merged_gb_date_email.get_group((x[0], x[1]))['report_type'].nunique(), axis=1)
    print('{}\t\tConsolidating nunique_device ...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    df_bs_merged_cb_date_email['nunique_device'] = df_bs_merged_cb_date_email.parallel_apply(lambda x: df_bs_merged_gb_date_email.get_group((x[0], x[1]))['device'].nunique(), axis=1)
    out_filename = '../data/sanitized/processed_base/bs_merged_consolidated_3m.csv'
    df_bs_merged_cb_date_email.to_csv(os.path.join(base_path, out_filename), index=False)
    df_bs_merged_cb_date_email_info = df_info(df_bs_merged_cb_date_email)
    print('\n{}\t"bs_merged_cb_date_email" dataset summary:'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    print('\t{} rows x {} columns | {:.2f} MB approx memory usage'.format(df_bs_merged_cb_date_email.shape[0], df_bs_merged_cb_date_email.shape[1], df_bs_merged_cb_date_email_info[1]))
    print(df_bs_merged_cb_date_email_info[0].to_string())
    print('\n"bs_merged_cb_date_email" dataset head:')
    print(df_bs_merged_cb_date_email.head().to_string())
######################
# /home/ngkpg/anaconda3/envs/pyconda37/bin/python3.7 /home/ngkpg/Documents/Packt_GP/GP1/code/data_prep_1.py
# INFO: Pandarallel will run on 4 workers.
# INFO: Pandarallel will use Memory file system to transfer data between the main process and workers.
#
# 2021-09-18 22:05:25 Reading raw data: b_3m.csv ...
#
# 2021-09-18 22:05:29 "b_3m" dataset summary:
# 6970265 rows x 4 columns | 993.62 MB approx memory usage
# col_name col_type null_count nunique
# 0 uuid <class 'numpy.float64'> 2 1794005
# 1 beacon_type <class 'str'> 2 42
# 2 beacon_value <class 'numpy.float64'> 2 277
# 3 log_date <class 'str'> 0 92
#
# "b_3m" dataset head:
# uuid beacon_type beacon_value log_date
# 0 8264419.0 user_stay 2.0 2021-05-01
# 1 8264429.0 masked_content 1.0 2021-05-01
# 2 8264423.0 user_stay 2.0 2021-05-01
# 3 8264430.0 bottom_banner 1.0 2021-05-01
# 4 8264421.0 user_stay 3.0 2021-05-01
#
# 2021-09-18 22:05:29 Reading raw data: s.csv ...
#
# 2021-09-18 22:05:50 "s" dataset summary:
# 9095602 rows x 10 columns | 3655.10 MB approx memory usage
# col_name col_type null_count nunique
# 0 uuid <class 'numpy.int64'> 0 9095602
# 1 phone <class 'numpy.float64'> 977 3399997
# 2 status <class 'numpy.int64'> 0 1
# 3 gender <class 'str'> 4765 6
# 4 dob <class 'str'> 20 36934
# 5 language <class 'str'> 398 17
# 6 email <class 'numpy.float64'> 733 3259793
# 7 report_type <class 'str'> 70 81
# 8 device <class 'str'> 187 5
# 9 log_date <class 'str'> 0 8285461
#
# "s" dataset head:
# uuid phone status gender dob language email report_type device log_date
# 0 10058150 145.0 1 Male 00000000 TAM 0.0 LS-MT mobile 2019-02-26 16:07:25
# 1 0 145.0 1 Male 00000000 TAM 0.0 LS-MT mobile 2019-02-26 16:12:08
# 2 1 145.0 1 Male 00000000 TAM 0.0 LS-MT mobile 2019-02-26 16:33:00
# 3 10058153 607734.0 1 Female 00000000 TEL 1.0 LS-MP mobile 2019-02-26 16:44:19
# 4 26 607735.0 1 Female 00000000 TAM 2.0 LS-MT mobile 2019-02-26 16:44:32
#
# 2021-09-18 22:05:50 Processing stage 1: "b_3m" dataset: dropping rows with na, converting column types ...
#
# 2021-09-18 22:05:58 "b_3m" dataset summary:
# 6970263 rows x 4 columns | 1578.80 MB approx memory usage
# col_name col_type null_count nunique
# 0 uuid <class 'str'> 0 1794005
# 1 beacon_type <class 'str'> 0 42
# 2 beacon_value <class 'numpy.int64'> 0 277
# 3 log_date <class 'str'> 0 92
#
# "b_3m" dataset head:
# uuid beacon_type beacon_value log_date
# 0 8264419 user_stay 2 2021-05-01
# 1 8264429 masked_content 1 2021-05-01
# 2 8264423 user_stay 2 2021-05-01
# 3 8264430 bottom_banner 1 2021-05-01
# 4 8264421 user_stay 3 2021-05-01
#
# 2021-09-18 22:05:58 Processing stage 1: "s" dataset: dropping rows with na, dropping columns, converting column types ...
#
# 2021-09-18 22:06:50 "s" dataset summary:
# 9088534 rows x 9 columns | 5340.69 MB approx memory usage
# col_name col_type null_count nunique
# 0 uuid <class 'str'> 0 9088534
# 1 phone <class 'str'> 0 3398850
# 2 gender <class 'str'> 0 6
# 3 dob <class 'str'> 0 36751
# 4 language <class 'str'> 0 17
# 5 email <class 'str'> 0 3258713
# 6 report_type <class 'str'> 0 78
# 7 device <class 'str'> 0 5
# 8 log_date <class 'datetime.date'> 0 827
#
# "s" dataset head:
# uuid phone gender dob language email report_type device log_date
# 0 10058150 145 Male 00000000 TAM 0 LS-MT mobile 2019-02-26
# 1 0 145 Male 00000000 TAM 0 LS-MT mobile 2019-02-26
# 2 1 145 Male 00000000 TAM 0 LS-MT mobile 2019-02-26
# 3 10058153 607734 Female 00000000 TEL 1 LS-MP mobile 2019-02-26
# 4 26 607735 Female 00000000 TAM 2 LS-MT mobile 2019-02-26
#
# 2021-09-18 22:06:50 Processing stage 2: consolidating "b_3m" dataset by date and uuid ...
# 2021-09-18 22:07:11 Consolidating sum_beacon_value ...
# 2021-09-18 22:08:54 Consolidating nunique_beacon_type ...
# 2021-09-18 22:11:07 Consolidating count_user_stay ...
# 2021-09-18 22:12:53 Consolidating count_pay_attempt ...
# 2021-09-18 22:16:14 Consolidating count_buy_click ...
#
# 2021-09-18 22:19:37 "b_3m_cb_date_uuid" dataset summary:
# 1811593 rows x 7 columns | 295.49 MB approx memory usage
# col_name col_type null_count nunique
# 0 date <class 'str'> 0 92
# 1 uuid <class 'str'> 0 1794005
# 2 sum_beacon_value <class 'numpy.int64'> 0 1563
# 3 nunique_beacon_type <class 'numpy.int64'> 0 13
# 4 count_user_stay <class 'numpy.int64'> 0 163
# 5 count_pay_attempt <class 'numpy.int64'> 0 42
# 6 count_buy_click <class 'numpy.int64'> 0 78
#
# "b_3m_cb_date_uuid" dataset head:
# date uuid sum_beacon_value nunique_beacon_type count_user_stay count_pay_attempt count_buy_click
# 0 2021-05-01 1446394 355 13 28 141 152
# 1 2021-05-01 4167303 3 1 0 3 0
# 2 2021-05-01 6005417 1 1 0 1 0
# 3 2021-05-01 7017557 1 1 1 0 0
# 4 2021-05-01 7192621 1 1 0 1 0
#
# 2021-09-18 22:19:37 Processing stage 3: merging "s" with "b_3m_cb_date_uuid" ...
#
# 2021-09-18 22:20:24 "bs_merged" dataset summary:
# 1604430 rows x 14 columns | 1107.75 MB approx memory usage
# col_name col_type null_count nunique
# 0 date <class 'str'> 0 92
# 1 uuid <class 'str'> 0 1597911
# 2 sum_beacon_value <class 'numpy.int64'> 0 1556
# 3 nunique_beacon_type <class 'numpy.int64'> 0 13
# 4 count_user_stay <class 'numpy.int64'> 0 163
# 5 count_pay_attempt <class 'numpy.int64'> 0 34
# 6 count_buy_click <class 'numpy.int64'> 0 34
# 7 phone <class 'str'> 0 835655
# 8 gender <class 'str'> 0 6
# 9 dob <class 'str'> 0 30252
# 10 language <class 'str'> 0 14
# 11 email <class 'str'> 0 824412
# 12 report_type <class 'str'> 0 68
# 13 device <class 'str'> 0 5
#
# "bs_merged" dataset head:
# date uuid sum_beacon_value nunique_beacon_type count_user_stay count_pay_attempt count_buy_click phone gender dob language email report_type device
# 0 2021-05-01 1446394 355 13 28 141 152 395287 Male 1996-06-14 HIN 524282 LS-MT mobile
# 1 2021-05-02 1446394 96 5 0 33 63 395287 Male 1996-06-14 HIN 524282 LS-MT mobile
# 2 2021-05-03 1446394 157 11 17 33 97 395287 Male 1996-06-14 HIN 524282 LS-MT mobile
# 3 2021-05-04 1446394 229 11 28 32 139 395287 Male 1996-06-14 HIN 524282 LS-MT mobile
# 4 2021-05-05 1446394 290 12 40 41 140 395287 Male 1996-06-14 HIN 524282 LS-MT mobile
######################
# /home/ngkpg/anaconda3/envs/pyconda37/bin/python3.7 /home/ngkpg/Documents/Packt_GP/GP1/code/data_prep_1.py skip
# INFO: Pandarallel will run on 4 workers.
# INFO: Pandarallel will use Memory file system to transfer data between the main process and workers.
#
# 2021-09-18 23:15:26 Reading dataset: bs_merged_3m.csv ...
#
# 2021-09-18 23:15:28 "bs_merged" dataset summary:
# 1604430 rows x 14 columns | 680.17 MB approx memory usage
# col_name col_type null_count nunique
# 0 date <class 'str'> 0 92
# 1 uuid <class 'numpy.int64'> 0 1597911
# 2 sum_beacon_value <class 'numpy.int64'> 0 1556
# 3 nunique_beacon_type <class 'numpy.int64'> 0 13
# 4 count_user_stay <class 'numpy.int64'> 0 163
# 5 count_pay_attempt <class 'numpy.int64'> 0 34
# 6 count_buy_click <class 'numpy.int64'> 0 34
# 7 phone <class 'numpy.int64'> 0 835655
# 8 gender <class 'str'> 0 6
# 9 dob <class 'str'> 0 30252
# 10 language <class 'str'> 0 14
# 11 email <class 'numpy.int64'> 0 824412
# 12 report_type <class 'str'> 0 68
# 13 device <class 'str'> 0 5
#
# "bs_merged" dataset head:
# date uuid sum_beacon_value nunique_beacon_type count_user_stay count_pay_attempt count_buy_click phone gender dob language email report_type device
# 0 2021-05-01 1446394 355 13 28 141 152 395287 Male 1996-06-14 HIN 524282 LS-MT mobile
# 1 2021-05-02 1446394 96 5 0 33 63 395287 Male 1996-06-14 HIN 524282 LS-MT mobile
# 2 2021-05-03 1446394 157 11 17 33 97 395287 Male 1996-06-14 HIN 524282 LS-MT mobile
# 3 2021-05-04 1446394 229 11 28 32 139 395287 Male 1996-06-14 HIN 524282 LS-MT mobile
# 4 2021-05-05 1446394 290 12 40 41 140 395287 Male 1996-06-14 HIN 524282 LS-MT mobile
#
# 2021-09-18 23:15:28 Processing stage 4: consolidating "bs_merged" dataset by date and email ...
# 2021-09-18 23:15:37 Consolidating count_sessions ...
# 2021-09-18 23:16:15 Consolidating sum_beacon_value ...
# 2021-09-18 23:17:10 Consolidating nunique_beacon_type ...
# 2021-09-18 23:18:05 Consolidating count_user_stay ...
# 2021-09-18 23:19:01 Consolidating count_pay_attempt ...
# 2021-09-18 23:19:56 Consolidating count_buy_click ...
# 2021-09-18 23:20:53 Consolidating nunique_gender ...
# 2021-09-18 23:22:09 Consolidating nunique_dob ...
# 2021-09-18 23:23:22 Consolidating nunique_language ...
# 2021-09-18 23:24:35 Consolidating nunique_report_type ...
# 2021-09-18 23:25:48 Consolidating nunique_device ...
#
# 2021-09-18 23:27:04 "bs_merged_cb_date_email" dataset summary:
# 1064216 rows x 13 columns | 165.43 MB approx memory usage
# col_name col_type null_count nunique
# 0 date <class 'str'> 0 92
# 1 email <class 'numpy.int64'> 0 824412
# 2 count_sessions <class 'numpy.int64'> 0 56
# 3 sum_beacon_value <class 'numpy.int64'> 0 2549
# 4 nunique_beacon_type <class 'numpy.int64'> 0 62
# 5 count_user_stay <class 'numpy.int64'> 0 237
# 6 count_pay_attempt <class 'numpy.int64'> 0 45
# 7 count_buy_click <class 'numpy.int64'> 0 42
# 8 nunique_gender <class 'numpy.int64'> 0 3
# 9 nunique_dob <class 'numpy.int64'> 0 42
# 10 nunique_language <class 'numpy.int64'> 0 8
# 11 nunique_report_type <class 'numpy.int64'> 0 13
# 12 nunique_device <class 'numpy.int64'> 0 5
#
# "bs_merged_cb_date_email" dataset head:
# date email count_sessions sum_beacon_value nunique_beacon_type count_user_stay count_pay_attempt count_buy_click nunique_gender nunique_dob nunique_language nunique_report_type nunique_device
# 0 2021-05-01 125 3 30 3 12 0 0 2 2 1 1 1
# 1 2021-05-01 141 5 39 5 16 0 0 2 5 4 2 2
# 2 2021-05-01 195 1 10 1 4 0 0 1 1 1 1 1
# 3 2021-05-01 645 1 10 1 4 0 0 1 1 1 1 1
# 4 2021-05-01 798 1 3 1 2 0 0 1 1 1 1 1
| [
"datetime.datetime.strptime",
"os.path.join",
"os.path.realpath",
"datetime.datetime.now",
"pandarallel.pandarallel.initialize",
"pandas.DataFrame"
] | [((192, 248), 'pandarallel.pandarallel.initialize', 'pandarallel.initialize', ([], {'progress_bar': '(False)', 'nb_workers': '(4)'}), '(progress_bar=False, nb_workers=4)\n', (214, 248), False, 'from pandarallel import pandarallel\n'), ((280, 306), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (296, 306), False, 'import os\n'), ((810, 951), 'pandas.DataFrame', 'pd.DataFrame', (["{'col_name': col_name_list, 'col_type': col_type_list, 'null_count':\n col_null_count_list, 'nunique': col_unique_count_list}"], {}), "({'col_name': col_name_list, 'col_type': col_type_list,\n 'null_count': col_null_count_list, 'nunique': col_unique_count_list})\n", (822, 951), True, 'import pandas as pd\n'), ((1530, 1566), 'os.path.join', 'os.path.join', (['base_path', 'in_filename'], {}), '(base_path, in_filename)\n', (1542, 1566), False, 'import os\n'), ((2180, 2216), 'os.path.join', 'os.path.join', (['base_path', 'in_filename'], {}), '(base_path, in_filename)\n', (2192, 2216), False, 'import os\n'), ((7281, 7318), 'os.path.join', 'os.path.join', (['base_path', 'out_filename'], {}), '(base_path, out_filename)\n', (7293, 7318), False, 'import os\n'), ((8071, 8107), 'os.path.join', 'os.path.join', (['base_path', 'in_filename'], {}), '(base_path, in_filename)\n', (8083, 8107), False, 'import os\n'), ((12491, 12528), 'os.path.join', 'os.path.join', (['base_path', 'out_filename'], {}), '(base_path, out_filename)\n', (12503, 12528), False, 'import os\n'), ((1395, 1418), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1416, 1418), False, 'import datetime\n'), ((1649, 1672), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1670, 1672), False, 'import datetime\n'), ((2055, 2078), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2076, 2078), False, 'import datetime\n'), ((2296, 2319), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2317, 2319), False, 'import datetime\n'), ((2794, 
2817), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2815, 2817), False, 'import datetime\n'), ((3103, 3126), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3124, 3126), False, 'import datetime\n'), ((3593, 3616), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3614, 3616), False, 'import datetime\n'), ((3988, 4034), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['x[:10]', '"""%Y-%m-%d"""'], {}), "(x[:10], '%Y-%m-%d')\n", (4014, 4034), False, 'import datetime\n'), ((4121, 4144), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4142, 4144), False, 'import datetime\n'), ((4532, 4555), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4553, 4555), False, 'import datetime\n'), ((4892, 4915), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4913, 4915), False, 'import datetime\n'), ((5180, 5203), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5201, 5203), False, 'import datetime\n'), ((5463, 5486), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5484, 5486), False, 'import datetime\n'), ((5764, 5787), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5785, 5787), False, 'import datetime\n'), ((6061, 6084), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6082, 6084), False, 'import datetime\n'), ((6491, 6514), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6512, 6514), False, 'import datetime\n'), ((7023, 7046), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7044, 7046), False, 'import datetime\n'), ((7437, 7460), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7458, 7460), False, 'import datetime\n'), ((7912, 7935), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7933, 7935), False, 'import datetime\n'), ((8211, 8234), 'datetime.datetime.now', 
'datetime.datetime.now', ([], {}), '()\n', (8232, 8234), False, 'import datetime\n'), ((8738, 8761), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8759, 8761), False, 'import datetime\n'), ((9047, 9070), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9068, 9070), False, 'import datetime\n'), ((9333, 9356), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9354, 9356), False, 'import datetime\n'), ((9652, 9675), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9673, 9675), False, 'import datetime\n'), ((9973, 9996), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9994, 9996), False, 'import datetime\n'), ((10288, 10311), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10309, 10311), False, 'import datetime\n'), ((10605, 10628), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10626, 10628), False, 'import datetime\n'), ((10917, 10940), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10938, 10940), False, 'import datetime\n'), ((11213, 11236), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11234, 11236), False, 'import datetime\n'), ((11508, 11531), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11529, 11531), False, 'import datetime\n'), ((11816, 11839), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11837, 11839), False, 'import datetime\n'), ((12125, 12148), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (12146, 12148), False, 'import datetime\n'), ((12689, 12712), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (12710, 12712), False, 'import datetime\n')] |
"""
==========================================================================
bitstruct.py
==========================================================================
APIs to generate a bitstruct type. Using decorators and type annotations
to create bit struct is much inspired by python3 dataclass implementation.
Note that the implementation (such as the _CAPITAL constants to add some
private metadata) in this file is very similar to the **original python3
dataclass implementation**. The syntax of creating bit struct is very
similar to that of python3 dataclass.
https://github.com/python/cpython/blob/master/Lib/dataclasses.py
For example,
@bitstruct
class Point:
x : Bits4
y : Bits4
will automatically generate some methods, such as __init__, __str__,
__repr__, for the Point class.
Similar to the built-in dataclasses module, we also provide a
mk_bitstruct function for user to dynamically generate bit struct types.
For example,
mk_bitstruct( 'Pixel',{
'r' : Bits4,
'g' : Bits4,
'b' : Bits4,
},
name_space = {
'__str__' : lambda self: f'({self.r},{self.g},{self.b})'
}
)
is equivalent to:
@bitstruct
class Pixel:
r : Bits4
g : Bits4
b : Bits4
def __str__( self ):
return f'({self.r},{self.g},{self.b})'
Author : <NAME>, <NAME>
Date : Oct 19, 2019
"""
import functools
import keyword
import operator
import types
import warnings
import py
from pymtl3.extra.pypy import custom_exec
from .bits_import import *
from .helpers import concat
#-------------------------------------------------------------------------
# Constants
#-------------------------------------------------------------------------
# Object with this attribute is considered as bit struct, as we assume
# only the bitstruct decorator will stamp this attribute to a class. This
# attribute also stores the field information and can be used for
# translation.
#
# The original dataclass use hasattr( cls, _FIELDS ) to check dataclass.
# We do this here as well
_FIELDS = '__bitstruct_fields__'  # class attribute stamped by the bitstruct decorator; its keys are field names
def is_bitstruct_inst( obj ):
  """Return True when *obj* is an instance of a bitstruct-decorated class.

  The bitstruct decorator stamps _FIELDS onto the class, so instance-ness
  is detected by probing the instance's type for that attribute.
  """
  klass = type( obj )
  return hasattr( klass, _FIELDS )
def is_bitstruct_class(cls):
  """Return True when *cls* is itself a bitstruct-decorated class."""
  # Non-types can never be bitstruct classes; bail out before probing.
  if not isinstance( cls, type ):
    return False
  return hasattr( cls, _FIELDS )
def get_bitstruct_inst_all_classes( obj ):
  """Collect the set of all classes reachable from *obj*.

  Recurses through lists and bitstruct fields; Bits/int values terminate
  the recursion. Asserts on anything that is not a list, Bits, int, or
  bitstruct instance.
  """
  # A list contributes the union of its elements' class sets.
  if isinstance( obj, list ):
    return functools.reduce( operator.or_,
                             [ get_bitstruct_inst_all_classes( elem ) for elem in obj ] )
  found = { obj.__class__ }
  # Plain Bits / int leaves: nothing further to traverse.
  if isinstance( obj, (Bits, int) ):
    return found
  # Anything else must be a bitstruct; union in every field's class set.
  assert is_bitstruct_inst( obj ), f"{obj} is not a valid PyMTL Bitstruct!"
  field_class_sets = [ get_bitstruct_inst_all_classes( getattr( obj, fname ) )
                       for fname in obj.__bitstruct_fields__.keys() ]
  return found | functools.reduce( operator.or_, field_class_sets )
# Preferred "self" name for generated methods; presumably the anti-conflict
# name is substituted when a field is literally named 's' — confirm at the
# use site (not visible in this chunk).
_DEFAULT_SELF_NAME = 's'
_ANTI_CONFLICT_SELF_NAME = '__bitstruct_self__'
#-------------------------------------------------------------------------
# _create_fn
#-------------------------------------------------------------------------
# A helper function that creates a function based on
# - fn_name : name of the function
# - args_lst : a list of arguments in string
# - body_lst : a list of statement of the function body in string
# Also note that this whole _create_fn thing is similar to the original
# dataclass implementation!
def _create_fn( fn_name, args_lst, body_lst, _globals=None ):
  """Build and return a function object from source fragments.

  fn_name  : name of the generated function
  args_lst : list of argument strings
  body_lst : list of body statements (one string each, unindented)
  _globals : optional globals dict for the generated code
  """
  # Render the argument list and the (two-space indented) body.
  arg_str  = ', '.join( args_lst )
  body_str = '\n'.join( f'  {stmt}' for stmt in body_lst )
  src = f'def {fn_name}({arg_str}):\n{body_str}'
  # Compile via py.code.Source (keeps tracebacks usable) and execute,
  # then fish the new function out of the local namespace.
  _globals = _globals if _globals is not None else {}
  _locals  = {}
  custom_exec( py.code.Source( src ).compile(), _globals, _locals )
  return _locals[ fn_name ]
#-------------------------------------------------------------------------
# _mk_init_arg
#-------------------------------------------------------------------------
# Creates a init argument string from a field.
#
# Shunning: I revamped the whole thing because they are indeed mutable
# objects.
def _mk_init_arg( name, type_ ):
# default is always None
if isinstance( type_, list ) or is_bitstruct_class( type_ ):
return f'{name} = None'
return f'{name} = 0'
#-------------------------------------------------------------------------
# _mk_init_body
#-------------------------------------------------------------------------
# Creates one line of __init__ body from a field
# to globals.
def _mk_init_body( self_name, name, type_ ):
  """Render one line of the generated __init__ that initializes field *name*."""
  def _recursive_generate_init( x ):
    # Build a nested-list constructor expression with one _type_<name>() per leaf.
    if isinstance( x, list ):
      return f"[{', '.join( [ _recursive_generate_init(x[0]) ] * len(x) )}]"
    return f"_type_{name}()"
  # Lists / nested bitstructs: use the caller-provided object if given,
  # otherwise construct a default one.
  if isinstance( type_, list ) or is_bitstruct_class( type_ ):
    return f'{self_name}.{name} = {name} or {_recursive_generate_init(type_)}'
  assert issubclass( type_, Bits )
  # Bits field: cast the (possibly plain int) argument to the declared type.
  return f'{self_name}.{name} = _type_{name}({name})'
#-------------------------------------------------------------------------
# _mk_tuple_str
#-------------------------------------------------------------------------
# Creates a tuple of string representations of each field. For example,
# if the self_name is 'self' and fields is [ 'x', 'y' ], it will return
# ('self.x', 'self.y'). This is used for creating the default __eq__ and
# __hash__ function.
def _mk_tuple_str( self_name, fields ):
return f'({",".join([f"{self_name}.{name}" for name in fields])},)'
#-------------------------------------------------------------------------
# _mk_init_fn
#-------------------------------------------------------------------------
# Creates a __init__ function based on fields. For example, if fields
# contains two field x (Bits4) and y (Bits4), _mk_init_fn will return a
# function that looks like the following:
#
# def __init__( s, x = 0, y = 0, z = None, p = None ):
# s.x = _type_x(x)
# s.y = _type_y(y)
# s.z = z or _type_z()
# s.p = p or [ _type_p(), _type_p() ]
#
# NOTE:
# _mk_init_fn also takes as argument the name of self in case there is a
# field with name 's' or 'self'.
#
# TODO: should we provide a __post_init__ function like dataclass does?
def _mk_init_fn( self_name, fields ):
  """Create the generated __init__ for a bitstruct with the given *fields*."""
  # Register necessary types in _globals
  _globals = {}
  for name, type_ in fields.items():
    if isinstance( type_, list ):
      # Multidimensional array field: register the leaf element type.
      x = type_[0]
      while isinstance( x, list ):
        x = x[0]
      _globals[ f"_type_{name}" ] = x
    else:
      assert issubclass( type_, Bits ) or is_bitstruct_class( type_ )
      _globals[ f"_type_{name}" ] = type_
  return _create_fn(
    '__init__',
    [ self_name ] + [ _mk_init_arg( *field ) for field in fields.items() ],
    [ _mk_init_body( self_name, *field ) for field in fields.items() ],
    _globals = _globals,
  )
#-------------------------------------------------------------------------
# _mk_str_fn
#-------------------------------------------------------------------------
# Creates a __str__ function based on fields. For example, if fields
# contains two field x (Bits4) and y (Bits4), _mk_str_fn will return a
# function that looks like the following:
#
# def __str__( self ):
# return f'{self.x}:{self.y}'
def _mk_str_fn( fields ):
  """Create a __str__ printing colon-separated field values, e.g. f'{self.x}:{self.y}'."""
  placeholders = ':'.join( f'{{self.{name}}}' for name in fields )
  return _create_fn(
    '__str__',
    [ 'self' ],
    [ f'return f"{placeholders}"' ],
  )
#-------------------------------------------------------------------------
# _mk_repr_fn
#-------------------------------------------------------------------------
# Creates a __repr__ function based on fields. For example, if fields
# contains two field x (Bits4) and y (Bits4), _mk_repr_fn will return a
# function that looks like the following:
#
# def __repr__( self ):
# return self.__class__.__name__ + f'(x={self.x!r}, y={self.y!r})'
def _mk_repr_fn( fields ):
  """Create a __repr__ like ClassName(x=Bits4(0x1),y=...) using each field's repr."""
  return _create_fn(
    '__repr__',
    [ 'self' ],
    [ 'return self.__class__.__name__ + f"(' +
      ','.join([ f'{{self.{name}!r}}' for name in fields ]) +
      ')"']
  )
#-------------------------------------------------------------------------
# _mk_eq_fn
#-------------------------------------------------------------------------
# Creates a __eq__ function based on fields. By default it just compares
# each field. For example, if fields contains two field x (Bits4) and y
# (Bits4), _mk_eq_fn will return a function that looks like the
# following:
#
# def __eq__( self, other ):
#   if other.__class__ is self.__class__:
#     return (self.x,self.y,) == (other.x,other.y,)
#   else:
#     raise NotImplemented
def _mk_eq_fn( fields ):
  """Create __eq__ that requires identical classes and compares all fields."""
  self_tuple = _mk_tuple_str( 'self', fields )
  other_tuple = _mk_tuple_str( 'other', fields )
  return _create_fn(
    '__eq__',
    [ 'self', 'other' ],
    [ f'return (other.__class__ is self.__class__) and {self_tuple} == {other_tuple}' ]
  )
#-------------------------------------------------------------------------
# _mk_hash_fn
#-------------------------------------------------------------------------
# Creates a __hash__ function based on fields. By default it just hashes
# all fields. For example, if fields contains two field x (Bits4) and y
# (Bits4), _mk_hash_fn will return a function that looks like the
# following:
#
# def __hash__( self ):
#   return hash((self.x,self.y,))
def _mk_hash_fn( fields ):
  """Create __hash__ over the tuple of all field values."""
  self_tuple = _mk_tuple_str( 'self', fields )
  return _create_fn(
    '__hash__',
    [ 'self' ],
    [ f'return hash({self_tuple})' ]
  )
#--------------------------PyMTL3 specific--------------------------------
#-------------------------------------------------------------------------
# _mk_ff_fn
#-------------------------------------------------------------------------
# Creates __ilshift__ and _flip functions that looks like the following:
#
# def __ilshift__( self, other ):
# if self.__class__ is not other.__class__:
# other = self.__class__.from_bits( other.to_bits() )
# self.x <<= other.x
# self.y[0][0] <<= other.y[0][0]
#
# def _flip( self ):
# self.x._flip()
# self.y[i][j]._flip()
def _mk_ff_fn( fields ):
  """Create __ilshift__ (non-blocking <<= for update_ff) and _flip.

  Returns a 2-tuple (ilshift_fn, flip_fn).
  """
  def _gen_list_ilshift_strs( type_, prefix='' ):
    # Recurse through list dimensions; each leaf yields one '<<=' statement
    # and one matching '_flip()' statement.
    if isinstance( type_, list ):
      ilshift_strs, flip_strs = [], []
      for i in range(len(type_)):
        ils, fls = _gen_list_ilshift_strs( type_[0], f"{prefix}[{i}]" )
        ilshift_strs.extend( ils )
        flip_strs.extend( fls )
      return ilshift_strs, flip_strs
    else:
      return [ f"self.{prefix} <<= other.{prefix}" ], [f"self.{prefix}._flip()"]
  # When the RHS is a different class, convert it through to_bits/from_bits first.
  ilshift_strs = [ 'if self.__class__ is not other.__class__:',
                   '  other = self.__class__.from_bits( other.to_bits() )']
  flip_strs = []
  for name, type_ in fields.items():
    ils, fls = _gen_list_ilshift_strs( type_, name )
    ilshift_strs.extend( ils )
    flip_strs.extend( fls )
  return _create_fn(
    '__ilshift__',
    [ 'self', 'other' ],
    ilshift_strs + [ "return self" ],
  ), _create_fn(
    '_flip',
    [ 'self' ],
    flip_strs,
  ),
#-------------------------------------------------------------------------
# _mk_clone_fn
#-------------------------------------------------------------------------
# Creates clone function that looks like the following:
# Use this clone function in any place that you need to perform a
# deepcopy on a bitstruct.
#
# def clone( self ):
# return self.__class__( self.x.clone(), [ self.y[0].clone(), self.y[1].clone() ] )
def _gen_list_clone_strs( type_, prefix='' ):
if isinstance( type_, list ):
return "[" + ",".join( [ _gen_list_clone_strs( type_[0], f"{prefix}[{i}]" )
for i in range(len(type_)) ] ) + "]"
else:
return f"{prefix}.clone()"
def _mk_clone_fn( fields ):
  """Create clone(): rebuilds the object by recursively cloning every field."""
  clone_strs = [ 'return self.__class__(' ]
  for name, type_ in fields.items():
    clone_strs.append( " " + _gen_list_clone_strs( type_, f'self.{name}' ) + "," )
  return _create_fn(
    'clone',
    [ 'self' ],
    clone_strs + [ ')' ],
  )
def _mk_deepcopy_fn( fields ):
  """Create __deepcopy__: same body as clone(); *memo* is accepted but unused."""
  clone_strs = [ 'return self.__class__(' ]
  for name, type_ in fields.items():
    clone_strs.append( " " + _gen_list_clone_strs( type_, f'self.{name}' ) + "," )
  return _create_fn(
    '__deepcopy__',
    [ 'self', 'memo' ],
    clone_strs + [ ')' ],
  )
#-------------------------------------------------------------------------
# _mk_imatmul_fn
#-------------------------------------------------------------------------
# Creates @= function that copies the value over ...
# TODO create individual from_bits for imatmul and ilshift
# def __imatmul__( self, other ):
# if self.__class__ is not other.__class__:
# other = self.__class__.from_bits( other.to_bits() )
# self.x @= other.x
# self.y[0] @= other.y[0]
# self.y[1] @= other.y[1]
def _mk_imatmul_fn( fields ):
  """Create __imatmul__ (blocking @= assignment) that copies all field values."""
  def _gen_list_imatmul_strs( type_, prefix='' ):
    # Recurse through list dimensions; each leaf yields one '@=' statement.
    if isinstance( type_, list ):
      ret = []
      for i in range(len(type_)):
        ret.extend( _gen_list_imatmul_strs( type_[0], f"{prefix}[{i}]" ) )
      return ret
    else:
      return [ f"self.{prefix} @= other.{prefix}" ]
  # When the RHS is a different class, convert it through to_bits/from_bits first.
  imatmul_strs = [ 'if self.__class__ is not other.__class__:',
                   '  other = self.__class__.from_bits( other.to_bits() )']
  for name, type_ in fields.items():
    imatmul_strs.extend( _gen_list_imatmul_strs( type_, name ) )
  return _create_fn(
    '__imatmul__',
    [ 'self', 'other' ],
    imatmul_strs + [ "return self" ],
  )
#-------------------------------------------------------------------------
# _mk_nbits_to_bits_fn
#-------------------------------------------------------------------------
# Creates nbits, to_bits function that copies the value over ...
#
# def to_bits( self ):
# return concat( self.x, self.y[0], self.y[1] )
#
# TODO packing order of array? x[0] is LSB or MSB of a list
# current we do LSB
def _mk_nbits_to_bits_fn( fields ):
  """Compute the struct's total bit width and create its to_bits() method.

  Returns (total_nbits, to_bits_fn). Fields are concatenated in declaration
  order, with the first declared field occupying the lowest bits.
  """
  def _gen_to_bits_strs( type_, prefix, start_bit ):
    # Returns (end_bit, list-of-source-fragments) for field *prefix*.
    if isinstance( type_, list ):
      to_strs = []
      # The packing order is LSB, so we need to reverse the list to make x[-1] higher bits
      for i in reversed(range(len(type_))):
        start_bit, tos = _gen_to_bits_strs( type_[0], f"{prefix}[{i}]", start_bit )
        to_strs.extend( tos )
      return start_bit, to_strs
    elif is_bitstruct_class( type_ ):
      # Nested bitstruct: flatten its fields in place.
      to_strs = []
      for name, typ in getattr(type_, _FIELDS).items():
        start_bit, tos = _gen_to_bits_strs( typ, f"{prefix}.{name}", start_bit )
        to_strs.extend( tos )
      return start_bit, to_strs
    else:
      # Bits leaf: occupies [start_bit, end_bit).
      end_bit = start_bit + type_.nbits
      return end_bit, [ f"self.{prefix}" ]
  to_bits_strs = []
  total_nbits = 0
  for name, type_ in fields.items():
    total_nbits, tos = _gen_to_bits_strs( type_, name, total_nbits )
    to_bits_strs.extend( tos )
  return total_nbits, _create_fn( 'to_bits', [ 'self' ],
            [ f"return concat({', '.join(to_bits_strs)})" ],
            _globals={'concat':concat} )
#-------------------------------------------------------------------------
# _mk_from_bits_fn
#-------------------------------------------------------------------------
# Creates static method from_bits that creates a new bitstruct based on Bits
# and instance method _from_bits that copies the value over
#
# @staticmethod
# def from_bits( other ):
# return self.__class__( other[16:32], other[0:16] )
def _mk_from_bits_fns( fields, total_nbits ):
  """Create the body of the from_bits classmethod.

  Generates source like
      return cls( other[16:32], other[0:16] )
  that slices a flat Bits value back into fields, matching the packing
  order produced by the generated to_bits (x[0] of a list is the LSB chunk).
  """
  def _gen_from_bits_strs( type_, end_bit ):
    # Returns (start_bit, list-of-constructor-source-fragments) for one field.
    if isinstance( type_, list ):
      from_strs = []
      # Since we are doing LSB for x[0], we need to unpack from the last
      # element of the list, and then reverse it again to construct a list ...
      for i in range(len(type_)):
        end_bit, fs = _gen_from_bits_strs( type_[0], end_bit )
        from_strs.extend( fs )
      return end_bit, [ f"[{','.join(reversed(from_strs))}]" ]
    elif is_bitstruct_class( type_ ):
      # Nested bitstruct classes get synthetic names so two different classes
      # that happen to share __name__ cannot collide in _globals.
      if type_ in type_name_mapping:
        type_name = type_name_mapping[ type_ ]
      else:
        type_name = f"_type{len(type_name_mapping)}"
        type_name_mapping[ type_ ] = type_name
      from_strs = []
      for name, typ in getattr(type_, _FIELDS).items():
        end_bit, fs = _gen_from_bits_strs( typ, end_bit )
        from_strs.extend( fs )
      return end_bit, [ f"{type_name}({','.join(from_strs)})" ]
    else:
      # Bits leaf: registered under its own class name; a clash with a
      # different class of the same name is a hard error.
      if type_ not in type_name_mapping:
        type_name_mapping[ type_ ] = type_.__name__
      else:
        assert type_name_mapping[ type_ ] == type_.__name__
      start_bit = end_bit - type_.nbits
      return start_bit, [ f"other[{start_bit}:{end_bit}]" ]
  from_bits_strs = []
  end_bit = total_nbits
  # This is to make sure we capture two types with the same name but different
  # attributes
  type_name_mapping = {}
  for _, type_ in fields.items():
    end_bit, fs = _gen_from_bits_strs( type_, end_bit )
    from_bits_strs.extend( fs )
  # Every bit of the flat value must have been consumed.
  assert end_bit == 0
  _globals = { y: x for x,y in type_name_mapping.items() }
  assert len(_globals) == len(type_name_mapping)
  # TODO add assertion in bits
  return _create_fn( 'from_bits', [ 'cls', 'other' ],
      [ "assert cls.nbits == other.nbits, f'LHS bitstruct {cls.nbits}-bit <> RHS other {other.nbits}-bit'",
        "other = other.to_bits()",
        f"return cls({','.join(from_bits_strs)})" ], _globals )
#-------------------------------------------------------------------------
# _check_valid_array
#-------------------------------------------------------------------------
def _recursive_check_array_types( current ):
  # Validate one dimension of a field array and return the leaf element type.
  # Raises AssertionError when element sizes or types vary within a dimension.
  x = current[0]
  if isinstance( x, list ):
    x_len = len(x)
    x_type = _recursive_check_array_types( x )
    # Every sibling must be a list of the same length and leaf type.
    for y in current[1:]:
      assert isinstance( y, list ) and len(y) == x_len
      y_type = _recursive_check_array_types( y )
      assert y_type is x_type
    return x_type
  # Leaf dimension: all elements must be the very same Bits/bitstruct class.
  assert issubclass( x, Bits ) or is_bitstruct_class( x )
  for y in current[1:]:
    assert y is x
  return x
def _check_valid_array_of_types( arr ):
  # Check if the provided list is a strict multidimensional array
  # Returns the leaf type on success; prints the failure and returns None.
  try:
    return _recursive_check_array_types( arr )
  except Exception as e:
    print(e)
    return None
#-------------------------------------------------------------------------
# _check_field_annotation
#-------------------------------------------------------------------------
def _check_field_annotation( cls, name, type_ ):
  """Reject an invalid field annotation on *cls* with a descriptive TypeError."""
  # Make sure no default value is assigned to an annotated field
  if hasattr( cls, name ):
    default = getattr( cls, name )
    raise TypeError( "We don't allow subfields to have default value:\n"
                     f"- Field '{name}' of BitStruct {cls.__name__} has default value {default!r}." )
  # Special case if the type is an instance of list
  if isinstance( type_, list ):
    if _check_valid_array_of_types( type_ ) is None:
      raise TypeError( "The provided list spec should be a strict multidimensional ARRAY "
                       "with no varying sizes or types. All non-list elements should be VALID types." )
  else:
    # Now we work with types
    if not isinstance( type_, type ):
      raise TypeError(f"{type_} is not a type\n"\
                      f"- Field '{name}' of BitStruct {cls.__name__} is annotated as {type_}.")
    # More specifically, Bits and BitStruct
    if not issubclass( type_, Bits ) and not is_bitstruct_class( type_ ):
      raise TypeError( "We currently only support BitsN, list, or another BitStruct as BitStruct field:\n"
                       f"- Field '{name}' of BitStruct {cls.__name__} is annotated as {type_}." )
#-------------------------------------------------------------------------
# _get_self_name
#-------------------------------------------------------------------------
# Return a self name based on fields.
def _get_self_name( fields ):
  """Pick a name for 'self' in generated methods that cannot collide with a field."""
  if _DEFAULT_SELF_NAME in fields:
    return _ANTI_CONFLICT_SELF_NAME
  return _DEFAULT_SELF_NAME
#-------------------------------------------------------------------------
# _process_cls
#-------------------------------------------------------------------------
# Process the input cls and add methods to it.
# Memoization cache: maps a hash of (class name, field spec, add_* flags)
# to the first class processed with that spec.
_bitstruct_hash_cache = {}
def _process_class( cls, add_init=True, add_str=True, add_repr=True,
                    add_hash=True ):
  """Install the generated bitstruct methods on *cls* and return it.

  Shared back end of the @bitstruct decorator and mk_bitstruct(). Results
  are memoized in _bitstruct_hash_cache, so an identical spec returns the
  previously processed class.
  """
  # Get annotations of the class
  cls_annotations = cls.__dict__.get('__annotations__', {})
  if not cls_annotations:
    raise AttributeError( "No field is declared in the bit struct definition.\n"
                          f"Suggestion: check the definition of {cls.__name__} to"
                          " make sure it only contains 'field_name(string): Type(type).'" )
  # Get field information from the annotation and prepare for hashing
  fields = {}
  hashable_fields = {}
  def _convert_list_to_tuple( x ):
    # Lists are unhashable; convert (nested) lists to tuples for the cache key.
    if isinstance( x, list ):
      return tuple( [ _convert_list_to_tuple( y ) for y in x ] )
    return x
  reserved_fields = ['to_bits', 'from_bits', 'nbits']
  for x in reserved_fields:
    assert x not in cls.__dict__, f"Currently a bitstruct cannot have {reserved_fields}, but "\
                                  f"{x} is provided as {cls.__dict__[x]}"
  for a_name, a_type in cls_annotations.items():
    assert a_name not in reserved_fields, f"Currently a bitstruct cannot have {reserved_fields}, but "\
                                          f"{a_name} is annotated as {a_type}"
    _check_field_annotation( cls, a_name, a_type )
    fields[ a_name ] = a_type
    hashable_fields[ a_name ] = _convert_list_to_tuple( a_type )
  # Note: _hash is also stored on the class itself before the cache lookup.
  cls._hash = _hash = hash( (cls.__name__, *tuple(hashable_fields.items()),
                             add_init, add_str, add_repr, add_hash) )
  if _hash in _bitstruct_hash_cache:
    return _bitstruct_hash_cache[ _hash ]
  _bitstruct_hash_cache[ _hash ] = cls
  # Stamp the special attribute so that translation pass can identify it
  # as bit struct.
  setattr( cls, _FIELDS, fields )
  # Add methods to the class
  # Create __init__. Here I follow the dataclass convention that we only
  # add our generated __init__ function when add_init is true and user
  # did not define their own init.
  if add_init:
    if not '__init__' in cls.__dict__:
      cls.__init__ = _mk_init_fn( _get_self_name(fields), fields )
  # Create __str__
  if add_str:
    if not '__str__' in cls.__dict__:
      cls.__str__ = _mk_str_fn( fields )
  # Create __repr__
  if add_repr:
    if not '__repr__' in cls.__dict__:
      cls.__repr__ = _mk_repr_fn( fields )
  # Create __eq__. There is no need for a __ne__ method as python will
  # call __eq__ and negate it.
  # NOTE: if user overwrites __eq__ it may lead to different behavior for
  # the translated verilog as in the verilog world two bit structs are
  # equal only if all the fields are equal. We always try to add __eq__
  if not '__eq__' in cls.__dict__:
    cls.__eq__ = _mk_eq_fn( fields )
  else:
    w_msg = ( f'Overwriting {cls.__qualname__}\'s __eq__ may cause the '
              'translated verilog behaves differently from PyMTL '
              'simulation.')
    warnings.warn( w_msg )
  # Create __hash__.
  if add_hash:
    if not '__hash__' in cls.__dict__:
      cls.__hash__ = _mk_hash_fn( fields )
  # Shunning: add __ilshift__ and _flip for update_ff
  assert not '__ilshift__' in cls.__dict__ and not '_flip' in cls.__dict__
  cls.__ilshift__, cls._flip = _mk_ff_fn( fields )
  # Shunning: add clone
  assert not 'clone' in cls.__dict__ and not '__deepcopy__' in cls.__dict__
  cls.clone = _mk_clone_fn( fields )
  cls.__deepcopy__ = _mk_deepcopy_fn( fields )
  # Shunning: add imatmul for assignment, as well as nbits/to_bits/from_bits
  assert '__imatmul__' not in cls.__dict__ and 'to_bits' not in cls.__dict__ and \
         'nbits' not in cls.__dict__ and 'from_bits' not in cls.__dict__
  cls.__imatmul__ = _mk_imatmul_fn( fields )
  cls.nbits, cls.to_bits = _mk_nbits_to_bits_fn( fields )
  from_bits = _mk_from_bits_fns( fields, cls.nbits )
  cls.from_bits = classmethod(from_bits)
  assert not 'get_field_type' in cls.__dict__
  def get_field_type( cls, name ):
    # Look up the declared type of one field; raises AttributeError if absent.
    if name in cls.__bitstruct_fields__:
      return cls.__bitstruct_fields__[ name ]
    raise AttributeError( f"{cls} has no field '{name}'" )
  cls.get_field_type = classmethod(get_field_type)
  # TODO: maybe add a to_bits and from bits function.
  return cls
#-------------------------------------------------------------------------
# bitstruct
#-------------------------------------------------------------------------
# The actual class decorator. We add a * in the argument list so that the
# following argument can only be used as keyword arguments.
def bitstruct( _cls=None, *, add_init=True, add_str=True, add_repr=True, add_hash=True ):
  """Class decorator that turns an annotated class into a PyMTL bitstruct.

  Works both bare (@bitstruct) and with keyword arguments
  (@bitstruct(add_repr=False)). The add_* flags select which methods get
  generated.
  """
  def wrap( cls ):
    # Bug fix: add_hash was previously dropped here, so add_hash=False was
    # silently ignored (and the memoization hash disagreed with the flags).
    return _process_class( cls, add_init, add_str, add_repr, add_hash )
  # Called as @bitstruct(...)
  if _cls is None:
    return wrap
  # Called as @bitstruct without parens.
  return wrap( _cls )
#-------------------------------------------------------------------------
# mk_bitstruct
#-------------------------------------------------------------------------
# Dynamically generate a bit struct class.
# TODO: should we add base parameters to support inheritence?
def mk_bitstruct( cls_name, fields, *, namespace=None, add_init=True,
                  add_str=True, add_repr=True, add_hash=True ):
  """Dynamically create a bitstruct class named *cls_name*.

  *fields* maps field name -> type (BitsN, a nested bitstruct, or a list
  thereof); *namespace* supplies extra class attributes.
  """
  # copy namespace since will mutate it
  namespace = {} if namespace is None else namespace.copy()
  # We assume fields is a dictionary and thus there won't be duplicate
  # field names. So we only check if the field names are indeed strings
  # and that they are not keywords.
  annos = {}
  for name, f in fields.items():
    if not isinstance( name, str ) or not name.isidentifier():
      raise TypeError( f'Field name {name!r} is not a valid identifier!' )
    if keyword.iskeyword( name ):
      raise TypeError( f'Field name {name!r} is a keyword!' )
    annos[ name ] = f
  namespace['__annotations__'] = annos
  # Build the class object, then run it through the normal decorator path.
  cls = types.new_class( cls_name, (), {}, lambda ns: ns.update( namespace ) )
  return bitstruct( cls, add_init=add_init, add_str=add_str,
                    add_repr=add_repr, add_hash=add_hash )
| [
"warnings.warn",
"keyword.iskeyword",
"py.code.Source"
] | [((23064, 23084), 'warnings.warn', 'warnings.warn', (['w_msg'], {}), '(w_msg)\n', (23077, 23084), False, 'import warnings\n'), ((25838, 25861), 'keyword.iskeyword', 'keyword.iskeyword', (['name'], {}), '(name)\n', (25855, 25861), False, 'import keyword\n'), ((3751, 3770), 'py.code.Source', 'py.code.Source', (['src'], {}), '(src)\n', (3765, 3770), False, 'import py\n')] |
import platform
"""
[Note for Windows]
- Use '\\' or '/' in path
  Ex) gitStoragePath = "D:\\Source\\gitrepos"
- Install 'Git for Windows'
- Windows version of VUDDY use its own JRE
[Note for POSIX]
- Use '/' for path
  Ex) gitStoragePath = "/home/ubuntu/gitrepos/"
- Java binary is only needed in POSIX
"""
# Root directory under which the analyzed git repositories are stored.
gitStoragePath = "/home/ubuntu/gitrepos/"
pf = platform.platform()
# NOTE(review): the OS is detected by substring-matching platform.platform();
# platform.system() == "Windows" would be the canonical check -- confirm.
if "Windows" in pf: # Windows
    # Absolute paths to the Git-for-Windows binaries.
    gitBinary = "C:\\Program Files\\Git\\bin\\git.exe"
    diffBinary = "C:\\Program Files\\Git\\usr\\bin\\diff.exe"
else: # POSIX
    # On POSIX the tools are resolved through $PATH.
    gitBinary = "git"
    diffBinary = "diff"
    javaBinary = "java"
| [
"platform.platform"
] | [((355, 374), 'platform.platform', 'platform.platform', ([], {}), '()\n', (372, 374), False, 'import platform\n')] |
from math import sqrt, pow
def std_asym_ostap(n1,n2):
    # Standard error of the asymmetry (n1-n2)/(n1+n2) obtained via ostap's
    # VE (value-with-error) arithmetic: VE(n,n) models a Poisson count.
    return (VE(n1,n1).asym(VE(n2,n2))).error()
def std_asym_calc(n1, n2):
    """Closed-form standard error of the asymmetry (n1-n2)/(n1+n2) for Poisson counts."""
    relative_err = sqrt(1. / n1 + 1. / n2)
    ratio = n1 / n2
    return 2. * n1 * relative_err / (n2 * (ratio + 1.) ** 2)
print("n=100")
print(" ostap = " + str(std_asym_ostap(100,100)))
print(" calc. = " + str(std_asym_calc (100,100)))
| [
"math.pow",
"math.sqrt"
] | [((146, 171), 'math.sqrt', 'sqrt', (['(1.0 / n1 + 1.0 / n2)'], {}), '(1.0 / n1 + 1.0 / n2)\n', (150, 171), False, 'from math import sqrt, pow\n'), ((170, 191), 'math.pow', 'pow', (['(n1 / n2 + 1.0)', '(2)'], {}), '(n1 / n2 + 1.0, 2)\n', (173, 191), False, 'from math import sqrt, pow\n')] |
#!/usr/bin/env python3
# vim: set fileencoding=utf-8 fileformat=unix expandtab :
"""struct.py -- Point and Rect
Copyright (C) 2010 <NAME> <<EMAIL>> All rights reserved.
This software is subject to the provisions of the Zope Public License,
Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
FOR A PARTICULAR PURPOSE.
"""
from collections import namedtuple
import math
__all__ = ("Point", "Rect", "EPSILON")
PI = math.pi
EPSILON = 0.01 # mm
_Point = namedtuple("Point", "x y")
class Point(_Point):
    """Point represented by 2D coordinate.
    >>> p = Point(0, 10)
    >>> p
    Point(0.00, 10.00)
    >>> p + Point(5, 10)
    Point(5.00, 20.00)
    >>> p - Point(5, 10)
    Point(-5.00, 0.00)
    >>> -p
    Point(0.00, -10.00)
    >>> p * 2
    Point(0.00, 20.00)
    >>> p / 2
    Point(0.00, 5.00)
    >>> p.shift(Point(20, 30))
    Point(20.00, 40.00)
    >>> p.shift([20, 30])
    Point(20.00, 40.00)
    >>> p.shift(20)
    Point(20.00, 10.00)
    >>> list(p)
    [0, 10]
    >>> p == Point(0, 10)
    True
    >>> p != Point(0, 10)
    False
    >>> p == Point(5, 10)
    False
    >>> p != Point(5, 10)
    True
    >>> bool(p)
    True
    >>> bool(Point(0, 0))
    False
    >>> p.rotate(30)
    Point(-5.00, 8.66)
    >>> p.rotate(30, origin=Point(10, 10))
    Point(1.34, 5.00)
    """
    def __str__(self):
        # Coordinates are always printed with two decimal places.
        return f"({self.x:.2f}, {self.y:.2f})"
    def __repr__(self):
        return "Point" + self.__str__()
    def int(self):
        # Truncate both coordinates toward zero.
        return Point(*map(int, self))
    fix = int  # alias kept for API compatibility
    def floor(self):
        return Point(*map(math.floor, self))
    def ceil(self):
        return Point(*map(math.ceil, self))
    @staticmethod
    def _round(f, places=0):
        # Round a number in accordance with the traditional way,
        # while Python's round() rounds to the nearest even number.
        return math.floor(f * math.pow(10, places) + .5) / math.pow(10, places)
    def round(self, places=0):
        return Point(self._round(self.x, places), self._round(self.y, places))
    def __bool__(self):
        # A point is falsy only at the origin.
        return self != (0, 0)
    def __neg__(self):
        return Point(-self.x, -self.y)
    def __add__(self, pnt):
        return self.shift(pnt)
    def __sub__(self, pnt):
        # NOTE(review): this needs pnt to support unary minus, so a plain
        # tuple/list argument raises TypeError here -- confirm intended.
        return self.shift(-pnt)
    def __mul__(self, n):
        # Scalar multiplication only.
        if not isinstance(n, (int, float)):
            raise NotImplementedError
        return Point(self.x * n, self.y * n)
    __rmul__ = __mul__
    def __truediv__(self, n):
        # Scalar division only.
        if not isinstance(n, (int, float)):
            raise NotImplementedError
        return Point(self.x / n, self.y / n)
    def shift(self, pnt, _y = 0):
        # Accepts either an (x, y) pair or two scalars.
        if isinstance(pnt, (tuple, list)):
            return Point(self.x + pnt[0], self.y + pnt[1])
        elif isinstance(pnt, (int, float)) and isinstance(_y, (int, float)):
            return Point(self.x + pnt, self.y + _y)
        else:
            raise NotImplementedError
    def rotate(self, degree, origin=None):
        # Rotate counter-clockwise by *degree* around *origin* (default origin (0, 0)).
        p = Point(*self)
        if origin is not None:
            p -= origin
        rad = PI * degree / 180.0
        sin, cos = math.sin(rad), math.cos(rad)
        # Standard 2D rotation matrix applied to the translated point.
        p = Point(p.x * cos - p.y * sin, p.x * sin + p.y * cos)
        if origin is not None:
            p += origin
        return p
_Rect = namedtuple("_Rect", "left top right bottom")
class Rect(_Rect):
    """Half-open rectangular region.
    A region is represented by half-open coodinate intervals. Left-top
    coordinate is inclusive but right-bottom one is exclusive.
    >>> r = Rect(0, 10, 20, 30)
    >>> r
    Rect(0.00, 10.00, 20.00, 30.00)
    >>> r.position()
    Point(0.00, 10.00)
    >>> r.size()
    Point(20.00, 20.00)
    >>> r.shift(Point(15, 25))
    Rect(15.00, 35.00, 35.00, 55.00)
    >>> r * 2
    Rect(0.00, 10.00, 40.00, 50.00)
    >>> r / 2
    Rect(0.00, 10.00, 10.00, 20.00)
    >>> list(r)
    [0, 10, 20, 30]
    >>> r == Rect(0, 10, 20, 30)
    True
    >>> r != Rect(0, 10, 20, 30)
    False
    >>> r.position()
    Point(0.00, 10.00)
    >>> r.size()
    Point(20.00, 20.00)
    >>> r.position_and_size()
    (Point(0.00, 10.00), Point(20.00, 20.00))
    """
    def __str__(self):
        # All four coordinates printed with two decimal places.
        return f"({', '.join(f'{x:.2f}' for x in self)})"
    def __repr__(self):
        return "Rect" + self.__str__()
    def half_open(self):
        """Get half-open version i.e. right-bottom is excluded."""
        return Rect(self.left, self.top,
                    self.right + EPSILON, self.bottom + EPSILON)
    def closed(self):
        """Get closed version i.e. rigit-bottom is included."""
        return Rect(self.left, self.top,
                    self.right - EPSILON, self.bottom - EPSILON)
    def int(self):
        """Special method to adapt to XDW_RECT."""
        return Rect(*map(int, self))
    fix = int  # alias kept for API compatibility
    def position(self):
        """Left-top corner as a Point."""
        return Point(self.left, self.top)
    def size(self):
        """(width, height) as a Point."""
        return Point(self.right - self.left, self.bottom - self.top)
    def position_and_size(self):
        return (self.position(), self.size())
    def __mul__(self, n):
        # Scale the size by n, keeping the left-top corner fixed.
        if not isinstance(n, (int, float)):
            raise NotImplementedError
        return Rect(self.left, self.top,
                    self.left + (self.right - self.left) * n,
                    self.top + (self.bottom - self.top) * n)
    __rmul__ = __mul__
    def __truediv__(self, n):
        # Scale the size by 1/n, keeping the left-top corner fixed.
        if not isinstance(n, (int, float)):
            raise NotImplementedError
        return Rect(self.left, self.top,
                    self.left + (self.right - self.left) / n,
                    self.top + (self.bottom - self.top) / n)
    def shift(self, pnt, _y=0):
        # Accepts either an (x, y) pair or two scalars.
        if isinstance(pnt, (tuple, list)):
            x, y = pnt
        elif isinstance(pnt, (int, float)) and isinstance(_y, (int, float)):
            x, y = pnt, _y
        else:
            raise NotImplementedError
        return Rect(self.left + x, self.top + y,
                    self.right + x, self.bottom + y)
    def rotate(self, degree, origin=None):
        """Rotate the corner points and return their axis-aligned bounding Rect.

        Bug fix: the previous implementation iterated the four scalar
        coordinates (floats have no .rotate) and passed a generator as a
        single positional argument to the 4-field namedtuple, so it always
        raised. We now rotate the two defining corners as Points and
        normalize the result so left<=right and top<=bottom.
        """
        p1 = Point(self.left, self.top).rotate(degree, origin=origin)
        p2 = Point(self.right, self.bottom).rotate(degree, origin=origin)
        return Rect(min(p1.x, p2.x), min(p1.y, p2.y),
                    max(p1.x, p2.x), max(p1.y, p2.y))
if __name__ == "__main__":
    import doctest
    # Run the doctests embedded in the Point and Rect docstrings.
    doctest.testmod()
| [
"collections.namedtuple",
"math.pow",
"math.cos",
"doctest.testmod",
"math.sin"
] | [((693, 719), 'collections.namedtuple', 'namedtuple', (['"""Point"""', '"""x y"""'], {}), "('Point', 'x y')\n", (703, 719), False, 'from collections import namedtuple\n'), ((3505, 3549), 'collections.namedtuple', 'namedtuple', (['"""_Rect"""', '"""left top right bottom"""'], {}), "('_Rect', 'left top right bottom')\n", (3515, 3549), False, 'from collections import namedtuple\n'), ((6339, 6356), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (6354, 6356), False, 'import doctest\n'), ((2128, 2148), 'math.pow', 'math.pow', (['(10)', 'places'], {}), '(10, places)\n', (2136, 2148), False, 'import math\n'), ((3330, 3343), 'math.sin', 'math.sin', (['rad'], {}), '(rad)\n', (3338, 3343), False, 'import math\n'), ((3345, 3358), 'math.cos', 'math.cos', (['rad'], {}), '(rad)\n', (3353, 3358), False, 'import math\n'), ((2099, 2119), 'math.pow', 'math.pow', (['(10)', 'places'], {}), '(10, places)\n', (2107, 2119), False, 'import math\n')] |
import os
from pathlib import Path
def menpo3d_src_dir_path():
    r"""The path to the top of the menpo3d Python package.

    Useful for locating where the data folder is stored.

    Returns
    -------
    path : ``pathlib.Path``
        The full path to the top of the Menpo3d package
    """
    this_module = os.path.abspath(__file__)
    return Path(this_module).parent
| [
"os.path.abspath"
] | [((302, 327), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (317, 327), False, 'import os\n')] |
# Generated by Django 3.0.4 on 2022-03-02 19:32
from django.db import migrations
class Migration(migrations.Migration):
    """Removes the obsolete HowWeWorkText model from the page_edits app."""
    # Must run after 0014, which deleted the WhatsappNumber model.
    dependencies = [
        ('page_edits', '0014_delete_whatsappnumber'),
    ]
    operations = [
        # Drops the HowWeWorkText table and its model state.
        migrations.DeleteModel(
            name='HowWeWorkText',
        ),
    ]
| [
"django.db.migrations.DeleteModel"
] | [((233, 277), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""HowWeWorkText"""'}), "(name='HowWeWorkText')\n", (255, 277), False, 'from django.db import migrations\n')] |
# (C) Copyright 2021 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import logging
from climetlab.arguments.climetlab_types import Type
from climetlab.vocabularies.aliases import unalias
LOG = logging.getLogger(__name__)
class _all:
    """Sentinel type whose single instance, ALL, stands for 'every value'."""
    def __repr__(self):
        return "climetlab.ALL"
# Module-level singleton sentinel.
ALL = _all()
class Action:
    """Base class for pipeline steps that rewrite a kwargs dict."""
    def execute(self, kwargs):
        # Subclasses must transform and return the kwargs dict.
        raise NotImplementedError()
    def __repr__(self) -> str:
        return f"{self.__class__}"
class ArgumentTransformer(Action):
    """Action that rewrites a single named keyword argument in place."""

    def __init__(self, owner):
        self.owner = owner

    def execute(self, kwargs):
        # Only rewrite the argument when the caller actually supplied it.
        target = self.name
        if target in kwargs:
            kwargs[target] = self.transform(kwargs[target])
        return kwargs

    def transform(self, value):
        # Subclasses implement the actual value rewriting.
        raise NotImplementedError(self.__class__.__name__)

    @property
    def name(self):
        # Owner may be absent, a plain string, or an object carrying .name.
        owner = self.owner
        if owner is None:
            return "-"
        return owner if isinstance(owner, str) else owner.name
class _TypedTransformer(ArgumentTransformer):
    """ArgumentTransformer bound to a climetlab Type."""
    def __init__(self, owner, type) -> None:
        super().__init__(owner)
        # Accept either a Type instance or a Type subclass; normalize to an instance.
        self.type = type if isinstance(type, Type) else type()
class AliasTransformer(_TypedTransformer):
    """Expands aliases in a kwarg value until a fixed point is reached.

    *aliases* may be a vocabulary name (str), a mapping (dict), or a
    callable; the lookup strategy is selected once at construction time.
    """
    def __init__(self, owner, type, aliases) -> None:
        super().__init__(owner, type)
        self.aliases = aliases
        if isinstance(self.aliases, str):
            self.unalias = self.from_string
            return
        if isinstance(self.aliases, dict):
            self.unalias = self.from_dict
            return
        if callable(self.aliases):
            self.unalias = self.aliases
            return
        self.unalias = self.unsupported
    def unsupported(self, value):
        # Fallback when *aliases* is none of the supported kinds.
        raise NotImplementedError(self.aliases)
    def from_string(self, value):
        # Look the value up in the named vocabulary.
        return unalias(self.aliases, value)
    def from_dict(self, value):
        # Dict lookup; unknown or unhashable values pass through unchanged.
        try:
            return self.aliases[value]
        except KeyError: # No alias for this value
            pass
        except TypeError: # if value is not hashable
            pass
        return value
    def _transform_one(self, value):
        # Repeatedly unalias until the value stops changing (fixed point).
        old = object()
        while old != value:
            old = value
            value = self.unalias(old)
            LOG.debug("  Unalias %s --> %s", old, value)
        return value
    def transform(self, value):
        LOG.debug("  Unaliasing %s", value)
        # Lists/tuples are unaliased element-wise, preserving the container type.
        if isinstance(value, list):
            return [self._transform_one(v) for v in value]
        if isinstance(value, tuple):
            return tuple([self._transform_one(v) for v in value])
        return self._transform_one(value)
    def __repr__(self) -> str:
        return f"AliasTransformer({self.owner},{self.aliases},{self.type})"
class FormatTransformer(_TypedTransformer):
    """Formats a kwarg value through the bound Type's format() method."""
    def __init__(self, owner, format, type) -> None:
        super().__init__(owner, type)
        self.format = format
    def transform(self, value):
        # None means 'unset' and passes through untouched.
        if value is None:
            return value
        return self.type.format(value, self.format)
    def __repr__(self) -> str:
        return f"FormatTransformer({self.owner},{self.format},{self.type})"
class TypeTransformer(_TypedTransformer):
    """Transformer that casts a value through its type's ``cast`` method."""

    def __init__(self, owner, type):
        super().__init__(owner, type)

    def transform(self, value):
        # None passes through untouched.
        if value is None:
            return value
        return self.type.cast(value)

    def __repr__(self) -> str:
        # Bug fix: the repr string was missing its closing parenthesis
        # (it read "TypeTransformer(owner,type"), unlike the sibling classes.
        return f"TypeTransformer({self.owner},{self.type})"
class AvailabilityChecker(Action):
    """Action that validates kwargs (ignoring None values) against an availability index."""

    def __init__(self, availability) -> None:
        self.availability = availability

    def execute(self, kwargs):
        LOG.debug("Checking availability for %s", kwargs)
        assert isinstance(kwargs, dict), kwargs
        # Drop None-valued entries before checking.
        non_null = {key: val for key, val in kwargs.items() if val is not None}
        self.availability.check(non_null)
        return kwargs

    def __repr__(self) -> str:
        txt = "Availability:"
        for line in (l for l in self.availability.tree().split("\n") if l):
            txt += "\n " + line
        return txt
| [
"logging.getLogger",
"climetlab.vocabularies.aliases.unalias"
] | [((506, 533), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (523, 533), False, 'import logging\n'), ((2155, 2183), 'climetlab.vocabularies.aliases.unalias', 'unalias', (['self.aliases', 'value'], {}), '(self.aliases, value)\n', (2162, 2183), False, 'from climetlab.vocabularies.aliases import unalias\n')] |
from django.shortcuts import get_object_or_404
from rest_framework import viewsets, permissions
from rest_framework.decorators import detail_route, list_route
from rest_framework.response import Response
from account.models import User
from account.serializers import UserSerializer, SimpleUserSerializer, FullUserSerializer, UserOverwriteSerializer
from .permissions import IsAdminUserOrReadOnly
class UserViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for users, looked up by username.

    Read-only requests use lightweight serializers; writes use the
    creation/overwrite serializers. Write access is gated by
    ``IsAdminUserOrReadOnly``.
    """

    lookup_field = 'username'
    queryset = User.objects.all()
    permission_classes = (permissions.IsAuthenticated, IsAdminUserOrReadOnly)

    def get_serializer_class(self):
        # Safe (read-only) methods: compact serializer for list views,
        # full serializer for detail views.
        if self.request.method in permissions.SAFE_METHODS:
            return SimpleUserSerializer if self.action == 'list' else FullUserSerializer
        # Writes: POST creates a user; other methods overwrite one.
        if self.request.method.lower() == 'post':
            return UserSerializer
        return UserOverwriteSerializer
| [
"account.models.User.objects.all"
] | [((489, 507), 'account.models.User.objects.all', 'User.objects.all', ([], {}), '()\n', (505, 507), False, 'from account.models import User\n')] |
# This file is part of MaixUI
# Copyright (c) sipeed.com
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
#
import time, gc
from core import agent
from ui_canvas import ui, print_mem_free
from ui_system_info import system_info
#from ui_catch import catch
#from ui_taskbar import taskbar
from wdt import protect
class app:
    """Tiny application shell: schedules UI drawing and runs the event loop."""

    ctrl = agent()

    @ui.warp_template(ui.blank_draw)
    @ui.warp_template(ui.grey_draw)
    @ui.warp_template(ui.bg_in_draw)
    @ui.warp_template(ui.anime_in_draw)
    # Optional extra draw layers (enable as needed):
    #@ui.warp_template(ui.help_in_draw)
    #@ui.warp_template(taskbar.time_draw)
    #@ui.warp_template(taskbar.mem_draw)
    #@catch # need sipeed_button
    def draw():
        # Push the composed frame to the display.
        ui.display()

    def run():
        """Run the main loop: cycle the agent and feed the watchdog forever."""
        # Redraw every 5 scheduler ticks.
        app.ctrl.event(5, app.draw)
        # Bug fix: the original nested a second, redundant `while True` whose
        # body could never complete an iteration; a single loop is equivalent.
        while True:
            try:
                app.ctrl.cycle()
                protect.keep()  # feed the watchdog
            except KeyboardInterrupt:
                protect.stop()
                # Bug fix: re-raise the original interrupt instead of raising a
                # fresh KeyboardInterrupt(), preserving the traceback.
                raise
            except Exception as e:
                print(e)
if __name__ == "__main__":
    # gc.collect()
    # Report free memory before entering the loop, then run forever.
    print_mem_free()
    app.run()
| [
"ui_canvas.ui.warp_template",
"wdt.protect.keep",
"ui_canvas.ui.display",
"ui_canvas.print_mem_free",
"wdt.protect.stop",
"core.agent"
] | [((379, 386), 'core.agent', 'agent', ([], {}), '()\n', (384, 386), False, 'from core import agent\n'), ((393, 424), 'ui_canvas.ui.warp_template', 'ui.warp_template', (['ui.blank_draw'], {}), '(ui.blank_draw)\n', (409, 424), False, 'from ui_canvas import ui, print_mem_free\n'), ((430, 460), 'ui_canvas.ui.warp_template', 'ui.warp_template', (['ui.grey_draw'], {}), '(ui.grey_draw)\n', (446, 460), False, 'from ui_canvas import ui, print_mem_free\n'), ((466, 497), 'ui_canvas.ui.warp_template', 'ui.warp_template', (['ui.bg_in_draw'], {}), '(ui.bg_in_draw)\n', (482, 497), False, 'from ui_canvas import ui, print_mem_free\n'), ((503, 537), 'ui_canvas.ui.warp_template', 'ui.warp_template', (['ui.anime_in_draw'], {}), '(ui.anime_in_draw)\n', (519, 537), False, 'from ui_canvas import ui, print_mem_free\n'), ((1471, 1487), 'ui_canvas.print_mem_free', 'print_mem_free', ([], {}), '()\n', (1485, 1487), False, 'from ui_canvas import ui, print_mem_free\n'), ((718, 730), 'ui_canvas.ui.display', 'ui.display', ([], {}), '()\n', (728, 730), False, 'from ui_canvas import ui, print_mem_free\n'), ((1142, 1156), 'wdt.protect.keep', 'protect.keep', ([], {}), '()\n', (1154, 1156), False, 'from wdt import protect\n'), ((1256, 1270), 'wdt.protect.stop', 'protect.stop', ([], {}), '()\n', (1268, 1270), False, 'from wdt import protect\n')] |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ORQA ops."""
from language.orqa import ops as orqa_ops
import tensorflow.compat.v1 as tf
class OrqaOpsTest(tf.test.TestCase):
  """Unit tests for the custom ORQA TensorFlow ops."""

  def test_reader_inputs(self):
    # Packs question + retrieved block token ids into BERT-style reader
    # inputs ([CLS]=10, [SEP]=11, max length 10) and checks every output
    # tensor against hand-computed expectations.
    concat_inputs = orqa_ops.reader_inputs(
        question_token_ids=[0, 1],
        block_token_ids=[[2, 3, 4], [5, 6, 0]],
        block_lengths=[3, 2],
        block_token_map=[[1, 2, 5], [1, 3, 4]],
        answer_token_ids=[[3, 4], [7, 0]],
        answer_lengths=[2, 1],
        cls_token_id=10,
        sep_token_id=11,
        max_sequence_len=10)
    self.assertAllEqual(
        concat_inputs.token_ids.numpy(),
        [[10, 0, 1, 11, 2, 3, 4, 11, 0, 0], [10, 0, 1, 11, 5, 6, 11, 0, 0, 0]])
    self.assertAllEqual(
        concat_inputs.mask.numpy(),
        [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0]])
    self.assertAllEqual(
        concat_inputs.segment_ids.numpy(),
        [[0, 0, 0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 1, 1, 1, 0, 0, 0]])
    self.assertAllEqual(
        concat_inputs.block_mask.numpy(),
        [[0, 0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0]])
    self.assertAllEqual(concat_inputs.token_map.numpy(),
                        [[-1, -1, -1, -1, 1, 2, 5, -1, -1, -1],
                         [-1, -1, -1, -1, 1, 3, -1, -1, -1, -1]])
    self.assertAllEqual(concat_inputs.gold_starts.numpy(), [[5], [-1]])
    self.assertAllEqual(concat_inputs.gold_ends.numpy(), [[6], [-1]])

  def test_has_answer(self):
    # has_answer flags each block that contains one of the answer strings.
    result = orqa_ops.has_answer(blocks=["abcdefg", "hijklmn"], answers=["hij"])
    self.assertAllEqual(result.numpy(), [False, True])
if __name__ == "__main__":
  # Run the TensorFlow test harness.
  tf.test.main()
| [
"language.orqa.ops.reader_inputs",
"tensorflow.compat.v1.test.main",
"language.orqa.ops.has_answer"
] | [((2237, 2251), 'tensorflow.compat.v1.test.main', 'tf.test.main', ([], {}), '()\n', (2249, 2251), True, 'import tensorflow.compat.v1 as tf\n'), ((809, 1086), 'language.orqa.ops.reader_inputs', 'orqa_ops.reader_inputs', ([], {'question_token_ids': '[0, 1]', 'block_token_ids': '[[2, 3, 4], [5, 6, 0]]', 'block_lengths': '[3, 2]', 'block_token_map': '[[1, 2, 5], [1, 3, 4]]', 'answer_token_ids': '[[3, 4], [7, 0]]', 'answer_lengths': '[2, 1]', 'cls_token_id': '(10)', 'sep_token_id': '(11)', 'max_sequence_len': '(10)'}), '(question_token_ids=[0, 1], block_token_ids=[[2, 3, 4\n ], [5, 6, 0]], block_lengths=[3, 2], block_token_map=[[1, 2, 5], [1, 3,\n 4]], answer_token_ids=[[3, 4], [7, 0]], answer_lengths=[2, 1],\n cls_token_id=10, sep_token_id=11, max_sequence_len=10)\n', (831, 1086), True, 'from language.orqa import ops as orqa_ops\n'), ((2084, 2151), 'language.orqa.ops.has_answer', 'orqa_ops.has_answer', ([], {'blocks': "['abcdefg', 'hijklmn']", 'answers': "['hij']"}), "(blocks=['abcdefg', 'hijklmn'], answers=['hij'])\n", (2103, 2151), True, 'from language.orqa import ops as orqa_ops\n')] |
from common.numpy_fast import clip, interp
from selfdrive.car.tesla.values import CruiseButtons
from selfdrive.config import Conversions as CV
import time
from common.params import Params
from cereal import car
# Acceleration limits for the pedal controller.
ACCEL_MAX = 0.6  # 0.6m/s2 * 36 = ~ 0 -> 50mph in 6 seconds
ACCEL_MIN = -3.5
_DT = 0.05  # 20Hz in our case, since we don't want to process more than once the same radarState message
_DT_MPC = _DT
# TODO: these should end up in values.py at some point, probably variable by trim
# Accel limits
MAX_RADAR_DISTANCE = 120.0  # max distance to take in consideration radar reading
MAX_PEDAL_VALUE_AVG = 100
MAX_PEDAL_REGEN_VALUE = 0.0
MAX_BRAKE_VALUE = 1  # ibooster fully pressed BBTODO determine the exact value we need
PEDAL_HYST_GAP = (
    1.0  # don't change pedal command for small oscillations within this value
)
# Cap the pedal to go from 0 to max in 3 seconds
PEDAL_MAX_UP = MAX_PEDAL_VALUE_AVG * _DT / 3
# Cap the pedal to go from max to 0 in 0.4 seconds
PEDAL_MAX_DOWN = MAX_PEDAL_VALUE_AVG * _DT / 0.4
# BBTODO: move the vehicle variables; maybe make them speed variable
TORQUE_LEVEL_ACC = 0.0
TORQUE_LEVEL_DECEL = -30.0
MIN_PCC_V_KPH = 0.0  # lowest settable PCC speed
MAX_PCC_V_KPH = 270.0  # highest settable PCC speed
# Pull the cruise stalk twice in this many ms for a 'double pull'
STALK_DOUBLE_PULL_MS = 750
class PCCState:
    """State constants of the PCC system, following the DI_cruiseState naming scheme."""
    OFF = 0  # Disabled by UI (effectively never happens since button switches over to ACC mode).
    STANDBY = 1  # Ready to be engaged.
    ENABLED = 2  # Engaged.
    NOT_READY = 9  # Not ready to be engaged due to the state of the car.
def _current_time_millis():
return int(round(time.time() * 1000))
# this is for the pedal cruise control
class PCCController:
    """Pedal Cruise Control: longitudinal control through a pedal interceptor.

    Tracks cruise-stalk input to engage/disengage, and converts the planner's
    acceleration command into pedal/brake interceptor commands.
    """

    def __init__(self, longcontroller, tesla_can, pedalcan):
        # Wiring: long controller, CAN message packer, and pedal CAN bus id.
        self.LongCtr = longcontroller
        self.tesla_can = tesla_can
        self.human_cruise_action_time = 0
        self.pcc_available = self.prev_pcc_available = False
        self.pedal_timeout_frame = 0
        self.accelerator_pedal_pressed = self.prev_accelerator_pedal_pressed = False
        self.automated_cruise_action_time = 0
        self.last_angle = 0.0
        self.lead_1 = None
        self.last_update_time = 0
        self.enable_pedal_cruise = False
        self.stalk_pull_time_ms = 0
        self.prev_stalk_pull_time_ms = -1000
        self.prev_cruise_state = 0
        self.prev_cruise_buttons = CruiseButtons.IDLE
        self.pedal_speed_kph = 0.0
        self.speed_limit_kph = 0.0
        self.prev_speed_limit_kph = 0.0
        self.pedal_idx = 0
        self.pedal_steady = 0.0
        self.prev_tesla_accel = 0.0
        self.prev_tesla_pedal = 0.0
        self.prev_tesla_brake = 0.0
        self.torqueLevel_last = 0.0
        self.prev_v_ego = 0.0
        self.PedalForZeroTorque = (
            18.0  # starting number for a S85, adjusts down automatically
        )
        self.lastTorqueForPedalForZeroTorque = TORQUE_LEVEL_DECEL
        self.v_pid = 0.0
        self.a_pid = 0.0
        self.last_output_gb = 0.0
        self.last_speed_kph = None
        # for smoothing the changes in speed
        self.v_acc_start = 0.0
        self.a_acc_start = 0.0
        self.v_acc = 0.0
        self.v_acc_sol = 0.0
        self.v_acc_future = 0.0
        self.a_acc = 0.0
        self.a_acc_sol = 0.0
        self.v_cruise = 0.0
        self.a_cruise = 0.0
        # when was radar data last updated?
        self.lead_last_seen_time_ms = 0
        self.continuous_lead_sightings = 0
        self.params = Params()
        self.pedalcan = pedalcan
        self.madMax = False
        if longcontroller.madMax:
            self.madMax = True

    def update_stat(self, CS, frame):
        """Process cruise-stalk/button input and maintain the engaged state.

        Returns a list of CAN messages to send (only the pedal reset command,
        when the interceptor needs re-initialising).
        """
        if not self.LongCtr.CP.openpilotLongitudinalControl:
            self.pcc_available = False
            return []
        if not CS.enablePedal:
            self.pcc_available = False
            return []
        self._update_pedal_state(CS, frame)
        can_sends = []
        if not self.pcc_available:
            timed_out = frame >= self.pedal_timeout_frame
            if timed_out or CS.pedal_interceptor_state > 0:
                if frame % 50 == 0:
                    # send reset command
                    idx = self.pedal_idx
                    self.pedal_idx = (self.pedal_idx + 1) % 16
                    can_sends.append(
                        self.tesla_can.create_pedal_command_msg(0, 0, idx, self.pedalcan)
                    )
            return can_sends
        # disable on brake
        if CS.realBrakePressed and self.enable_pedal_cruise:
            CS.longCtrlEvent = car.CarEvent.EventName.pccDisabled
            self.enable_pedal_cruise = False
        # process any stalk movement
        curr_time_ms = _current_time_millis()
        speed_uom_kph = 1.0
        if CS.speed_units == "MPH":
            speed_uom_kph = CV.MPH_TO_KPH
        if (
            CS.cruise_buttons == CruiseButtons.MAIN
            and self.prev_cruise_buttons != CruiseButtons.MAIN
        ):
            # Stalk was just pulled: remember the time to detect a double pull.
            self.prev_stalk_pull_time_ms = self.stalk_pull_time_ms
            self.stalk_pull_time_ms = curr_time_ms
            double_pull = (
                self.stalk_pull_time_ms - self.prev_stalk_pull_time_ms
                < STALK_DOUBLE_PULL_MS
            )
            ready = CS.enablePedal
            if ready and double_pull:
                # A double pull enables ACC. updating the max ACC speed if necessary.
                if not self.enable_pedal_cruise:
                    CS.longCtrlEvent = car.CarEvent.EventName.pccEnabled
                self.enable_pedal_cruise = True
                # Increase PCC speed to match current, if applicable.
                # We round the target speed in the user's units of measurement to avoid jumpy speed readings
                current_speed_kph_uom_rounded = (
                    int(CS.out.vEgo * CV.MS_TO_KPH / speed_uom_kph + 0.5) * speed_uom_kph
                )
                self.pedal_speed_kph = max(
                    current_speed_kph_uom_rounded, self.speed_limit_kph
                )
        # Handle pressing the cancel button.
        elif CS.cruise_buttons == CruiseButtons.CANCEL:
            if self.enable_pedal_cruise:
                CS.longCtrlEvent = car.CarEvent.EventName.pccDisabled
            self.enable_pedal_cruise = False
            self.pedal_speed_kph = 0.0
            self.stalk_pull_time_ms = 0
            self.prev_stalk_pull_time_ms = -1000
        # Handle pressing up and down buttons.
        elif self.enable_pedal_cruise and CS.cruise_buttons != self.prev_cruise_buttons:
            # Real stalk command while PCC is already enabled. Adjust the max PCC speed if necessary.
            # We round the target speed in the user's units of measurement to avoid jumpy speed readings
            actual_speed_kph_uom_rounded = (
                int(CS.out.vEgo * CV.MS_TO_KPH / speed_uom_kph + 0.5) * speed_uom_kph
            )
            if CS.cruise_buttons == CruiseButtons.RES_ACCEL:
                self.pedal_speed_kph = (
                    max(self.pedal_speed_kph, actual_speed_kph_uom_rounded)
                    + speed_uom_kph
                )
            elif CS.cruise_buttons == CruiseButtons.RES_ACCEL_2ND:
                self.pedal_speed_kph = (
                    max(self.pedal_speed_kph, actual_speed_kph_uom_rounded)
                    + 5 * speed_uom_kph
                )
            elif CS.cruise_buttons == CruiseButtons.DECEL_SET:
                self.pedal_speed_kph = self.pedal_speed_kph - speed_uom_kph
            elif CS.cruise_buttons == CruiseButtons.DECEL_2ND:
                self.pedal_speed_kph = self.pedal_speed_kph - 5 * speed_uom_kph
            # Clip PCC speed between 0 and 170 KPH.
            self.pedal_speed_kph = clip(
                self.pedal_speed_kph, MIN_PCC_V_KPH, MAX_PCC_V_KPH
            )
        # If something disabled cruise control, disable PCC too
        elif self.enable_pedal_cruise and CS.cruise_state and not CS.enablePedal:
            self.enable_pedal_cruise = False
            CS.longCtrlEvent = car.CarEvent.EventName.pccDisabled
        # A single pull disables PCC (falling back to just steering). Wait some time
        # in case a double pull comes along.
        elif (
            self.enable_pedal_cruise
            and curr_time_ms - self.stalk_pull_time_ms > STALK_DOUBLE_PULL_MS
            and self.stalk_pull_time_ms - self.prev_stalk_pull_time_ms
            > STALK_DOUBLE_PULL_MS
        ):
            self.enable_pedal_cruise = False
            CS.longCtrlEvent = car.CarEvent.EventName.pccDisabled
        # Update prev state after all other actions.
        self.prev_cruise_buttons = CS.cruise_buttons
        self.prev_cruise_state = CS.cruise_state
        return can_sends

    def update_pdl(
        self,
        enabled,
        CS,
        frame,
        actuators,
        v_target,
        pcm_override,
        speed_limit_ms,
        set_speed_limit_active,
        speed_limit_offset,
        alca_enabled,
        radSt
    ):
        """Convert the planner's acceleration command into interceptor commands.

        Returns a tuple ``(pedal_command, brake_command, enable_flag, idx)``;
        ``(0.0, 0.0, -1, -1)`` when longitudinal/pedal control is unavailable.
        """
        if not self.LongCtr.CP.openpilotLongitudinalControl:
            return 0.0, 0.0, -1, -1
        if not CS.enablePedal:
            return 0.0, 0.0, -1, -1
        idx = self.pedal_idx
        self.prev_speed_limit_kph = self.speed_limit_kph
        ######################################################################################
        # Determine pedal "zero"
        #
        # save position for cruising (zero acc, zero brake, no torque) when we are above 10 MPH
        ######################################################################################
        if (
            CS.torqueLevel < TORQUE_LEVEL_ACC
            and CS.torqueLevel > TORQUE_LEVEL_DECEL
            and CS.out.vEgo >= 10.0 * CV.MPH_TO_MS
            and abs(CS.torqueLevel) < abs(self.lastTorqueForPedalForZeroTorque)
        ):
            self.PedalForZeroTorque = self.prev_tesla_pedal
            self.lastTorqueForPedalForZeroTorque = CS.torqueLevel
            # print ("Detected new Pedal For Zero Torque at %s" % (self.PedalForZeroTorque))
        if set_speed_limit_active and speed_limit_ms > 0:
            self.speed_limit_kph = (speed_limit_ms + speed_limit_offset) * CV.MS_TO_KPH
            if int(self.prev_speed_limit_kph) != int(self.speed_limit_kph):
                self.pedal_speed_kph = self.speed_limit_kph
        else:  # reset internal speed limit, so double pull doesn't set higher speed than current (e.g. after leaving the highway)
            self.speed_limit_kph = 0.0
        self.pedal_idx = (self.pedal_idx + 1) % 16
        if not self.pcc_available or not enabled:
            return 0.0, 0.0, 0, idx
        ##############################################################
        # This mode uses the longitudinal MPC built in OP
        #
        # we use the values from actuators.accel
        ##############################################################
        ZERO_ACCEL = self.PedalForZeroTorque
        REGEN_DECEL = -0.5  # BB needs to be calculated based on regen available
        if CS.out.vEgo < 5 * CV.MPH_TO_MS:
            ZERO_ACCEL = 0
        # Pedal ceiling as a function of speed (madMax raises the ceiling).
        MAX_PEDAL_BP = [0., 5., 20., 30., 40]
        MAX_PEDAL_V = [65. , 75., 85., 100., 120.]
        if self.madMax:
            MAX_PEDAL_V = [65. , 85., 105., 120., 140.]
        MAX_PEDAL_VALUE = interp(CS.out.vEgo, MAX_PEDAL_BP, MAX_PEDAL_V)
        # Map planner accel -> pedal command; decel below REGEN_DECEL -> brake.
        ACCEL_LOOKUP_BP = [REGEN_DECEL, 0., ACCEL_MAX]
        ACCEL_LOOKUP_V = [MAX_PEDAL_REGEN_VALUE, ZERO_ACCEL, MAX_PEDAL_VALUE]
        BRAKE_LOOKUP_BP = [ACCEL_MIN, REGEN_DECEL]
        BRAKE_LOOKUP_V = [MAX_BRAKE_VALUE, 0.]
        enable_pedal = 1.0 if self.enable_pedal_cruise else 0.0
        tesla_pedal = int(round(interp(actuators.accel/2, ACCEL_LOOKUP_BP, ACCEL_LOOKUP_V)))
        # only do pedal hysteresis when very close to speed set
        if abs(CS.out.vEgo * CV.MS_TO_KPH - self.pedal_speed_kph) < 0.5:
            tesla_pedal = self.pedal_hysteresis(tesla_pedal, enable_pedal)
        if CS.out.vEgo < 0.1 and actuators.accel < 0.01:
            # hold brake pressed at when standstill
            # BBTODO: show HOLD indicator in IC with integration
            tesla_brake = 0.26
        else:
            tesla_brake = interp(actuators.accel, BRAKE_LOOKUP_BP, BRAKE_LOOKUP_V)
        if CS.has_ibooster_ecu and CS.brakeUnavailable:
            CS.longCtrlEvent = car.CarEvent.EventName.iBoosterBrakeNotOk
        # Rate-limit pedal changes (full travel: 3s up, 0.4s down).
        tesla_pedal = clip(tesla_pedal, self.prev_tesla_pedal - PEDAL_MAX_DOWN, self.prev_tesla_pedal + PEDAL_MAX_UP)
        self.prev_tesla_brake = tesla_brake * enable_pedal
        self.torqueLevel_last = CS.torqueLevel
        self.prev_tesla_pedal = tesla_pedal * enable_pedal
        self.prev_v_ego = CS.out.vEgo
        return self.prev_tesla_pedal, self.prev_tesla_brake, enable_pedal, idx

    def pedal_hysteresis(self, pedal, enabled):
        """Suppress pedal-command changes smaller than PEDAL_HYST_GAP."""
        # for small accel oscillations within PEDAL_HYST_GAP, don't change the command
        if not enabled:
            # send 0 when disabled, otherwise acc faults
            self.pedal_steady = 0.0
        elif pedal > self.pedal_steady + PEDAL_HYST_GAP:
            self.pedal_steady = pedal - PEDAL_HYST_GAP
        elif pedal < self.pedal_steady - PEDAL_HYST_GAP:
            self.pedal_steady = pedal + PEDAL_HYST_GAP
        return self.pedal_steady

    def _update_pedal_state(self, CS, frame):
        """Track pedal-interceptor liveness and derive pcc_available."""
        if CS.pedal_idx != CS.prev_pedal_idx:
            # time out pedal after 500ms without receiving a new CAN message from it
            self.pedal_timeout_frame = frame + 50
        self.prev_pcc_available = self.pcc_available
        pedal_ready = (
            frame < self.pedal_timeout_frame and CS.pedal_interceptor_state == 0
        )
        # Mark pedal unavailable while traditional cruise is on.
        self.pcc_available = pedal_ready and CS.enablePedal
| [
"common.numpy_fast.clip",
"common.params.Params",
"time.time",
"common.numpy_fast.interp"
] | [((3564, 3572), 'common.params.Params', 'Params', ([], {}), '()\n', (3570, 3572), False, 'from common.params import Params\n'), ((11650, 11696), 'common.numpy_fast.interp', 'interp', (['CS.out.vEgo', 'MAX_PEDAL_BP', 'MAX_PEDAL_V'], {}), '(CS.out.vEgo, MAX_PEDAL_BP, MAX_PEDAL_V)\n', (11656, 11696), False, 'from common.numpy_fast import clip, interp\n'), ((12750, 12850), 'common.numpy_fast.clip', 'clip', (['tesla_pedal', '(self.prev_tesla_pedal - PEDAL_MAX_DOWN)', '(self.prev_tesla_pedal + PEDAL_MAX_UP)'], {}), '(tesla_pedal, self.prev_tesla_pedal - PEDAL_MAX_DOWN, self.\n prev_tesla_pedal + PEDAL_MAX_UP)\n', (12754, 12850), False, 'from common.numpy_fast import clip, interp\n'), ((12541, 12597), 'common.numpy_fast.interp', 'interp', (['actuators.accel', 'BRAKE_LOOKUP_BP', 'BRAKE_LOOKUP_V'], {}), '(actuators.accel, BRAKE_LOOKUP_BP, BRAKE_LOOKUP_V)\n', (12547, 12597), False, 'from common.numpy_fast import clip, interp\n'), ((1674, 1685), 'time.time', 'time.time', ([], {}), '()\n', (1683, 1685), False, 'import time\n'), ((12026, 12086), 'common.numpy_fast.interp', 'interp', (['(actuators.accel / 2)', 'ACCEL_LOOKUP_BP', 'ACCEL_LOOKUP_V'], {}), '(actuators.accel / 2, ACCEL_LOOKUP_BP, ACCEL_LOOKUP_V)\n', (12032, 12086), False, 'from common.numpy_fast import clip, interp\n'), ((7912, 7968), 'common.numpy_fast.clip', 'clip', (['self.pedal_speed_kph', 'MIN_PCC_V_KPH', 'MAX_PCC_V_KPH'], {}), '(self.pedal_speed_kph, MIN_PCC_V_KPH, MAX_PCC_V_KPH)\n', (7916, 7968), False, 'from common.numpy_fast import clip, interp\n')] |
# Ising Model in Python.
# 28-03-2019.
# Written by <NAME>.
# Python 3.7.
# NumPy has been installed and used in this project.
# Numba has been installed and used in this project.
# Tools used: Visual Studio Code, GitHub Desktop.
from Input_param_reader import Ising_input # Python Function in the same directory as the Main.py File
from Montecarlo import Monte_Carlo # Python Function in the same directory as the Main.py File
from numba import jit # Python Package to be downloaded manually
from Path import Output_Path_Set # Python Function to create output folder by date and time and set it as working directory
import random
import numpy
import time
import math
import csv
import os
time_start = time.perf_counter()  # For Program Runtime Profiling. time.clock() has been deprecated
i=0  # Dummy Integer
j=0  # Dummy Integer
k=0  # Dummy Integer
m=0  # Dummy Integer
n=0  # Dummy Integer
d=0  # Dummy Integer
nrows=0  # Number of Rows in A
ncols=0  # Number of Columns in A
nlayers=0  # Number of Layers in Quasi 3D Matrix
temp=0  # Temperature
beta=0  # Inverse Temperature
ConfigType=0  # Starting Configuration type
npass=0  # number of passes for MC algorithm
ipass=0  # the current pass number
nequil=0  # number of equilibration steps
trial_spin=0  # values of changed spin
high_temp=0  # starting temp for scan
low_temp=0  # final temp for scan
temp_interval=0  # interval between scan points
nscans=0  # number of scans (each at diff T)
iscan=1  # current number
iscan1=0  # current number
DeltaU=0  # change in energy between 2 configs
log_eta=0  # log of random number to compare to
magnetization=0  # magnetization of all spins in lattice
magnetization_ave=0  # cumulative average magnetization
magnetization2_ave=0  # cumulative average of mag. squared
energy=0  # energy of all spins in lattice
energy_ave=0  # cumulative average of energy
energy2_ave=0  # cumulative average of energy squared
output_count=0  # Number of times things have been added to averages
ran0=0  # T B C
iterator=0  # to be used with for loop / dummy operation
iterator2=0  # to be used for loop / dummy operations
print("\n")
print("MONTE CARLO QUASI 3D ISING MODEL\n")
print("Monte Carlo Statistics for Quasi 3D Ising Model with periodic boundary conditions\n")
print("The critical temperature is approximately 2.3, as seen on Chandler p. 123.\n")
# This section is for reading input parameters and assigning it to global variables
nrows, ncols, nlayers, npass, nequil, high_temp, low_temp, temp_interval, ConfigType=Ising_input()
# End of input parameter reader section
iterator = nrows  # Setting iterator to be used as number of rows value
iterator2 = ncols  # Setting iterator to be used as number of columns value
# Pad odd dimensions up to the next even number.
if(nrows%2!=0):
    iterator+=1
if(ncols%2!=0):
    iterator2+=1
print("Running program for %d rows, %d columns and %d layers\n" % (iterator,iterator2,nlayers))
# Matrix arrays are stored as a[depth,row,column] manner in Numpy
a=numpy.ones((nlayers,iterator,iterator2),dtype=int)
# NOTE(review): `start_matrix` is an alias of `a`, not a snapshot; in-place
# writes through either name are visible through both.
start_matrix=a
# Function to generate uniform random numbers
@jit(nopython=True)
def pick_random(ran0):
    """Draw a uniform random number in [0, 1], rounded to 12 decimal places.

    The incoming ``ran0`` value is ignored (overwritten); the parameter is
    kept for interface compatibility with the callers.
    """
    ran0 = random.uniform(0, 1)
    return round(ran0, 12)
# End of function
# Function to obtain magnetization value
@jit(nopython=True)
def magnetization_sum(nlayers,iterator,iterator2,a):
    # Mean spin per site: sums the sub-lattice a[0:nlayers, 1:iterator-1, 1:iterator-1]
    # and divides by the full lattice size nlayers*iterator*iterator2.
    # NOTE(review): the slice skips the first/last rows and columns while the
    # denominator counts every site -- confirm this boundary handling is intended.
    return numpy.sum(a[0:nlayers,1:iterator-1,1:iterator-1])/(nlayers*iterator*iterator2*1.0)
# End of function
# Create a timestamped output folder, make it the working directory, and
# open the CSV output files.
path=Output_Path_Set()
input_config=open("Input_Config.csv","w+")  # To write input configuration to output folder in a separate file for future use.
input_config.write("Number of Rows :"+str(nrows))
input_config.write("\nNumber of Columns :"+str(ncols))
input_config.write("\nValue of npass :"+str(npass))
input_config.write("\nValue of nequil :"+str(nequil))
input_config.write("\nValue of high_temp :"+str(high_temp))
input_config.write("\nValue of low_temp :"+str(low_temp))
input_config.write("\nValue of temp_interval :"+str(temp_interval))
input_config.write("\nConfigType :"+str(ConfigType))
input_config.close()
spin_attribute = open("spin_array_attribute.csv", "w")
spin_attribute.write("number of rows :"+str(nrows))
spin_attribute.write("\nnumber of columns :"+str(ncols))
spin_attribute.write("\nnumber of layers :"+str(nlayers))
nscans=int((high_temp-low_temp)/temp_interval+1)  # Determining the number of scans
spin_attribute.write("\nnumber of scans :"+str(nscans))
spin_attribute.write("\n2")
spin_attribute.close()
# Per-site spin output, one row per (temp, layer, row, column).
spin = open("spin_array.csv","w+")
spin_writer=csv.writer(spin)
spin_row=["temp","i","j","k","a[i,j]"]
spin_writer.writerow(spin_row)
# Per-temperature magnetization and energy summaries.
magnet = open("magnetization.csv","w+")
magnet.write("Temp , Ave_magnetization , Ave_magnetization^2 , Susceptibility")
magnet.write("\n")
magnet_writer=csv.writer(magnet)
energyObj = open("energy.csv","w+")
energyObj.write("Temp , Ave_energy , Ave_energy^2 , C_v")
energyObj.write("\n")
energy_writer=csv.writer(energyObj)
# Section for choosing ConfigType (initial spin configuration).
if(ConfigType==1):
    # Checkerboard Pattern Matrix: alternate -1 along each dimension.
    start_matrix[1::2,::2,::2] = -1  # Depth
    start_matrix[::2,1::2,::2] = -1  # Row
    start_matrix[::2,::2,1::2] = -1  # Column
elif(ConfigType==2):
    # Interface Pattern Matrix: columns in the right half are -1, left half +1.
    # (The original iterated every layer and row only to re-assign whole
    # columns each time; one pass over the columns is equivalent.)
    for j in range(0,iterator2):
        start_matrix[:,:,j] = -1 if j >= iterator2/2 else 1
elif(ConfigType==3):
    # Unequal Interface Pattern Matrix: columns past one quarter are -1.
    for j in range(0,iterator2):
        start_matrix[:,:,j] = -1 if j >= iterator2/4 else 1
elif(ConfigType==4):
    # Random Pattern Matrix: each site independently +1/-1 with p=0.5.
    for k in range(0,nlayers):  # Depth
        for i in range(0,iterator):  # Row
            for j in range(0,iterator2):  # Column
                dummy = pick_random(ran0)
                start_matrix[k,i,j] = 1 if dummy >= 0.5 else -1
else:
    print("Error! Check ConfigType parameter in ising.in")
# Scan Loop: one Monte Carlo run per temperature point.
for iscan in range(1,nscans+1):  # Main for loop
    temp = float(round((high_temp - temp_interval*(iscan-1)), 3))  # rounding off for optimisation purposes
    print("Running Program for Temperature : "+str(temp)+"\n")
    beta = 1.0/temp  # Resetting variables to initial values
    output_count = 0
    energy_ave = 0.0
    energy2_ave = 0.0
    magnetization_ave = 0.0
    magnetization2_ave = 0.0
    # Bug fix: take a fresh copy of the starting configuration each scan.
    # `a = start_matrix` only re-bound the name; since `start_matrix` is an
    # alias of `a`, any in-place spin flips performed by Monte_Carlo would
    # leak from one temperature scan into the next instead of restarting
    # from the initial configuration.
    a = start_matrix.copy()
    # Main loop containing Monte Carlo algorithm
    m , n , d , i , j , k , ipass , npass , nequil , iterator , iterator2 , nlayers , ran0 , a , magnetization , magnetization_ave , magnetization2_ave , energy , beta , DeltaU , output_count , energy_ave , energy2_ave = Monte_Carlo( m , n , d , i , j , k , ipass , npass , nequil , iterator , iterator2 , nlayers , ran0 , a , magnetization , magnetization_ave , magnetization2_ave , energy , beta , DeltaU , output_count,energy_ave,energy2_ave )
    # End Monte carlo passes: dump the final spin lattice and the averages.
    for k in range(0,nlayers):  # Depth
        for i in range(0,iterator):  # Rows
            for j in range(0,iterator2):  # Columns
                spin_row=[temp,k,i,j,a[k,i,j]]
                spin_writer.writerow(spin_row)
    magnet_row=[temp , abs(magnetization_ave/output_count) , magnetization2_ave/output_count , beta*(magnetization2_ave/output_count - (magnetization_ave/output_count)**2)]
    magnet_writer.writerow(magnet_row)
    energy_row=[temp , energy_ave/output_count , energy2_ave/output_count , (beta**2)*(energy2_ave/output_count - (energy_ave/output_count)**2)]
    energy_writer.writerow(energy_row)
# End Scan Loop
print("\nProgram completed.\n\nOpen folder",path,"to view output.\n\n")
spin.close()  # Closing open files. This part is important as open files may not allow writing of new data
magnet.close()
energyObj.close()
# Append total runtime to the profiling log.
Profiler = open("Program_Profile.csv","a+")
time_elapsed=(time.perf_counter()-time_start)  # Program execution time profiler
time_elapsed=round(time_elapsed,5)
Profiler.write("\nProgram FInished running in "+str(time_elapsed)+" Seconds on "+str(time.ctime()))
Profiler.close()
# THE END | [
"Path.Output_Path_Set",
"random.uniform",
"time.ctime",
"numpy.ones",
"Montecarlo.Monte_Carlo",
"csv.writer",
"Input_param_reader.Ising_input",
"time.perf_counter",
"numpy.sum",
"numba.jit"
] | [((806, 825), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (823, 825), False, 'import time\n'), ((3146, 3159), 'Input_param_reader.Ising_input', 'Ising_input', ([], {}), '()\n', (3157, 3159), False, 'from Input_param_reader import Ising_input\n'), ((3604, 3657), 'numpy.ones', 'numpy.ones', (['(nlayers, iterator, iterator2)'], {'dtype': 'int'}), '((nlayers, iterator, iterator2), dtype=int)\n', (3614, 3657), False, 'import numpy\n'), ((3740, 3758), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (3743, 3758), False, 'from numba import jit\n'), ((3915, 3933), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (3918, 3933), False, 'from numba import jit\n'), ((4108, 4125), 'Path.Output_Path_Set', 'Output_Path_Set', ([], {}), '()\n', (4123, 4125), False, 'from Path import Output_Path_Set\n'), ((5323, 5339), 'csv.writer', 'csv.writer', (['spin'], {}), '(spin)\n', (5333, 5339), False, 'import csv\n'), ((5564, 5582), 'csv.writer', 'csv.writer', (['magnet'], {}), '(magnet)\n', (5574, 5582), False, 'import csv\n'), ((5714, 5735), 'csv.writer', 'csv.writer', (['energyObj'], {}), '(energyObj)\n', (5724, 5735), False, 'import csv\n'), ((8326, 8539), 'Montecarlo.Monte_Carlo', 'Monte_Carlo', (['m', 'n', 'd', 'i', 'j', 'k', 'ipass', 'npass', 'nequil', 'iterator', 'iterator2', 'nlayers', 'ran0', 'a', 'magnetization', 'magnetization_ave', 'magnetization2_ave', 'energy', 'beta', 'DeltaU', 'output_count', 'energy_ave', 'energy2_ave'], {}), '(m, n, d, i, j, k, ipass, npass, nequil, iterator, iterator2,\n nlayers, ran0, a, magnetization, magnetization_ave, magnetization2_ave,\n energy, beta, DeltaU, output_count, energy_ave, energy2_ave)\n', (8337, 8539), False, 'from Montecarlo import Monte_Carlo\n'), ((9609, 9628), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (9626, 9628), False, 'import time\n'), ((3802, 3822), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (3816, 3822), False, 'import 
random\n'), ((3998, 4053), 'numpy.sum', 'numpy.sum', (['a[0:nlayers, 1:iterator - 1, 1:iterator - 1]'], {}), '(a[0:nlayers, 1:iterator - 1, 1:iterator - 1])\n', (4007, 4053), False, 'import numpy\n'), ((9803, 9815), 'time.ctime', 'time.ctime', ([], {}), '()\n', (9813, 9815), False, 'import time\n')] |
from concat.level1.typecheck.types import (
IndividualType,
SequenceVariable,
StackItemType,
)
from hypothesis.strategies import (
SearchStrategy,
booleans,
composite,
from_type,
iterables,
lists,
register_type_strategy,
sampled_from,
)
from typing import (
Iterable,
Sequence,
Type,
)
def _iterable_strategy(type: Type[Iterable]) -> SearchStrategy[Iterable]:
    """Hypothesis strategy producing Iterable instances for the given type.

    ``Iterable[StackItemType]`` is special-cased: an optional SequenceVariable
    followed by up to 10 IndividualTypes; any other element type draws a
    common container filled with elements of that type.
    """
    @composite
    def strategy(draw) -> Iterable:
        if hasattr(type, '__args__') and type.__args__ == (StackItemType,):
            # Bug fix: this local was named `list`, which made `list` local to
            # the whole function and raised UnboundLocalError on the generic
            # branch below, where the builtin `list` is needed.
            items = []
            if draw(booleans()):
                items.append(draw(from_type(SequenceVariable)))
            items += draw(lists(from_type(IndividualType), max_size=10))
            return items
        cls = draw(sampled_from([list, tuple, set, frozenset]))
        return cls(
            draw(iterables(getattr(type, '__args__', object), max_size=10))
        )
    return strategy()
def _sequence_strategy(type: Type[Sequence]) -> SearchStrategy[Sequence]:
    """Build a hypothesis strategy producing list or tuple sequences for ``type``."""
    @composite
    def strategy(draw) -> Sequence:
        container = draw(sampled_from([list, tuple]))
        elements = draw(_iterable_strategy(type))
        return container(elements)
    return strategy()
# Register the custom strategies so hypothesis can resolve the abstract
# Iterable/Sequence annotations via from_type().
register_type_strategy(Iterable, _iterable_strategy)
register_type_strategy(Sequence, _sequence_strategy)
| [
"hypothesis.strategies.from_type",
"hypothesis.strategies.sampled_from",
"hypothesis.strategies.register_type_strategy",
"hypothesis.strategies.booleans"
] | [((1203, 1255), 'hypothesis.strategies.register_type_strategy', 'register_type_strategy', (['Iterable', '_iterable_strategy'], {}), '(Iterable, _iterable_strategy)\n', (1225, 1255), False, 'from hypothesis.strategies import SearchStrategy, booleans, composite, from_type, iterables, lists, register_type_strategy, sampled_from\n'), ((1256, 1308), 'hypothesis.strategies.register_type_strategy', 'register_type_strategy', (['Sequence', '_sequence_strategy'], {}), '(Sequence, _sequence_strategy)\n', (1278, 1308), False, 'from hypothesis.strategies import SearchStrategy, booleans, composite, from_type, iterables, lists, register_type_strategy, sampled_from\n'), ((778, 821), 'hypothesis.strategies.sampled_from', 'sampled_from', (['[list, tuple, set, frozenset]'], {}), '([list, tuple, set, frozenset])\n', (790, 821), False, 'from hypothesis.strategies import SearchStrategy, booleans, composite, from_type, iterables, lists, register_type_strategy, sampled_from\n'), ((1098, 1125), 'hypothesis.strategies.sampled_from', 'sampled_from', (['[list, tuple]'], {}), '([list, tuple])\n', (1110, 1125), False, 'from hypothesis.strategies import SearchStrategy, booleans, composite, from_type, iterables, lists, register_type_strategy, sampled_from\n'), ((587, 597), 'hypothesis.strategies.booleans', 'booleans', ([], {}), '()\n', (595, 597), False, 'from hypothesis.strategies import SearchStrategy, booleans, composite, from_type, iterables, lists, register_type_strategy, sampled_from\n'), ((694, 719), 'hypothesis.strategies.from_type', 'from_type', (['IndividualType'], {}), '(IndividualType)\n', (703, 719), False, 'from hypothesis.strategies import SearchStrategy, booleans, composite, from_type, iterables, lists, register_type_strategy, sampled_from\n'), ((633, 660), 'hypothesis.strategies.from_type', 'from_type', (['SequenceVariable'], {}), '(SequenceVariable)\n', (642, 660), False, 'from hypothesis.strategies import SearchStrategy, booleans, composite, from_type, iterables, lists, 
register_type_strategy, sampled_from\n')] |
'''
===============================================================================
ENGR 133 Program Description
This function takes an image array, a size number and a blur value and returns an array that contains a blurred image
Assignment Information
Assignment: Python Group Project
Author: <NAME>, <EMAIL>
<NAME>, <EMAIL>
<NAME>, <EMAIL>
<NAME>, <EMAIL>
Team ID: 002-10
===============================================================================
'''
## UNIMPLEMENTED IN MAIN
import math
import numpy as np
def process(imageData, blur, size):
    """Blur an image by convolving it with a Gaussian kernel.

    :param imageData: 2-D pixel array; each pixel is an RGB(A) sequence
    :param blur: standard deviation handed to the kernel builder
    :param size: requested kernel side length (forced to odd)
    :return: new float array of the same dimensions with the blurred image
    """
    channelCount = len(imageData[0][0])  # 3 for RGB, 4 for RGBA
    rowCount = len(imageData)  # for progress display
    if size % 2 == 0:
        size += 1  # the kernel needs a single centre pixel
    kernel = getKernel(size, blur)
    half = len(kernel) // 2  # hoisted: centre offset of the kernel window
    outimage = np.empty([len(imageData), len(imageData[0]), channelCount])
    for i in range(len(imageData)):
        print(f"Row {i}/{rowCount}")
        for j in range(len(imageData[0])):
            weightedAvg = np.zeros(channelCount)
            for h in range(len(kernel)):
                for k in range(len(kernel[0])):
                    # BUG FIX: offsets were computed as -len(kernel)//2 + h,
                    # which floors to h - (half + 1) and skewed the window one
                    # pixel up/left; it must be centred on (i, j).
                    dx = h - half
                    dy = k - half
                    # Clamp out-of-range coordinates to the nearest edge
                    # (replicate-edge padding).
                    if 0 <= i + dx < len(imageData):
                        pixelX = i + dx
                    elif i + dx < 0:
                        pixelX = 0
                    else:
                        pixelX = -1
                    # BUG FIX: the original tested j+dy > len(imageData[0]),
                    # so j+dy == len(imageData[0]) matched no branch and left
                    # pixelY stale from the previous iteration.
                    if 0 <= j + dy < len(imageData[0]):
                        pixelY = j + dy
                    elif j + dy < 0:
                        pixelY = 0
                    else:
                        pixelY = -1
                    pixel = imageData[pixelX][pixelY]
                    # Accumulate the kernel-weighted channel values.
                    weightedAvg += np.multiply(kernel[h][k], pixel)
            outimage[i][j] = weightedAvg
    return outimage
def getKernel(size, stddev):
    """Build a normalized square Gaussian kernel.

    :param size: odd side length of the square kernel
    :param stddev: spread of the Gaussian in both directions
    :return: size x size list of weights summing to 1
    """
    # BUG FIX: the centre index was ``int(size)``, which put the Gaussian
    # peak outside the kernel entirely (offsets ran from -size to -1, so the
    # "blur" was a one-sided smear).  The peak belongs at the middle cell,
    # i.e. index size // 2.
    center = size // 2
    kernel = []
    for j in range(size):
        row = [gaussian2D(i - center, j - center, stddev) for i in range(size)]
        kernel.append(row)
    return normalizeArray(kernel)
def gaussian2D(x, y, stddev):
    """Evaluate the 2-D Gaussian with spread ``stddev`` at the point (x, y)."""
    variance2 = 2 * stddev * stddev
    amplitude = 1 / (math.pi * variance2)
    return amplitude * math.exp(-(x * x + y * y) / variance2)
def normalizeArray(array):
    """Scale a 2-D array in place so that all its entries sum to 1.

    :param array: 2-D list of numbers (rows may differ in length)
    :return: the same list object, normalized in place
    """
    # BUG FIX: the original iterated i over range(len(array[0])) and j over
    # range(len(array)) while indexing array[i][j] -- transposed bounds that
    # only worked because the kernel happens to be square.  Iterating the
    # rows directly is correct for any rectangular shape.
    total = sum(sum(row) for row in array)
    for row in array:
        for idx in range(len(row)):
            row[idx] /= total
    return array
| [
"math.exp",
"numpy.multiply",
"numpy.zeros"
] | [((2771, 2821), 'math.exp', 'math.exp', (['(-(x * x + y * y) / (2 * stddev * stddev))'], {}), '(-(x * x + y * y) / (2 * stddev * stddev))\n', (2779, 2821), False, 'import math\n'), ((1129, 1151), 'numpy.zeros', 'np.zeros', (['channelCount'], {}), '(channelCount)\n', (1137, 1151), True, 'import numpy as np\n'), ((2109, 2141), 'numpy.multiply', 'np.multiply', (['kernel[h][k]', 'pixel'], {}), '(kernel[h][k], pixel)\n', (2120, 2141), True, 'import numpy as np\n')] |
from .. import db
from app.main.model.token_blacklist_model import TokenBlackList
from .. import jwt
from app.main.schema.token_blacklist_schema import tokens_blacklist_schema
from datetime import datetime
@jwt.token_in_blocklist_loader
def check_if_token_revoked(jwt_header, jwt_payload):
    """Return True when the token's jti is present in the blacklist table."""
    found = (
        db.session.query(TokenBlackList.id)
        .filter_by(jti=jwt_payload["jti"])
        .scalar()
    )
    return found is not None
def delete_expired_tokens():
    """Remove every blacklist entry whose expiration time has passed."""
    now = datetime.strptime(
        datetime.utcnow().isoformat(' ', 'seconds'), '%Y-%m-%d %H:%M:%S'
    )
    entries = tokens_blacklist_schema.dump(TokenBlackList.query.all())
    for entry in entries:
        expires_at = datetime.strptime(entry['expiration_time'], '%Y-%m-%dT%H:%M:%S')
        if expires_at < now:
            TokenBlackList.query.filter_by(id=entry['id']).delete()
    db.session.commit()
| [
"app.main.schema.token_blacklist_schema.tokens_blacklist_schema.dump",
"app.main.model.token_blacklist_model.TokenBlackList.query.all",
"datetime.datetime.utcnow",
"datetime.datetime.strptime",
"app.main.model.token_blacklist_model.TokenBlackList.query.filter_by"
] | [((580, 606), 'app.main.model.token_blacklist_model.TokenBlackList.query.all', 'TokenBlackList.query.all', ([], {}), '()\n', (604, 606), False, 'from app.main.model.token_blacklist_model import TokenBlackList\n'), ((629, 669), 'app.main.schema.token_blacklist_schema.tokens_blacklist_schema.dump', 'tokens_blacklist_schema.dump', (['jwt_tokens'], {}), '(jwt_tokens)\n', (657, 669), False, 'from app.main.schema.token_blacklist_schema import tokens_blacklist_schema\n'), ((738, 785), 'datetime.datetime.strptime', 'datetime.strptime', (['now_str', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(now_str, '%Y-%m-%d %H:%M:%S')\n", (755, 785), False, 'from datetime import datetime\n'), ((684, 701), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (699, 701), False, 'from datetime import datetime\n'), ((841, 909), 'datetime.datetime.strptime', 'datetime.strptime', (["jwt_token['expiration_time']", '"""%Y-%m-%dT%H:%M:%S"""'], {}), "(jwt_token['expiration_time'], '%Y-%m-%dT%H:%M:%S')\n", (858, 909), False, 'from datetime import datetime\n'), ((923, 973), 'app.main.model.token_blacklist_model.TokenBlackList.query.filter_by', 'TokenBlackList.query.filter_by', ([], {'id': "jwt_token['id']"}), "(id=jwt_token['id'])\n", (953, 973), False, 'from app.main.model.token_blacklist_model import TokenBlackList\n')] |
import math
import re
import subprocess
# from math import *
import sys
with open('response.plot', "r") as f:
plotTemplate = f.read()
with open('response_multi.plot', "r") as f:
plotTemplateMulti = f.read()
indexhtml = '<head></head><body>'
def mathDict():
    """Return the namespace of math callables/constants exposed to ``eval``."""
    exported = (
        "pow", "cos", "sin", "asin", "acos", "pi", "tan", "tanh", "atan",
        "sqrt", "log", "log10", "exp", "cosh", "acosh", "erf",
    )
    return {name: getattr(math, name) for name in exported}
def similarList(l: list, s: list) -> bool:
    """Return True when every pair of entries is non-None and within 1e-4."""
    for idx in range(len(l)):
        left, right = l[idx], s[idx]
        if left is None or right is None:
            return False
        if math.fabs(left - right) > 0.0001:
            return False
    return True
class Curve:
    """Renders single response curves and cross plots (one formula fed
    through another) to PNGs via gnuplot and collects them into index.html.

    Formulas are strings evaluated with eval() against mathDict(), with
    ``x`` as the sample point and ``a`` bound from an optional precondition
    expression.  NOTE: eval() on these strings is only safe because the
    formulas are hard-coded below, never user input.
    """
    def __init__(self):
        # Entries: [name, func, precondition, includeCross] from addfunction().
        self.funcPairs = []
        # Entries: [integral, deltaIntegral, filename, basic, niceRange, goodRange].
        self.results = []
    def createCrossPlot(self, name: str, funcMath1: str, funcMath2: str, precondition1: str, precondition2: str):
        """Sample funcMath1 (optionally composed with funcMath2) over
        x in [-1, 1], write curve.csv, and render images/<name>.png.

        :param name: plot name; '^' is replaced by '_' in the filename
        :param funcMath1: first formula, evaluated at x
        :param funcMath2: second formula applied to the first's output, or None
        :param precondition1: expression for the first formula's 'a' ('' -> a=1)
        :param precondition2: expression for the second formula's 'a' ('' -> a=1)
        """
        global indexhtml
        print(name)
        func1 = funcMath1
        preCon1Str = ""
        preCon2Str = ""
        if precondition1 != "":
            preCon1Str = "a={}\\n".format(precondition1)
        if precondition2 != "":
            preCon2Str = "a={}\\n".format(precondition2)
        if funcMath2 is not None:
            func2 = funcMath2
            # NOTE(review): the "{}nx=" here looks like a typo for "{}x=" or
            # "{}\\nx=" -- confirm the intended gnuplot title layout.
            title = "{}nx={}\\n{}y={}".format(preCon1Str, funcMath1, preCon2Str, funcMath2)
            basic = 0  # 0 marks a composed (cross) plot
        else:
            func2 = None
            title = "{}y={}".format(preCon1Str, funcMath1)
            basic = 1  # 1 marks a plain single-formula plot
        deltaIntegral = 0
        lastValue = 0
        integral = 0.0
        startPoints = []  # samples at x = -1, 0, 1 used to classify the range
        with open("curve.csv", "w") as fOut:
            for i in range(-100, 101):
                x = i / 100.0
                y = None
                try:
                    if precondition1 is not None:
                        if precondition1 != "":
                            aFactor = eval(precondition1)
                        else:
                            aFactor = 1
                        y = eval(func1, mathDict(), {"a": aFactor, "x": x})
                    if funcMath2 is not None:
                        # Compose: feed the first formula's output in as x.
                        if precondition2 is not None:
                            if precondition2 != "":
                                aFactor = eval(precondition2)
                            else:
                                aFactor = 1
                        y = eval(func2, mathDict(), {"a": aFactor, "x": y})
                    integral += y
                    deltaIntegral += y - lastValue
                    lastValue = y
                    fOut.write("{}\t{}\n".format(x, y))
                except Exception as e:
                    # Formulas may be undefined at some x (e.g. log, sqrt);
                    # log and keep sampling.
                    print(e)
                if i in [-100, 0, 100]:
                    startPoints.append(y)
        # Classify the response range to pick the plot colour:
        # red = spans -1..1, blue = spans 0..1, green = anything else.
        niceRange = similarList(startPoints, [-1, 0, 1]) or similarList(startPoints, [1, 0, -1])
        goodRange = similarList(startPoints[1:], [0, 1]) or similarList(startPoints[1:], [1, 0])
        color = '#009900'
        if niceRange:
            color = '#990000'
        elif goodRange:
            color = '#000099'
        filename = "images/{}.png".format(name.replace("^", "_"))
        with open("temp.plot", "w") as fTmpl:
            fTmpl.write(plotTemplate.format(filename=filename, title=title, color=color))
        subprocess.call("gnuplot temp.plot", shell=True)
        self.results.append([integral, deltaIntegral, filename, basic, niceRange, goodRange])
    def addfunction(self, includeCross: str, name: str, func: str, precondition: str):
        """Register a formula; includeCross == "#" opts it into cross plots."""
        self.funcPairs.append([name, func, precondition, includeCross])
    def runAll(self):
        """Render every single plot, then every cross plot of the "#"-marked
        pairs, and write the sorted gallery to index.html."""
        for name, func, precondition1, cross1 in self.funcPairs:
            self.createCrossPlot(name, func, None, precondition1, None)
        for name1, func1, precondition1, cross1 in self.funcPairs:
            if cross1 == "#":
                for name2, func2, precondition2, cross2 in self.funcPairs:
                    if cross2 == "#":
                        self.createCrossPlot(name1 + name2, func1, func2, precondition1, precondition2)
        # Sort: plain plots first, then by integral, delta and filename.
        self.results.sort(key=lambda x: (x[3], x[0], x[1], x[2]), reverse=True)
        indexhtml = ""
        for item in self.results:
            indexhtml += '<img src="{}" alt="{}" />\n'.format(item[2], item[2])
        with open('index.html', "w") as fHtml:
            fHtml.write(indexhtml)
class ParamtricCurve:
    """Renders one PNG per formula with a family of curves, one curve per
    value of the parameter ``a``, and collects them into index_parametric.html.

    (Class name keeps the original's spelling; renaming would break callers.)
    """
    def __init__(self):
        self.funcPairs = []  # unused here; kept for parity with Curve
        self.results = []    # PNG filenames produced by createPlot()
    def done(self):
        """Write the gallery page referencing every rendered plot."""
        indexhtml = ""
        for image in self.results:
            indexhtml += '<img src="{}" alt="{}" />\n'.format(image,image)
        with open('index_parametric.html', "w") as fHtml:
            fHtml.write(indexhtml)
    def createPlot(self, name: str, funcMath1: str, min:int, param_a: list):
        """Sample funcMath1 over x in [min, 1] for each value in param_a and
        render all curves into one plot images/parametric_<name>.png.

        :param name: plot name; '^' is replaced by '_' in the filename
        :param funcMath1: formula string evaluated with eval() at each x
        :param min: lower bound of x (upper bound is fixed at 1)
        :param param_a: values bound to 'a', one csv column / curve each
        """
        global indexhtml
        func1 = funcMath1
        func2 = None
        title = "y={}".format(funcMath1)
        basic = 1
        with open("curve.csv", "w") as fOut:
            for i in range(min*100, 101):
                x = i / 100.0
                fOut.write("{}".format(x))
                y = None
                for a in param_a:
                    try:
                        y = eval(func1, mathDict(), {"a": a, "x": x})
                        fOut.write("\t{}".format(y))
                    except Exception as e:
                        # Formula may be undefined for some (x, a); skip cell.
                        print("f={} x={} a={} : {}".format(funcMath1, x,a,e))
                fOut.write("\n")
        # One gnuplot plot clause per 'a' value, reading successive columns.
        plot = "'curve.csv' using 1:{idx} smooth mcspline title \"{title}\""
        plotAdd = ""
        i = 1
        for a in param_a:
            if i != 1:
                plotAdd += ","
            plotAdd += plot.format(idx=i + 1, title=str(a))
            i += 1
        filename = "images/parametric_{}.png".format(name.replace("^", "_"))
        self.results.append(filename)
        with open("temp.plot", "w") as fTmpl:
            fTmpl.write(plotTemplateMulti.format(minrange=min-0.1, filename=filename, title=title, plot=plotAdd))
        subprocess.call("gnuplot temp.plot", shell=True)
# --- Parametric sweep plots -------------------------------------------------
pcurve = ParamtricCurve()
oddIntParams = [1, 3, 5, 7, 9, 11, 13]
atanValues = [ 0.1, 0.5, 1, 5, 10, 50, 100, 500, 1000]
# Powers of two 2^-10 .. 2^9 (2^0 skipped) used as exponential bases.
expos = []
for i in range(-10, 10):
    if i != 0:
        expos.append(math.pow(2, i))
pcurve.createPlot("atan_n", "atan(x*a)/atan(a)", -1, atanValues)
pcurve.createPlot("erf", "erf(x*a)", -1, [0.1, 0.5, 1,1.5, 2, 3, 4, 5, 7, 9, 11, 13])
pcurve.createPlot("x^n", "pow(x-0.5,a)*(pow(2,(a-1)))+0.5", 0, oddIntParams)
pcurve.createPlot("x^n-1_1", "pow(x,a)", -1, [1, 3, 5, 7, 9, 11, 13])
pcurve.createPlot("tanh", "tanh(x*a)/tanh(a)", -1, [1, 2, 3, 5, 7, 9, 11, 13])
pcurve.createPlot("sigmoid_n", "(atan(x*2*a-a)-atan(-a))/atan(a)/2", 0, [0.5, 1, 2, 3, 5, 7, 9, 11, 13])
pcurve.createPlot("log0_1", "log(abs(x*a)+1)/log(a+1)", 0, [0.1, 0.5, 1, 2, 10, 50, 100, 1000, 10000])
pcurve.createPlot("exp", "(pow(a,x-1)-pow(a,-1))*a/(a-1)", 0, expos)
pcurve.createPlot("exp2", "(pow(a,sin(x*pi/2)-1)-pow(a,-1))*a/(a-1)", 0, expos)
pcurve.done()
# NOTE(review): this exit makes everything below unreachable -- presumably a
# debug switch left in to run only the parametric plots; confirm before
# relying on the Curve output below.
sys.exit(0)
# --- Response curve catalogue (currently unreachable, see sys.exit above) ---
curve = Curve()
curve.addfunction("", "x", "x", "")
curve.addfunction("", "x1", "1-x", "")
curve.addfunction("", "sin2", "sin(x*pi/2)", "")
curve.addfunction("", "cos1", "(-cos(x*pi)+1)/2", "")
curve.addfunction("#", "x^3", "pow(x-0.5,a)*(pow(2,(a-1)))+0.5", "3")
curve.addfunction("", "x^5", "pow(x-0.5,a)*(pow(2,(a-1)))+0.5", "5")
curve.addfunction("", "x^11", "pow(x-0.5,a)*(pow(2,(a-1)))+0.5", "11")
curve.addfunction("#", "sqr", "x*x", "")
curve.addfunction("#", "cube", "x*x*x", "")
curve.addfunction("#", "sigmoid1", "(atan(x*2*a-a)-atan(-a))/atan(a)/2", "1")
curve.addfunction("", "sigmoid5", "(atan(x*2*a-a)-atan(-a))/atan(a)/2", "5")
curve.addfunction("", "atan1", "atan(x*a)/atan(a)", "1")
curve.addfunction("#", "atan2", "atan(x*a)/atan(a)", "2")
curve.addfunction("", "atan10", "atan(x*a)/atan(a)", "10")
curve.addfunction("", "atan100", "atan(x*a)/atan(a)", "100")
curve.addfunction("#", "log", "log(x+1)/log(2)", "")
curve.addfunction("", "tan1", "tan(x*a)/tan(a)", "1")
curve.addfunction("", "tan1.3", "tan(x*a)/tan(a)", "1.3")
curve.addfunction("#", "tan1.5", "tan(x*a)/tan(a)", "1.5")
curve.addfunction("", "asin2", "asin(x*2-1)/pi+0.5", "")
curve.addfunction("", "halfcircle", "sqrt(1-x*x)", "")
curve.addfunction("", "quarter", "sqrt(1-(x-1)*(x-1))", "")
curve.addfunction("", "sqrtdivx3", "sqrt(1-pow(-x+1,a))", "3")
curve.addfunction("", "sqrtdivx11", "sqrt(1-pow(-x+1,a))", "11")
curve.addfunction("", "triangle2", "(asin(-cos(x*pi*a))/pi*2+1)/2", "2")
curve.addfunction("", "trianglerev", "(asin(cos(x*pi*a))/pi*2+1)/2", "2")
curve.addfunction("", "triangle3", "(asin(-cos(x*pi*a))/pi*2+1)/2", "3")
curve.runAll()
| [
"math.pow",
"math.fabs",
"subprocess.call",
"sys.exit"
] | [((7199, 7210), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (7207, 7210), False, 'import sys\n'), ((3466, 3514), 'subprocess.call', 'subprocess.call', (['"""gnuplot temp.plot"""'], {'shell': '(True)'}), "('gnuplot temp.plot', shell=True)\n", (3481, 3514), False, 'import subprocess\n'), ((6182, 6230), 'subprocess.call', 'subprocess.call', (['"""gnuplot temp.plot"""'], {'shell': '(True)'}), "('gnuplot temp.plot', shell=True)\n", (6197, 6230), False, 'import subprocess\n'), ((803, 825), 'math.fabs', 'math.fabs', (['(l[i] - s[i])'], {}), '(l[i] - s[i])\n', (812, 825), False, 'import math\n'), ((6425, 6439), 'math.pow', 'math.pow', (['(2)', 'i'], {}), '(2, i)\n', (6433, 6439), False, 'import math\n')] |
import os
from typing import Dict, Optional
import numpy as np
import pandas as pd
from scipy.signal import correlate
from . import ShakeExtractor, helpers
from .abstract_extractor import AbstractExtractor
from .helpers import normalize, get_equidistant_signals
from .log import logger
from .synchronization_errors import StartEqualsEndError
from .types import SourceDict, ResultTableSpec, SyncPairTimeshift, SyncPairs
class Synchronizer:
    """Synchronizes multiple sensor data streams against one reference
    source by detecting shared synchronization events (via an extractor),
    stretching each stream so the inter-event duration matches the
    reference, and then shifting it so the events coincide.
    """
    @property
    def extractor(self) -> AbstractExtractor:
        """Get the current extractor"""
        return self._extractor
    @extractor.setter
    def extractor(self, value: AbstractExtractor):
        if not issubclass(type(value), AbstractExtractor):
            raise TypeError("Extractor needs to be a subclass of AbstractExtractor.")
        self._extractor = value
    def __init__(
        self,
        sources: SourceDict,
        reference_source_name: str,
        extractor: Optional[AbstractExtractor] = None,
        sampling_freq: Optional[float] = None,
    ):
        """
        Create a new synchronizer. Synchronizer objects are used to remove offsets and clock offsets by stretching and
        moving reference points detected by an extractor.
        :param sources: A SourceDict to describe the input data
        :param reference_source_name: name of the sensor to be used as reference.
            Other sensors will be made synchronous to this sensor, and data from this sensor will not be modified.
        :param extractor: This will be used to find synchronization points in the source data. If None, it defaults to
            a ShakeExtractor instance
        :param sampling_freq: Override the frequency used to resample input data. If None, it defaults to the maximum
            input frequency
        """
        self.sources = sources
        self.ref_source_name = reference_source_name
        self._check_sources()
        self.extractor = extractor if extractor is not None else ShakeExtractor()
        self.ref_signals = self._prepare_ref_signals()
        self.sampling_freq = (
            sampling_freq
            if sampling_freq is not None
            else helpers.get_max_ref_frequency(self.ref_signals)
        )
    def _check_sources(self):
        """Verifies that the source dict adheres to the required format and that the reference source is available"""
        for source_name, source in self.sources.items():
            if "data" not in source or "ref_column" not in source:
                raise ValueError(
                    "Each source needs to have a `data` and a `ref_column` property"
                )
            if not isinstance(source["data"], pd.DataFrame):
                raise ValueError(
                    "The `data` property of each source must contain a DatFrame"
                )
            if not isinstance(source["data"].index, pd.DatetimeIndex):
                raise ValueError(
                    "The `data` DataFrame must have a pd.DatetimeIndex for each source"
                )
            if source["data"].index.duplicated().any():
                raise ValueError(
                    "The input dataframe must not have duplicate index values, "
                    "convert the data into a normalized wide format"
                )
            if (
                not isinstance(source["ref_column"], str)
                or source["ref_column"] not in source["data"].columns
            ):
                raise ValueError(
                    "Each source must have a string specifying the reference column, and the reference"
                    "column must be available in the source's DataFrame"
                )
        if self.ref_source_name not in self.sources.keys():
            raise ValueError(
                "The reference source name must be available in the source dict"
            )
    def _prepare_ref_signals(self) -> pd.DataFrame:
        """
        Collect the reference columns from all sources and join them into a single dataframe.
        Each reference column is named equal to the name of the source it comes from.
        :return: normalized reference signals
        """
        reference_signals = pd.DataFrame()
        for source_name, source in self.sources.items():
            signal = source["data"][source["ref_column"]].dropna()
            reference_signals = reference_signals.join(signal, how="outer")
            reference_signals.rename(
                columns={source["ref_column"]: source_name}, inplace=True
            )
        reference_signals = reference_signals.apply(normalize)
        return reference_signals
    @staticmethod
    def _get_timeshift_pair(
        dataframe: pd.DataFrame, ref_col: str, sig_col: str, segments: SyncPairs
    ) -> SyncPairTimeshift:
        """
        Returns timeshifts to synchronize sig_col to ref_col.
        Expects equidistant sampled signals.
        :param dataframe: reference signal dataframe
        :param ref_col: name of the reference signal in segments
        :param sig_col: name of the target signal in segments
        :param segments: all detected synchronization pairs
        :return: timeshift to align the first and second synchronization point
            for the target signal to the reference signal
        """
        timeshifts = {}
        for index, segment in enumerate(["first", "second"]):
            logger.debug(
                f"Calculate timeshift of {segment} segment "
                f"for {sig_col} to {ref_col}."
            )
            # reference signal segment data extraction
            ref_start, ref_end, ref_data = helpers.get_segment_data(
                dataframe, segments, ref_col, segment
            )
            sig_start, sig_end, sig_data = helpers.get_segment_data(
                dataframe, segments, sig_col, segment
            )
            # calculate cross-correlation of segments
            # (the argmax of the cross-correlation is the sample lag of the
            # target segment relative to the reference segment)
            cross_corr = correlate(ref_data, sig_data)
            shift_in_samples = np.argmax(cross_corr) - len(sig_data) + 1
            # get timestamp at which sig_segment must start to sync signals
            max_corr_ts = dataframe.index[
                dataframe.index.get_loc(ref_start, method="nearest") + shift_in_samples
            ]
            logger.debug(
                f"Highest correlation with start at "
                f"{max_corr_ts} with {np.max(cross_corr)}."
            )
            # calculate timeshift to move signal to maximize correlation
            timeshifts[segment] = max_corr_ts - sig_start
            logger.debug("Timeshift is {}.".format(str(timeshifts[segment])))
        return timeshifts
    def _calculate_stretch_factors(self) -> pd.DataFrame:
        """
        Calculate the stretch factor that aligns each reference signal to the reference
        signal of the reference source. It immediately applies these stretch factors
        to a copy of ``self.ref_signals``.
        :return: a copy of self.ref_signals with the stretch factors applied.
        """
        ref_signals = self.ref_signals.copy()
        start_time = ref_signals.index.min()
        # Get equidistantly sampled reference signals for the cross correlation to work
        df_equidistant = get_equidistant_signals(ref_signals, self.sampling_freq)
        sync_pairs = self.extractor.get_segments(df_equidistant)
        helpers.verify_segments(ref_signals.columns, sync_pairs)
        for source in df_equidistant.columns:
            if source == self.ref_source_name:
                continue
            timeshifts = Synchronizer._get_timeshift_pair(
                df_equidistant, self.ref_source_name, source, sync_pairs
            )
            logger.debug(
                f"Timedelta between shifts before stretching: "
                f"{timeshifts['first'] - timeshifts['second']}"
            )
            try:
                stretch_factor = helpers.get_stretch_factor(
                    sync_pairs[source], timeshifts
                )
            except ZeroDivisionError:
                raise StartEqualsEndError(
                    "First and last segment have been identified as exactly the same. Bad window, maybe?"
                )
            logger.info(f"Stretch factor for {source}: {stretch_factor}")
            # stretch signal and exchange it in dataframe
            signal_stretched = helpers.stretch_signals(
                pd.DataFrame(ref_signals[source]).dropna(),
                stretch_factor,
                start_time,
            )
            ref_signals = (
                ref_signals.drop(source, axis="columns")
                .join(signal_stretched, how="outer")
                .astype(pd.SparseDtype("float"))
            )
            self.sources[source]["stretch_factor"] = stretch_factor
        return ref_signals
    def _calculate_timeshifts(self, stretched_ref_signals: pd.DataFrame):
        """
        Calculate the shift necessary to align the stretched reference signals to the not-stretched reference sensor.
        :param stretched_ref_signals: a copy of self.ref_signals that has been stretched to align the duration between
            the synchronization points to the duration between them in the reference sensor
        """
        # Resample again with stretched signal
        df_equi = get_equidistant_signals(stretched_ref_signals, self.sampling_freq)
        segments = self.extractor.get_segments(df_equi)
        helpers.verify_segments(stretched_ref_signals.columns, segments)
        for source in df_equi.columns:
            if source == self.ref_source_name:
                continue
            timeshifts = Synchronizer._get_timeshift_pair(
                df_equi, self.ref_source_name, source, segments
            )
            timedelta = timeshifts["first"] - timeshifts["second"]
            # NOTE(review): only a positive residual triggers the warning; a
            # large negative residual passes silently -- confirm whether
            # abs(timedelta) was intended here.
            if timedelta > pd.Timedelta(0):
                logger.warning(
                    f"Timedelta between shifts after stretching: {timedelta}."
                    f"This should be very small: the timedelta to the reference signal"
                    f"should be equal for both start and end so a simple offset aligns the"
                    f"signals perfectly."
                )
            logger.info("Timeshift for {}: {}".format(source, timeshifts["first"]))
            self.sources[source]["timeshift"] = timeshifts["first"]
    def _calculate_sync_params(self):
        """
        This function calculates the synchronization parameters to sync all signals to the reference signal.
        It stores the result in ``self.sources``, in the keys ``timeshift`` and ``stretch_factor``.
        """
        self.sources[self.ref_source_name]["timeshift"] = None
        self.sources[self.ref_source_name]["stretch_factor"] = 1
        # Firstly, determine stretch factor and get stretched reference signals
        stretched_ref_signals = self._calculate_stretch_factors()
        # Secondly, get timeshift for the stretched signals
        self._calculate_timeshifts(stretched_ref_signals)
    def get_sync_params(self, recalculate: bool = False):
        """
        Get the synchronization params. If they have not been calculated yet, they will be.
        :param recalculate: force calculation, even if it was already done before
        :return: the synchronization params for each source, i.e., each timeshift and stretch factor
        """
        selected_keys = ["timeshift", "stretch_factor"]
        if recalculate or "timeshift" not in self.sources[self.ref_source_name]:
            self._calculate_sync_params()
        return {
            source_name: {
                key: value for key, value in source.items() if key in selected_keys
            }
            for source_name, source in self.sources.items()
        }
    def get_synced_data(self, recalculate: bool = False) -> Dict[str, pd.DataFrame]:
        """
        Synchronize the input data.
        :param recalculate: force recalculating the synchronization parameters
        :return: a dictionary of the shifted and stretched source signals
        """
        self.get_sync_params(recalculate)
        synced_data = {}
        start_time = self.ref_signals.index.min()
        for source_name, source in self.sources.items():
            data = source["data"].copy()
            stretch_factor, timeshift = source["stretch_factor"], source["timeshift"]
            if stretch_factor != 1:
                data = helpers.stretch_signals(data, stretch_factor, start_time)
            if timeshift is not None:
                data = data.shift(1, freq=timeshift)
            synced_data[source_name] = data
        return synced_data
    def save_pickles(self, target_dir: str) -> Dict[str, pd.DataFrame]:
        """
        Save a pickled, synced, dataframe for each source file.
        Does not save a total table.
        Sync parameters are saved as ``SYNC.csv``.
        :param target_dir: target directory for the export files
        :return: the synced data, plus a sync parameter dataframe in the dictionary entry with the key "SYNC".
        """
        sync_params = pd.DataFrame(self.get_sync_params())
        synced_data = self.get_synced_data()
        sync_params.to_csv(os.path.join(target_dir, "SYNC.csv"))
        for source_name, synced_df in synced_data.items():
            synced_df.to_pickle(
                os.path.join(target_dir, f"{source_name.upper()}.PICKLE")
            )
        return {**synced_data, "SYNC": sync_params}
    def save_data(
        self,
        target_dir: str,
        tables: Optional[ResultTableSpec] = None,
        save_total_table: bool = True,
    ):
        """
        Export synchronized data.
        Two formats are possible: if ``tables`` is given, a file for each root key is created containing the columns
        from the sensors specified as the keys on the second level. This can be used to create a file for each sensor
        type, see ``ResultTableSpec`` for an example.
        A ``SYNC.csv`` is always exported to store the synchronization parameters that have been calculated.
        :param target_dir: target directory for the export files
        :param tables: ResultTableSpec to specify the export format, or None
        :param save_total_table: exports an outer join over all synchronized dataframes
        """
        if tables is not None and "SYNC" in tables.keys():
            raise ValueError(
                "SYNC must not be one of the table names. "
                "It is reserved for the synchronization parameters."
            )
        if save_total_table and tables is not None:
            if "TOTAL" in tables.keys():
                raise ValueError(
                    "TOTAL must not be one of the table names, "
                    "if the table with all data should be saved."
                )
        sync_params = self.get_sync_params()
        synced_data = self.get_synced_data()
        # Save sync params
        pd.DataFrame(sync_params).to_csv(os.path.join(target_dir, "SYNC.csv"))
        # Save custom tables
        logger.info(tables)
        if tables is not None:
            for table_name, table_spec in tables.items():
                if len(table_spec) == 0:
                    logger.warning(
                        f"Table entry {table_name} is missing any requested columns"
                    )
                    continue
                table_df = pd.DataFrame()
                for source_name, source_columns in table_spec.items():
                    # create dataframe for each source
                    source_df = pd.DataFrame()
                    for column in source_columns:
                        try:
                            data = synced_data[source_name][column]
                        except KeyError:
                            raise ValueError(
                                f"Requested non-existing {source_name}->{column}"
                            )
                        # join selected signals to device dataframe
                        source_df = source_df.join(data, how="outer")
                    if not source_df.empty:
                        # add device signals to general dataframe
                        source_df = source_df.rename(
                            lambda col_name: f"{source_name}_{col_name}",
                            axis="columns",
                        )
                        table_df = table_df.join(source_df, how="outer")
                table_df.dropna(axis="index", how="all", inplace=True)
                table_df.to_csv(os.path.join(target_dir, f"{table_name}.csv"))
        # Save table with total data
        if save_total_table:
            total_table = pd.DataFrame()
            for source_name, data in synced_data.items():
                source_df = data.rename(
                    lambda col_name: f"{source_name}_{col_name}",
                    axis="columns",
                )
                total_table = total_table.join(source_df, how="outer")
            total_table.to_csv(os.path.join(target_dir, "TOTAL.csv"))
| [
"pandas.Timedelta",
"pandas.SparseDtype",
"os.path.join",
"scipy.signal.correlate",
"numpy.argmax",
"numpy.max",
"pandas.DataFrame"
] | [((4212, 4226), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4224, 4226), True, 'import pandas as pd\n'), ((5964, 5993), 'scipy.signal.correlate', 'correlate', (['ref_data', 'sig_data'], {}), '(ref_data, sig_data)\n', (5973, 5993), False, 'from scipy.signal import correlate\n'), ((13268, 13304), 'os.path.join', 'os.path.join', (['target_dir', '"""SYNC.csv"""'], {}), "(target_dir, 'SYNC.csv')\n", (13280, 13304), False, 'import os\n'), ((15052, 15088), 'os.path.join', 'os.path.join', (['target_dir', '"""SYNC.csv"""'], {}), "(target_dir, 'SYNC.csv')\n", (15064, 15088), False, 'import os\n'), ((16776, 16790), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (16788, 16790), True, 'import pandas as pd\n'), ((8721, 8744), 'pandas.SparseDtype', 'pd.SparseDtype', (['"""float"""'], {}), "('float')\n", (8735, 8744), True, 'import pandas as pd\n'), ((9893, 9908), 'pandas.Timedelta', 'pd.Timedelta', (['(0)'], {}), '(0)\n', (9905, 9908), True, 'import pandas as pd\n'), ((15019, 15044), 'pandas.DataFrame', 'pd.DataFrame', (['sync_params'], {}), '(sync_params)\n', (15031, 15044), True, 'import pandas as pd\n'), ((15478, 15492), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (15490, 15492), True, 'import pandas as pd\n'), ((17113, 17150), 'os.path.join', 'os.path.join', (['target_dir', '"""TOTAL.csv"""'], {}), "(target_dir, 'TOTAL.csv')\n", (17125, 17150), False, 'import os\n'), ((6025, 6046), 'numpy.argmax', 'np.argmax', (['cross_corr'], {}), '(cross_corr)\n', (6034, 6046), True, 'import numpy as np\n'), ((15652, 15666), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (15664, 15666), True, 'import pandas as pd\n'), ((16636, 16681), 'os.path.join', 'os.path.join', (['target_dir', 'f"""{table_name}.csv"""'], {}), "(target_dir, f'{table_name}.csv')\n", (16648, 16681), False, 'import os\n'), ((6407, 6425), 'numpy.max', 'np.max', (['cross_corr'], {}), '(cross_corr)\n', (6413, 6425), True, 'import numpy as np\n'), ((8441, 8474), 'pandas.DataFrame', 
'pd.DataFrame', (['ref_signals[source]'], {}), '(ref_signals[source])\n', (8453, 8474), True, 'import pandas as pd\n')] |
from collections import deque, defaultdict

# Read the expected largest size, then the snack sizes in arrival order.
num_snacks = int(input())
snacks = deque([int(num) for num in input().split(" ")])
# other_sizes[size] is True once a snack of that size has arrived but could
# not be placed yet (it waits until all larger snacks have been placed).
other_sizes = defaultdict(bool)
# Cleanup of the original: dropped the dead commented-out code, the redundant
# `if snacks:` inside `while snacks:`, and the inner `if ... == True: ...
# continue` that duplicated the `while` condition.  Output is unchanged.
while snacks:
    current_size = snacks.popleft()
    if current_size == num_snacks:
        # The largest outstanding snack arrived: place it, then flush every
        # consecutively smaller snack that was already waiting.
        placed = [current_size]
        num_snacks -= 1
        while other_sizes[num_snacks]:
            placed.append(num_snacks)
            num_snacks -= 1
        print(*placed)
    else:
        # Cannot be placed yet -- remember it for a later flush.
        other_sizes[current_size] = True
print()
| [
"collections.defaultdict"
] | [((141, 158), 'collections.defaultdict', 'defaultdict', (['bool'], {}), '(bool)\n', (152, 158), False, 'from collections import deque, defaultdict\n')] |
#!/usr/bin/env python3
# Copyright 2022 Johns Hopkins University (authors: <NAME>)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file computes fbank features of the SPGISpeech dataset.
It looks for manifests in the directory data/manifests.
The generated fbank features are saved in data/fbank.
"""
import argparse
import logging
from pathlib import Path
import torch
from lhotse import LilcomChunkyWriter, load_manifest_lazy
from lhotse.features.kaldifeat import (
KaldifeatFbank,
KaldifeatFbankConfig,
KaldifeatFrameOptions,
KaldifeatMelOptions,
)
# Torch's multithreaded behavior needs to be disabled or
# it wastes a lot of CPU and slows things down.
# Do this outside of main() in case it needs to take effect
# even when we are not invoking the main (e.g. when spawning subprocesses).
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
def get_args():
    """Parse the command-line options controlling which subsets get features."""
    parser = argparse.ArgumentParser()
    # Train-set splitting controls.
    parser.add_argument("--num-splits", type=int, default=20,
                        help="Number of splits for the train set.")
    parser.add_argument("--start", type=int, default=0,
                        help="Start index of the train set split.")
    parser.add_argument("--stop", type=int, default=-1,
                        help="Stop index of the train set split.")
    # Which subsets to process.
    parser.add_argument("--test", action="store_true",
                        help="If set, only compute features for the dev and val set.")
    parser.add_argument("--train", action="store_true",
                        help="If set, only compute features for the train set.")
    return parser.parse_args()
def compute_fbank_spgispeech(args):
    """Compute fbank features for SPGISpeech and write them under data/fbank.
    Manifests are read from data/manifests; ``args.train`` / ``args.test``
    select which subsets are processed (see ``get_args``).
    """
    assert args.train or args.test, "Either train or test must be set."
    src_dir = Path("data/manifests")
    output_dir = Path("data/fbank")
    # 80-bin fbank at 16 kHz, extracted on the GPU.
    extractor = KaldifeatFbank(
        KaldifeatFbankConfig(
            frame_opts=KaldifeatFrameOptions(sampling_rate=16000),
            mel_opts=KaldifeatMelOptions(num_bins=80),
            device="cuda",
        )
    )
    if args.train:
        logging.info("Processing train")
        cut_set = load_manifest_lazy(src_dir / "cuts_train_raw.jsonl.gz")
        # Split the lazy train manifest into num_splits chunks on disk.
        cut_sets = cut_set.split_lazy(
            output_dir=src_dir / f"cuts_train_raw_split{args.num_splits}",
            chunk_size=len(cut_set) // args.num_splits,
        )
        start = args.start
        stop = min(args.stop, args.num_splits) if args.stop > 0 else args.num_splits
        num_digits = len(str(args.num_splits))
        for i in range(start, stop):
            # Split files are 1-based and zero-padded, e.g. "03" of 20.
            idx = f"{i + 1}".zfill(num_digits)
            logging.info(f"Processing train split {i}")
            cs = cut_sets[i].compute_and_store_features_batch(
                extractor=extractor,
                storage_path=output_dir / f"feats_train_{idx}",
                batch_duration=500,
                num_workers=4,
                storage_type=LilcomChunkyWriter,
            )
            cs.to_file(src_dir / f"cuts_train_{idx}.jsonl.gz")
    if args.test:
        for partition in ["dev", "val"]:
            # Skip subsets whose cuts file already exists.
            if (output_dir / f"cuts_{partition}.jsonl.gz").is_file():
                logging.info(f"{partition} already exists - skipping.")
                continue
            logging.info(f"Processing {partition}")
            cut_set = load_manifest_lazy(src_dir / f"cuts_{partition}_raw.jsonl.gz")
            cut_set = cut_set.compute_and_store_features_batch(
                extractor=extractor,
                storage_path=output_dir / f"feats_{partition}",
                manifest_path=src_dir / f"cuts_{partition}.jsonl.gz",
                batch_duration=500,
                num_workers=4,
                storage_type=LilcomChunkyWriter,
            )
if __name__ == "__main__":
    # Log format: timestamp, level, source location, message.
    log_format = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
    logging.basicConfig(format=log_format, level=logging.INFO)
    compute_fbank_spgispeech(get_args())
| [
"logging.basicConfig",
"argparse.ArgumentParser",
"pathlib.Path",
"lhotse.load_manifest_lazy",
"torch.set_num_threads",
"lhotse.features.kaldifeat.KaldifeatMelOptions",
"logging.info",
"lhotse.features.kaldifeat.KaldifeatFrameOptions",
"torch.set_num_interop_threads"
] | [((1404, 1428), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (1425, 1428), False, 'import torch\n'), ((1429, 1461), 'torch.set_num_interop_threads', 'torch.set_num_interop_threads', (['(1)'], {}), '(1)\n', (1458, 1461), False, 'import torch\n'), ((1493, 1518), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1516, 1518), False, 'import argparse\n'), ((2391, 2413), 'pathlib.Path', 'Path', (['"""data/manifests"""'], {}), "('data/manifests')\n", (2395, 2413), False, 'from pathlib import Path\n'), ((2431, 2449), 'pathlib.Path', 'Path', (['"""data/fbank"""'], {}), "('data/fbank')\n", (2435, 2449), False, 'from pathlib import Path\n'), ((4740, 4797), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': 'formatter', 'level': 'logging.INFO'}), '(format=formatter, level=logging.INFO)\n', (4759, 4797), False, 'import logging\n'), ((2773, 2805), 'logging.info', 'logging.info', (['"""Processing train"""'], {}), "('Processing train')\n", (2785, 2805), False, 'import logging\n'), ((2824, 2879), 'lhotse.load_manifest_lazy', 'load_manifest_lazy', (["(src_dir / 'cuts_train_raw.jsonl.gz')"], {}), "(src_dir / 'cuts_train_raw.jsonl.gz')\n", (2842, 2879), False, 'from lhotse import LilcomChunkyWriter, load_manifest_lazy\n'), ((3468, 3511), 'logging.info', 'logging.info', (['f"""Processing train split {i}"""'], {}), "(f'Processing train split {i}')\n", (3480, 3511), False, 'import logging\n'), ((4089, 4128), 'logging.info', 'logging.info', (['f"""Processing {partition}"""'], {}), "(f'Processing {partition}')\n", (4101, 4128), False, 'import logging\n'), ((4151, 4213), 'lhotse.load_manifest_lazy', 'load_manifest_lazy', (["(src_dir / f'cuts_{partition}_raw.jsonl.gz')"], {}), "(src_dir / f'cuts_{partition}_raw.jsonl.gz')\n", (4169, 4213), False, 'from lhotse import LilcomChunkyWriter, load_manifest_lazy\n'), ((2585, 2635), 'lhotse.features.kaldifeat.KaldifeatFrameOptions', 'KaldifeatFrameOptions', ([], {'sampling_rate': 
'sampling_rate'}), '(sampling_rate=sampling_rate)\n', (2606, 2635), False, 'from lhotse.features.kaldifeat import KaldifeatFbank, KaldifeatFbankConfig, KaldifeatFrameOptions, KaldifeatMelOptions\n'), ((2658, 2700), 'lhotse.features.kaldifeat.KaldifeatMelOptions', 'KaldifeatMelOptions', ([], {'num_bins': 'num_mel_bins'}), '(num_bins=num_mel_bins)\n', (2677, 2700), False, 'from lhotse.features.kaldifeat import KaldifeatFbank, KaldifeatFbankConfig, KaldifeatFrameOptions, KaldifeatMelOptions\n'), ((3996, 4051), 'logging.info', 'logging.info', (['f"""{partition} already exists - skipping."""'], {}), "(f'{partition} already exists - skipping.')\n", (4008, 4051), False, 'import logging\n')] |
# ! /usr/bin/python3
"""### Provides tools for maps and heightmaps
This module contains functions to:
* Calculate a heightmap ideal for building
* Visualise numpy arrays
"""
__all__ = ['calcGoodHeightmap']
# __version__
import cv2
import matplotlib.pyplot as plt
import numpy as np
def calcGoodHeightmap(worldSlice):
    """**Calculates a heightmap ideal for building.**
    Trees are ignored and water is considered ground.
    Args:
        worldSlice (WorldSlice): an instance of the WorldSlice class containing the raw heightmaps and block data
    Returns:
        any: numpy array containing the calculated heightmap
    """
    hm_mbnl = worldSlice.heightmaps["MOTION_BLOCKING_NO_LEAVES"]
    # BUGFIX: ``hm_mbnl[:]`` returns a *view* of the numpy array, so the loop
    # below used to mutate worldSlice's own heightmap while stepping down
    # through tree trunks. Take an explicit copy instead.
    heightmapNoTrees = np.array(hm_mbnl)
    area = worldSlice.rect
    for x in range(area[2]):
        for z in range(area[3]):
            # Walk downwards while the block below is a tree trunk ('*_log').
            while True:
                y = heightmapNoTrees[x, z]
                block = worldSlice.getBlockAt(
                    (area[0] + x, y - 1, area[1] + z))
                if block[-4:] == '_log':
                    heightmapNoTrees[x, z] -= 1
                else:
                    break
    # Element-wise minimum keeps water surfaces (ground) from the raw map.
    return np.minimum(hm_mbnl, heightmapNoTrees)
def visualize(*arrays, title=None, autonormalize=True):
    """**Visualizes one or multiple numpy arrays.**
    Args:
        title (str, optional): display title. Defaults to None.
        autonormalize (bool, optional): Normalizes the array to be between 0 (black) and 255 (white). Defaults to True.
    """
    for img in arrays:
        # Optionally rescale to the full 8-bit range before display.
        data = (normalize(img) * 255).astype(np.uint8) if autonormalize else img
        plt.figure()
        if title:
            plt.title(title)
        # OpenCV arrays are BGR; matplotlib expects RGB.
        plt.imshow(cv2.cvtColor(data, cv2.COLOR_BGR2RGB))
    plt.show()
def normalize(array):
    """**Normalizes the array to contain values from 0 to 1.**"""
    lowest = array.min()
    span = array.max() - lowest
    return (array - lowest) / span
| [
"matplotlib.pyplot.imshow",
"numpy.minimum",
"matplotlib.pyplot.figure",
"cv2.cvtColor",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((1790, 1800), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1798, 1800), True, 'import matplotlib.pyplot as plt\n'), ((1153, 1190), 'numpy.minimum', 'np.minimum', (['hm_mbnl', 'heightmapNoTrees'], {}), '(hm_mbnl, heightmapNoTrees)\n', (1163, 1190), True, 'import numpy as np\n'), ((1627, 1639), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1637, 1639), True, 'import matplotlib.pyplot as plt\n'), ((1707, 1745), 'cv2.cvtColor', 'cv2.cvtColor', (['array', 'cv2.COLOR_BGR2RGB'], {}), '(array, cv2.COLOR_BGR2RGB)\n', (1719, 1745), False, 'import cv2\n'), ((1764, 1785), 'matplotlib.pyplot.imshow', 'plt.imshow', (['plt_image'], {}), '(plt_image)\n', (1774, 1785), True, 'import matplotlib.pyplot as plt\n'), ((1670, 1686), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1679, 1686), True, 'import matplotlib.pyplot as plt\n')] |
"""Wrapper around project converter to convert a project"""
from vb2py import projectconverter
# Delegate straight to the converter's CLI entry point.
if __name__ == '__main__':
    projectconverter.main()
"vb2py.projectconverter.main"
] | [((129, 152), 'vb2py.projectconverter.main', 'projectconverter.main', ([], {}), '()\n', (150, 152), False, 'from vb2py import projectconverter\n')] |
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
#Depending on PyTorch version, the name of the functional module
#May either have an underscore or not!
oldversion = False
try:
import torch.optim._functional as F
except:
import torch.optim.functional as F
oldversion = True
from tpstorch import _rank, _world_size
from tpstorch import dist
class ParallelAdam(Optimizer):
    r"""Adam optimizer whose gradients are all-reduced across MPI ranks.
    Identical in spirit to ``torch.optim.Adam`` except that, before the Adam
    update, every parameter's gradient is combined across all workers with
    ``dist.all_reduce`` so each rank applies the same update. Useful when
    running many unbiased simulations in parallel.
    NOTE(review): an all-reduce typically *sums* per-rank gradients rather
    than averaging them (the original docstring said "average") -- confirm
    the reduction op used by ``tpstorch.dist``; a sum merely rescales the
    effective learning rate by the world size.
    For implementation details consult ``torch.optim.Adam``.
    """
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, amsgrad=False):
        # Validate hyperparameters up front, mirroring torch.optim.Adam.
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad)
        super(ParallelAdam, self).__init__(params, defaults)
    def __setstate__(self, state):
        # Checkpoints saved before the 'amsgrad' option existed default it off.
        super(ParallelAdam, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)
    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            state_sums = []  # unused
            max_exp_avg_sqs = []
            state_steps = []
            beta1, beta2 = group['betas']
            for p in group['params']:
                if p.grad is not None:
                    params_with_grad.append(p)
                    if p.grad.is_sparse:
                        raise RuntimeError('ParallelAdam does not support sparse gradients!')
                    # This is the new part relative to stock Adam: all-reduce
                    # the gradient (in place) across MPI ranks so every worker
                    # sees the same combined gradient.
                    d_p = p.grad
                    dist.all_reduce(d_p)
                    grads.append(d_p)
                    state = self.state[p]
                    # Lazy state initialization
                    if len(state) == 0:
                        state['step'] = 0
                        # Exponential moving average of gradient values
                        state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        # Exponential moving average of squared gradient values
                        state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        if group['amsgrad']:
                            # Maintains max of all exp. moving avg. of sq. grad. values
                            state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    exp_avgs.append(state['exp_avg'])
                    exp_avg_sqs.append(state['exp_avg_sq'])
                    if group['amsgrad']:
                        max_exp_avg_sqs.append(state['max_exp_avg_sq'])
                    # update the steps for each param group update
                    state['step'] += 1
                    # record the step after step update
                    state_steps.append(state['step'])
            # Delegate the actual Adam math to torch's functional kernel.
            # NOTE(review): this positional signature matches the torch
            # versions handled by the module-level functional import fallback;
            # newer torch releases may require keyword arguments -- verify.
            F.adam(params_with_grad,
                   grads,
                   exp_avgs,
                   exp_avg_sqs,
                   max_exp_avg_sqs,
                   state_steps,
                   group['amsgrad'],
                   beta1,
                   beta2,
                   group['lr'],
                   group['weight_decay'],
                   group['eps']
                   )
        return loss
class ParallelSGD(Optimizer):
    r"""Stochastic gradient descent (optionally with momentum) whose gradients
    are all-reduced across MPI ranks.
    Each parameter's gradient is combined across all workers with
    ``dist.all_reduce`` before the usual SGD update, so every rank applies the
    same update. Useful when running many unbiased simulations in parallel.
    For implementation details consult ``torch.optim.SGD``.
    """
    def __init__(self, params, sampler=required, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False):
        # ``sampler`` is accepted for interface symmetry but not used here.
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super(ParallelSGD, self).__init__(params, defaults)
    def __setstate__(self, state):
        super(ParallelSGD, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)
    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            for p in group['params']:
                if p.grad is None:
                    continue
                # Gradient of parameters:
                # p.grad should be the average of grad(x)/c(x) over the minibatch
                d_p = p.grad
                # This is the new part relative to stock SGD: all-reduce the
                # gradient (in place) across MPI ranks.
                dist.all_reduce(d_p)
                if weight_decay != 0:
                    d_p = d_p.add(p, alpha=weight_decay)
                if momentum != 0:
                    # Classic momentum buffer, as in torch.optim.SGD.
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                    buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                    if nesterov:
                        d_p = d_p.add(buf, alpha=momentum)
                    else:
                        d_p = buf
                p.add_(d_p, alpha=-group['lr'])
        return loss
class FTSImplicitUpdate(Optimizer):
    r"""Finite-Temperature String (FTS) method update computed implicitly.
    The implicit update solves a linear system at every step; because the
    system matrix never changes it is built and inverted once in ``__init__``.
    The implicit form provides a larger stability region than the explicit
    update in :class:`FTSUpdate`.
    """
    def __init__(self, params, sampler=required, deltatau=required, dimN=required, kappa = 0.1, freeze=False):
        # deltatau: string-update time step; dimN: dimensionality of a node;
        # kappa: strength of the smoothing (spring) coupling between neighbors;
        # freeze: if True the string's end points are held fixed during steps.
        # ``sampler`` is accepted for interface symmetry but not used here.
        if deltatau is not required and deltatau < 0.0:
            raise ValueError("Invalid step size: {}".format(deltatau))
        defaults = dict(lr=deltatau, kappa=kappa, freeze=freeze)
        super(FTSImplicitUpdate, self).__init__(params, defaults)
        self.avgconfig = 0
        self.nsamples = 0.0
        # Build the (dimN*world_size) x (dimN*world_size) implicit-update
        # matrix. It stays constant, so invert it once here and reuse the
        # inverse at every step. Ordering: tridiagonal blocks are laid out per
        # coordinate direction, one world_size-sized band at a time.
        self.matrix = torch.zeros(dimN*_world_size, dimN*_world_size, dtype=torch.float)
        shape = _world_size-1
        # NOTE(review): the zero matrix above is immediately rebuilt below and
        # ``shape`` is never used -- both look like leftover dead code.
        torch.set_printoptions(threshold=10000)
        self.matrix = torch.zeros(dimN*_world_size, dimN*_world_size, dtype=torch.float)
        # First and last node of each band: identity plus the time step.
        for i in range(dimN):
            self.matrix[i*_world_size,i*_world_size] = 1.0+deltatau
            self.matrix[(i+1)*_world_size-1,(i+1)*_world_size-1] = 1.0+deltatau
        # Interior nodes: tridiagonal rows coupling each node to its neighbors.
        for i in range(dimN):
            for j in range(1,_world_size-1):
                self.matrix[i*_world_size+j,i*_world_size+j] = 1.0+deltatau+2.0*kappa*deltatau*_world_size
                self.matrix[i*_world_size+j,i*_world_size+j-1] = -1.0*kappa*deltatau*_world_size
                self.matrix[i*_world_size+j,i*_world_size+j+1] = -1.0*kappa*deltatau*_world_size
        self.dimN = dimN
        self.matrix_inverse = torch.inverse(self.matrix)
    def __setstate__(self, state):
        super(FTSImplicitUpdate, self).__setstate__(state)
    @torch.no_grad()
    def step(self, configs, batch_size):
        """Performs a single optimization step of the implicit FTS update.
        Arguments:
            configs: batch of sampled configurations for this rank's node,
                averaged along dim 0 to estimate the local mean configuration.
            batch_size: accepted for interface symmetry; not used here.
        """
        for group in self.param_groups:
            kappa = group['kappa']
            freeze = group['freeze']
            # NOTE(review): ``kappa`` and ``freeze`` are read but unused in
            # this implicit step (the smoothing is baked into the matrix).
            for p in group['params']:
                if p.requires_grad is True:
                    print("Warning! String stored in Rank [{}] has gradient enabled. Make sure that the string is not being updated during NN training!".format(_rank))
                ## (1) Compute the average configuration
                # Each rank fills only its own row; the all-reduce assembles
                # the full set of nodal averages on every rank.
                avgconfig = torch.zeros_like(p)
                avgconfig[_rank] = torch.mean(configs,dim=0)
                dist.all_reduce(avgconfig)
                ## (1) Implicit Stochastic Gradient Descent
                force = p.clone()+group['lr']*(avgconfig)
                p.zero_()
                # Solve the implicit linear system via the precomputed inverse.
                p.add_(torch.matmul(self.matrix_inverse, force.t().flatten()).view(-1,_world_size).t())
                ## (2) Re-parameterization/Projection
                # Compute the new intermediate nodal parameters, which do not
                # yet obey the equal arc-length parametrization.
                alpha = torch.linspace(0,1,_world_size)
                ell_k = torch.norm(p[1:].clone()-p[:-1].clone(),dim=1)
                ellsum = torch.sum(ell_k)
                ell_k /= ellsum
                intm_alpha = torch.zeros_like(alpha)
                for i in range(1, p.shape[0]):
                    intm_alpha[i] += ell_k[i-1]+intm_alpha[i-1]
                # IMPORTANT: this interpolation assumes the new position of a
                # node lies between the previous positions of its neighbors.
                # Interpolate back onto the equal-arc-length parametrization.
                newstring = torch.zeros_like(p)
                newstring[0] = p[0].clone()/_world_size
                newstring[-1] = p[-1].clone()/_world_size
                if _rank > 0 and _rank < _world_size-1:
                    index = torch.bucketize(alpha[_rank],intm_alpha)
                    weight = (alpha[_rank]-intm_alpha[index-1])/(intm_alpha[index]-intm_alpha[index-1])
                    if index == _rank+1:
                        newstring[_rank] = torch.lerp(p.clone()[_rank],p.clone()[_rank+1],weight)
                    elif index == _rank:
                        newstring[_rank] = torch.lerp(p.clone()[_rank-1],p.clone()[_rank],weight)
                    elif index == _rank-1:
                        newstring[_rank] = torch.lerp(p.clone()[_rank-2],p.clone()[_rank],weight)
                    else:
                        raise RuntimeError("Rank [{}]: You need to interpolate from points beyond your nearest neighbors. \n \
                                Reduce your timestep for the string update!".format(_rank))
                # Each rank contributes its share of the new string; the
                # all-reduce (assumed sum -- TODO confirm the reduction op)
                # reassembles the full string on every rank.
                dist.all_reduce(newstring)
                p.zero_()
                p.add_(newstring.clone().detach())
                del newstring
class FTSUpdate(Optimizer):
    r"""Explicit Finite-Temperature String (FTS) method update.
    The FTS update can be viewed as stochastic gradient descent on the nodal
    coordinates, so the usual SGD accelerations (momentum, Nesterov) are
    available to speed up convergence.
    """
    def __init__(self, params, sampler=required, deltatau=required, momentum=0, nesterov=False, kappa = 0.1, freeze=False):
        # deltatau: string-update time step; kappa: strength of the smoothing
        # (spring) coupling between neighboring nodes; freeze: if True the
        # string's end points are held fixed. ``sampler`` is accepted for
        # interface symmetry but not used here.
        if deltatau is not required and deltatau < 0.0:
            raise ValueError("Invalid step size: {}".format(deltatau))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        defaults = dict(lr=deltatau, momentum=momentum, nesterov=nesterov, kappa=kappa, freeze=freeze)
        super(FTSUpdate, self).__init__(params, defaults)
        self.avgconfig = 0
        self.nsamples = 0.0
    def __setstate__(self, state):
        super(FTSUpdate, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)
    @torch.no_grad()
    def step(self, configs, batch_size):
        """Performs a single optimization step of the explicit FTS update.
        Arguments:
            configs: batch of sampled configurations for this rank's node,
                averaged along dim 0 to estimate the local mean configuration.
            batch_size: accepted for interface symmetry; not used here.
        """
        for group in self.param_groups:
            momentum = group['momentum']
            kappa = group['kappa']
            nesterov = group['nesterov']
            freeze = group['freeze']
            for p in group['params']:
                if p.requires_grad is True:
                    print("Warning! String stored in Rank [{}] has gradient enabled. Make sure that the string is not being updated during NN training!".format(_rank))
                ## (1) Compute the average configuration
                # Each rank fills only its own row; the all-reduce assembles
                # the full set of nodal averages on every rank.
                avgconfig = torch.zeros_like(p)
                avgconfig[_rank] = torch.mean(configs,dim=0)
                dist.all_reduce(avgconfig)
                ## (1) Stochastic Gradient Descent
                # Interior nodes: pull toward the mean configuration plus a
                # discrete-Laplacian smoothing term scaled by kappa.
                d_p = torch.zeros_like(p)
                d_p[1:-1] = (p[1:-1]-avgconfig[1:-1])-kappa*_world_size*(p[0:-2]-2*p[1:-1]+p[2:])
                if freeze is False:
                    # End points also move (without smoothing) unless frozen.
                    d_p[0] = (p[0]-avgconfig[0])
                    d_p[-1] = (p[-1]-avgconfig[-1])
                if momentum != 0:
                    # Classic momentum buffer, as in torch.optim.SGD.
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                    buf.mul_(momentum).add_(d_p)
                    if nesterov:
                        d_p = d_p.add(buf, alpha=momentum)
                    else:
                        d_p = buf
                p.add_(d_p, alpha=-group['lr'])
                ## (2) Re-parameterization/Projection
                # Compute the new intermediate nodal parameters, which do not
                # yet obey the equal arc-length parametrization.
                alpha = torch.linspace(0,1,_world_size)
                ell_k = torch.norm(p[1:].clone()-p[:-1].clone(),dim=1)
                ellsum = torch.sum(ell_k)
                ell_k /= ellsum
                intm_alpha = torch.zeros_like(alpha)
                for i in range(1, p.shape[0]):
                    intm_alpha[i] += ell_k[i-1]+intm_alpha[i-1]
                # IMPORTANT: this interpolation assumes the new position of a
                # node lies between the previous positions of its neighbors.
                # Interpolate back onto the equal-arc-length parametrization.
                newstring = torch.zeros_like(p)
                newstring[0] = p[0].clone()/_world_size
                newstring[-1] = p[-1].clone()/_world_size
                if _rank > 0 and _rank < _world_size-1:
                    index = torch.bucketize(alpha[_rank],intm_alpha)
                    weight = (alpha[_rank]-intm_alpha[index-1])/(intm_alpha[index]-intm_alpha[index-1])
                    if index == _rank+1:
                        newstring[_rank] = torch.lerp(p[_rank].clone(),p[_rank+1].clone(),weight)
                    elif index == _rank:
                        newstring[_rank] = torch.lerp(p[_rank-1].clone(),p[_rank].clone(),weight)
                    elif index == _rank-1:
                        newstring[_rank] = torch.lerp(p[_rank-2].clone(),p[_rank].clone(),weight)
                    else:
                        raise RuntimeError("Rank [{}]: You need to interpolate from points beyond your nearest neighbors. \n \
                                Reduce your timestep for the string update!".format(_rank))
                # Each rank contributes its share of the new string; the
                # all-reduce reassembles it on every rank.
                dist.all_reduce(newstring)
                p.zero_()
                p.add_(newstring.clone().detach())
                del newstring
| [
"torch.enable_grad",
"torch.set_printoptions",
"torch.mean",
"torch.optim.functional.adam",
"torch.sum",
"torch.linspace",
"torch.clone",
"torch.no_grad",
"torch.zeros_like",
"tpstorch.dist.all_reduce",
"torch.bucketize",
"torch.zeros",
"torch.inverse"
] | [((1804, 1819), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1817, 1819), False, 'import torch\n'), ((6107, 6122), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6120, 6122), False, 'import torch\n'), ((10112, 10127), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10125, 10127), False, 'import torch\n'), ((14295, 14310), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (14308, 14310), False, 'import torch\n'), ((8802, 8872), 'torch.zeros', 'torch.zeros', (['(dimN * _world_size)', '(dimN * _world_size)'], {'dtype': 'torch.float'}), '(dimN * _world_size, dimN * _world_size, dtype=torch.float)\n', (8813, 8872), False, 'import torch\n'), ((9197, 9236), 'torch.set_printoptions', 'torch.set_printoptions', ([], {'threshold': '(10000)'}), '(threshold=10000)\n', (9219, 9236), False, 'import torch\n'), ((9259, 9329), 'torch.zeros', 'torch.zeros', (['(dimN * _world_size)', '(dimN * _world_size)'], {'dtype': 'torch.float'}), '(dimN * _world_size, dimN * _world_size, dtype=torch.float)\n', (9270, 9329), False, 'import torch\n'), ((9985, 10011), 'torch.inverse', 'torch.inverse', (['self.matrix'], {}), '(self.matrix)\n', (9998, 10011), False, 'import torch\n'), ((4267, 4442), 'torch.optim.functional.adam', 'F.adam', (['params_with_grad', 'grads', 'exp_avgs', 'exp_avg_sqs', 'max_exp_avg_sqs', 'state_steps', "group['amsgrad']", 'beta1', 'beta2', "group['lr']", "group['weight_decay']", "group['eps']"], {}), "(params_with_grad, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs,\n state_steps, group['amsgrad'], beta1, beta2, group['lr'], group[\n 'weight_decay'], group['eps'])\n", (4273, 4442), True, 'import torch.optim.functional as F\n'), ((2120, 2139), 'torch.enable_grad', 'torch.enable_grad', ([], {}), '()\n', (2137, 2139), False, 'import torch\n'), ((6423, 6442), 'torch.enable_grad', 'torch.enable_grad', ([], {}), '()\n', (6440, 6442), False, 'import torch\n'), ((7105, 7125), 'tpstorch.dist.all_reduce', 'dist.all_reduce', (['d_p'], {}), '(d_p)\n', (7120, 
7125), False, 'from tpstorch import dist\n'), ((10681, 10700), 'torch.zeros_like', 'torch.zeros_like', (['p'], {}), '(p)\n', (10697, 10700), False, 'import torch\n'), ((10736, 10762), 'torch.mean', 'torch.mean', (['configs'], {'dim': '(0)'}), '(configs, dim=0)\n', (10746, 10762), False, 'import torch\n'), ((10778, 10804), 'tpstorch.dist.all_reduce', 'dist.all_reduce', (['avgconfig'], {}), '(avgconfig)\n', (10793, 10804), False, 'from tpstorch import dist\n'), ((11400, 11433), 'torch.linspace', 'torch.linspace', (['(0)', '(1)', '_world_size'], {}), '(0, 1, _world_size)\n', (11414, 11433), False, 'import torch\n'), ((11528, 11544), 'torch.sum', 'torch.sum', (['ell_k'], {}), '(ell_k)\n', (11537, 11544), False, 'import torch\n'), ((11606, 11629), 'torch.zeros_like', 'torch.zeros_like', (['alpha'], {}), '(alpha)\n', (11622, 11629), False, 'import torch\n'), ((12051, 12070), 'torch.zeros_like', 'torch.zeros_like', (['p'], {}), '(p)\n', (12067, 12070), False, 'import torch\n'), ((13109, 13135), 'tpstorch.dist.all_reduce', 'dist.all_reduce', (['newstring'], {}), '(newstring)\n', (13124, 13135), False, 'from tpstorch import dist\n'), ((15101, 15120), 'torch.zeros_like', 'torch.zeros_like', (['p'], {}), '(p)\n', (15117, 15120), False, 'import torch\n'), ((15156, 15182), 'torch.mean', 'torch.mean', (['configs'], {'dim': '(0)'}), '(configs, dim=0)\n', (15166, 15182), False, 'import torch\n'), ((15198, 15224), 'tpstorch.dist.all_reduce', 'dist.all_reduce', (['avgconfig'], {}), '(avgconfig)\n', (15213, 15224), False, 'from tpstorch import dist\n'), ((15315, 15334), 'torch.zeros_like', 'torch.zeros_like', (['p'], {}), '(p)\n', (15331, 15334), False, 'import torch\n'), ((16420, 16453), 'torch.linspace', 'torch.linspace', (['(0)', '(1)', '_world_size'], {}), '(0, 1, _world_size)\n', (16434, 16453), False, 'import torch\n'), ((16548, 16564), 'torch.sum', 'torch.sum', (['ell_k'], {}), '(ell_k)\n', (16557, 16564), False, 'import torch\n'), ((16626, 16649), 'torch.zeros_like', 
'torch.zeros_like', (['alpha'], {}), '(alpha)\n', (16642, 16649), False, 'import torch\n'), ((17071, 17090), 'torch.zeros_like', 'torch.zeros_like', (['p'], {}), '(p)\n', (17087, 17090), False, 'import torch\n'), ((18129, 18155), 'tpstorch.dist.all_reduce', 'dist.all_reduce', (['newstring'], {}), '(newstring)\n', (18144, 18155), False, 'from tpstorch import dist\n'), ((2936, 2956), 'tpstorch.dist.all_reduce', 'dist.all_reduce', (['d_p'], {}), '(d_p)\n', (2951, 2956), False, 'from tpstorch import dist\n'), ((12269, 12310), 'torch.bucketize', 'torch.bucketize', (['alpha[_rank]', 'intm_alpha'], {}), '(alpha[_rank], intm_alpha)\n', (12284, 12310), False, 'import torch\n'), ((17289, 17330), 'torch.bucketize', 'torch.bucketize', (['alpha[_rank]', 'intm_alpha'], {}), '(alpha[_rank], intm_alpha)\n', (17304, 17330), False, 'import torch\n'), ((3324, 3380), 'torch.zeros_like', 'torch.zeros_like', (['p'], {'memory_format': 'torch.preserve_format'}), '(p, memory_format=torch.preserve_format)\n', (3340, 3380), False, 'import torch\n'), ((3507, 3563), 'torch.zeros_like', 'torch.zeros_like', (['p'], {'memory_format': 'torch.preserve_format'}), '(p, memory_format=torch.preserve_format)\n', (3523, 3563), False, 'import torch\n'), ((3751, 3807), 'torch.zeros_like', 'torch.zeros_like', (['p'], {'memory_format': 'torch.preserve_format'}), '(p, memory_format=torch.preserve_format)\n', (3767, 3807), False, 'import torch\n'), ((7444, 7460), 'torch.clone', 'torch.clone', (['d_p'], {}), '(d_p)\n', (7455, 7460), False, 'import torch\n'), ((15810, 15826), 'torch.clone', 'torch.clone', (['d_p'], {}), '(d_p)\n', (15821, 15826), False, 'import torch\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""data prepare for CUB200-2011"""
import os
import shutil
import time
# Copy CUB200-2011 images into dataset/train and dataset/test according to
# the official train/test split files.
path = './'
ROOT_TRAIN = path + 'images/train/'
ROOT_TEST = path + 'images/test/'
BATCH_SIZE = 16
time_start = time.time()
path_images = path + 'images.txt'
path_split = path + 'train_test_split.txt'
trian_save_path = path + 'dataset/train/'
test_save_path = path + 'dataset/test/'
# images.txt: one "<index> <class_dir>/<file_name>" entry per line.
images = []
with open(path_images, 'r') as f:
    for line in f:
        images.append(list(line.strip('\n').split(',')))
# train_test_split.txt: one "<index> <0|1>" entry per line (1 = train image).
split = []
with open(path_split, 'r') as f_:
    for line in f_:
        split.append(list(line.strip('\n').split(',')))
num = len(images)
for k in range(num):
    rel_path = images[k][0].split(' ')[1]   # e.g. "001.Albatross/img.jpg"
    file_name = rel_path.split('/')[0]      # class directory name
    image_name = rel_path.split('/')[1]     # image file name
    # The split flag is the last character of the line ('1' = training image).
    save_root = trian_save_path if int(split[k][0][-1]) == 1 else test_save_path
    # Create the class directory if needed; the copy is identical either way
    # (replaces the duplicated isdir/makedirs branches and unused temporaries).
    os.makedirs(save_root + file_name, exist_ok=True)
    shutil.copy(path + 'images/' + rel_path,
                save_root + file_name + '/' + image_name)
    print('%s finished!' % image_name)
time_end = time.time()
print('CUB200 finished, time consume %s!!' % (time_end - time_start))
| [
"os.path.isdir",
"time.time",
"os.makedirs"
] | [((852, 863), 'time.time', 'time.time', ([], {}), '()\n', (861, 863), False, 'import time\n'), ((2677, 2688), 'time.time', 'time.time', ([], {}), '()\n', (2686, 2688), False, 'import time\n'), ((1442, 1484), 'os.path.isdir', 'os.path.isdir', (['(trian_save_path + file_name)'], {}), '(trian_save_path + file_name)\n', (1455, 1484), False, 'import os\n'), ((1993, 2034), 'os.path.isdir', 'os.path.isdir', (['(test_save_path + file_name)'], {}), '(test_save_path + file_name)\n', (2006, 2034), False, 'import os\n'), ((1685, 1725), 'os.makedirs', 'os.makedirs', (['(trian_save_path + file_name)'], {}), '(trian_save_path + file_name)\n', (1696, 1725), False, 'import os\n'), ((2380, 2419), 'os.makedirs', 'os.makedirs', (['(test_save_path + file_name)'], {}), '(test_save_path + file_name)\n', (2391, 2419), False, 'import os\n')] |
from pathlib import Path
def phase1(values):
    """Count how many elements are strictly greater than their predecessor.

    Args:
        values: A sequence of mutually comparable items (e.g. ints).

    Returns:
        int: The number of indices ``i > 0`` with ``values[i] > values[i-1]``.
    """
    # Guard the empty case: the original indexed values[0] unconditionally
    # and raised IndexError on an empty sequence.
    if not values:
        return 0
    total, prev = 0, values[0]
    for curr in values:
        if curr > prev:
            total += 1
        prev = curr
    return total
def phase2(values):
    """Count increases between successive sums of a sliding window of three."""
    windows = [a + b + c for a, b, c in zip(values, values[1:], values[2:])]
    return phase1(windows)
if __name__ == "__main__":
    # Read the puzzle input (one integer per line) relative to this file.
    with Path(__file__).parent.joinpath("../input/day1").open(encoding="UTF-8") as f:
        VALUES = [int(i) for i in f]
    print(f"Phase 1: {phase1(VALUES)}")
    print(f"Phase 2: {phase2(VALUES)}")
| [
"pathlib.Path"
] | [((340, 354), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (344, 354), False, 'from pathlib import Path\n')] |
# Copyright (c) 2017 DataCore Software Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the Fibre Channel Driver for DataCore SANsymphony
storage array.
"""
import mock
from cinder import exception as cinder_exception
from cinder import test
from cinder.tests.unit.volume.drivers.datacore import test_datacore_driver
from cinder.volume.drivers.datacore import fc
# Fibre Channel ports as reported by the mocked SANsymphony API client:
# two client-side initiator ports and two server-side target ports.
PORTS = [
    mock.Mock(Id='initiator_port_id1',
              PortType='FibreChannel',
              PortMode='Initiator',
              PortName='AA-AA-AA-AA-AA-AA-AA-AA',
              HostId='client_id1'),
    mock.Mock(Id='initiator_port_id2',
              PortType='FibreChannel',
              PortMode='Initiator',
              PortName='BB-BB-BB-BB-BB-BB-BB-BB'),
    mock.Mock(Id='target_port_id1',
              PortMode='Target',
              PortName='CC-CC-CC-CC-CC-CC-CC-CC',
              HostId='server_id1'),
    mock.Mock(Id='target_port_id2',
              PortMode='Target',
              PortName='DD-DD-DD-DD-DD-DD-DD-DD',
              HostId='server_id1'),
]
# Logical units returned when a virtual disk is served to a host; each one
# maps a virtual target device to a LUN number.
LOGICAL_UNITS = [
    mock.Mock(VirtualTargetDeviceId='target_device_id1',
              Lun=mock.Mock(Quad=4)),
    mock.Mock(VirtualTargetDeviceId='target_device_id2',
              Lun=mock.Mock(Quad=3)),
    mock.Mock(VirtualTargetDeviceId='target_device_id3',
              Lun=mock.Mock(Quad=2)),
    mock.Mock(VirtualTargetDeviceId='target_device_id4',
              Lun=mock.Mock(Quad=1)),
]
# Virtual target devices linking target ports to initiator ports.
TARGET_DEVICES = [
    mock.Mock(Id='target_device_id1',
              TargetPortId='target_port_id1',
              InitiatorPortId='initiator_port_id1'),
    mock.Mock(Id='target_device_id2',
              TargetPortId='target_port_id2',
              InitiatorPortId='initiator_port_id1'),
    mock.Mock(Id='target_device_id3',
              TargetPortId='target_port_id2',
              InitiatorPortId='initiator_port_id1'),
    mock.Mock(Id='target_device_id4',
              TargetPortId='target_port_id2',
              InitiatorPortId='initiator_port_id2'),
]
class FibreChannelVolumeDriverTestCase(
        test_datacore_driver.DataCoreVolumeDriverTestCase, test.TestCase):
    """Tests for the FC Driver for DataCore SANsymphony storage array."""
    def setUp(self):
        # Wire the module-level port/target-device fixtures into the mocked
        # SANsymphony API client created by the base test case.
        super(FibreChannelVolumeDriverTestCase, self).setUp()
        self.mock_client.get_ports.return_value = PORTS
        self.mock_client.get_target_devices.return_value = TARGET_DEVICES
    @staticmethod
    def init_driver(config):
        # Build and set up an FC driver instance backed by the mocked client.
        driver = fc.FibreChannelVolumeDriver(configuration=config)
        driver.do_setup(None)
        return driver
    def test_validate_connector(self):
        # A connector is valid when it carries both 'host' and 'wwpns'.
        driver = self.init_driver(self.setup_default_configuration())
        connector = {
            'host': 'host_name',
            'wwpns': ['AA-AA-AA-AA-AA-AA-AA-AA'],
        }
        driver.validate_connector(connector)
    def test_validate_connector_failed(self):
        # Missing 'host', missing 'wwpns', or both must be rejected.
        driver = self.init_driver(self.setup_default_configuration())
        connector = {}
        self.assertRaises(cinder_exception.InvalidConnectorException,
                          driver.validate_connector,
                          connector)
        connector = {'host': 'host_name'}
        self.assertRaises(cinder_exception.InvalidConnectorException,
                          driver.validate_connector,
                          connector)
        connector = {'wwpns': ['AA-AA-AA-AA-AA-AA-AA-AA']}
        self.assertRaises(cinder_exception.InvalidConnectorException,
                          driver.validate_connector,
                          connector)
    def test_initialize_connection(self):
        # Serving the disk yields logical units; the driver must return a
        # target wwn/lun pair consistent with the fixture topology.
        (self.mock_client.serve_virtual_disks_to_host
         .return_value) = LOGICAL_UNITS
        virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0]
        client = test_datacore_driver.CLIENTS[0]
        driver = self.init_driver(self.setup_default_configuration())
        volume = test_datacore_driver.VOLUME.copy()
        volume['provider_location'] = virtual_disk.Id
        initiator_wwpns = [port.PortName.replace('-', '').lower() for port
                           in PORTS
                           if port.PortMode == 'Initiator']
        connector = {
            'host': client.HostName,
            'wwpns': initiator_wwpns,
        }
        result = driver.initialize_connection(volume, connector)
        self.assertEqual('fibre_channel', result['driver_volume_type'])
        target_wwns = [port.PortName.replace('-', '').lower() for port
                       in PORTS
                       if port.PortMode == 'Target']
        self.assertIn(result['data']['target_wwn'], target_wwns)
        # Walk wwn -> target port -> target device -> logical unit and check
        # that the returned LUN matches that chain.
        target_wwn = result['data']['target_wwn']
        target_port_id = next((
            port.Id for port
            in PORTS
            if port.PortName.replace('-', '').lower() == target_wwn), None)
        target_device_id = next((
            device.Id for device
            in TARGET_DEVICES
            if device.TargetPortId == target_port_id), None)
        target_lun = next((
            unit.Lun.Quad for unit
            in LOGICAL_UNITS
            if unit.VirtualTargetDeviceId == target_device_id), None)
        self.assertEqual(target_lun, result['data']['target_lun'])
        self.assertFalse(result['data']['target_discovered'])
        self.assertEqual(volume['id'], result['data']['volume_id'])
        self.assertEqual('rw', result['data']['access_mode'])
    def test_initialize_connection_unknown_client(self):
        # When the client is not among the known clients the driver
        # registers it first, then attaches exactly as in the known case.
        client = test_datacore_driver.CLIENTS[0]
        self.mock_client.register_client.return_value = client
        (self.mock_client.get_clients
         .return_value) = test_datacore_driver.CLIENTS[1:]
        (self.mock_client.serve_virtual_disks_to_host
         .return_value) = LOGICAL_UNITS
        virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0]
        driver = self.init_driver(self.setup_default_configuration())
        volume = test_datacore_driver.VOLUME.copy()
        volume['provider_location'] = virtual_disk.Id
        initiator_wwpns = [port.PortName.replace('-', '').lower() for port
                           in PORTS
                           if port.PortMode == 'Initiator']
        connector = {
            'host': client.HostName,
            'wwpns': initiator_wwpns,
        }
        result = driver.initialize_connection(volume, connector)
        self.assertEqual('fibre_channel', result['driver_volume_type'])
        target_wwns = [port.PortName.replace('-', '').lower() for port
                       in PORTS
                       if port.PortMode == 'Target']
        self.assertIn(result['data']['target_wwn'], target_wwns)
        # Same wwn -> port -> device -> LUN consistency check as above.
        target_wwn = result['data']['target_wwn']
        target_port_id = next((
            port.Id for port
            in PORTS
            if port.PortName.replace('-', '').lower() == target_wwn), None)
        target_device_id = next((
            device.Id for device
            in TARGET_DEVICES
            if device.TargetPortId == target_port_id), None)
        target_lun = next((
            unit.Lun.Quad for unit
            in LOGICAL_UNITS
            if unit.VirtualTargetDeviceId == target_device_id), None)
        self.assertEqual(target_lun, result['data']['target_lun'])
        self.assertFalse(result['data']['target_discovered'])
        self.assertEqual(volume['id'], result['data']['volume_id'])
        self.assertEqual('rw', result['data']['access_mode'])
    def test_initialize_connection_failed_not_found(self):
        # An unknown provider_location must raise VolumeDriverException.
        client = test_datacore_driver.CLIENTS[0]
        driver = self.init_driver(self.setup_default_configuration())
        volume = test_datacore_driver.VOLUME.copy()
        volume['provider_location'] = 'wrong_virtual_disk_id'
        initiator_wwpns = [port.PortName.replace('-', '').lower() for port
                           in PORTS
                           if port.PortMode == 'Initiator']
        connector = {
            'host': client.HostName,
            'wwpns': initiator_wwpns,
        }
        self.assertRaises(cinder_exception.VolumeDriverException,
                          driver.initialize_connection,
                          volume,
                          connector)
    def test_initialize_connection_failed_initiator_not_found(self):
        # A connector whose wwpns match no known initiator port must fail.
        (self.mock_client.serve_virtual_disks_to_host
         .return_value) = LOGICAL_UNITS
        virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0]
        client = test_datacore_driver.CLIENTS[0]
        driver = self.init_driver(self.setup_default_configuration())
        volume = test_datacore_driver.VOLUME.copy()
        volume['provider_location'] = virtual_disk.Id
        connector = {
            'host': client.HostName,
            'wwpns': ['0000000000000000'],
        }
        self.assertRaises(cinder_exception.VolumeDriverException,
                          driver.initialize_connection,
                          volume,
                          connector)
    def test_initialize_connection_failed_on_serve(self):
        # Serving the disk returning no logical units must fail the attach.
        self.mock_client.serve_virtual_disks_to_host.return_value = []
        virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0]
        client = test_datacore_driver.CLIENTS[0]
        driver = self.init_driver(self.setup_default_configuration())
        volume = test_datacore_driver.VOLUME.copy()
        volume['provider_location'] = virtual_disk.Id
        initiator_wwpns = [port.PortName.replace('-', '').lower() for port
                           in PORTS
                           if port.PortMode == 'Initiator']
        connector = {
            'host': client.HostName,
            'wwpns': initiator_wwpns,
        }
        self.assertRaises(cinder_exception.VolumeDriverException,
                          driver.initialize_connection,
                          volume,
                          connector)
| [
"mock.Mock",
"cinder.volume.drivers.datacore.fc.FibreChannelVolumeDriver",
"cinder.tests.unit.volume.drivers.datacore.test_datacore_driver.VOLUME.copy"
] | [((952, 1095), 'mock.Mock', 'mock.Mock', ([], {'Id': '"""initiator_port_id1"""', 'PortType': '"""FibreChannel"""', 'PortMode': '"""Initiator"""', 'PortName': '"""AA-AA-AA-AA-AA-AA-AA-AA"""', 'HostId': '"""client_id1"""'}), "(Id='initiator_port_id1', PortType='FibreChannel', PortMode=\n 'Initiator', PortName='AA-AA-AA-AA-AA-AA-AA-AA', HostId='client_id1')\n", (961, 1095), False, 'import mock\n'), ((1152, 1274), 'mock.Mock', 'mock.Mock', ([], {'Id': '"""initiator_port_id2"""', 'PortType': '"""FibreChannel"""', 'PortMode': '"""Initiator"""', 'PortName': '"""BB-BB-BB-BB-BB-BB-BB-BB"""'}), "(Id='initiator_port_id2', PortType='FibreChannel', PortMode=\n 'Initiator', PortName='BB-BB-BB-BB-BB-BB-BB-BB')\n", (1161, 1274), False, 'import mock\n'), ((1317, 1429), 'mock.Mock', 'mock.Mock', ([], {'Id': '"""target_port_id1"""', 'PortMode': '"""Target"""', 'PortName': '"""CC-CC-CC-CC-CC-CC-CC-CC"""', 'HostId': '"""server_id1"""'}), "(Id='target_port_id1', PortMode='Target', PortName=\n 'CC-CC-CC-CC-CC-CC-CC-CC', HostId='server_id1')\n", (1326, 1429), False, 'import mock\n'), ((1472, 1584), 'mock.Mock', 'mock.Mock', ([], {'Id': '"""target_port_id2"""', 'PortMode': '"""Target"""', 'PortName': '"""DD-DD-DD-DD-DD-DD-DD-DD"""', 'HostId': '"""server_id1"""'}), "(Id='target_port_id2', PortMode='Target', PortName=\n 'DD-DD-DD-DD-DD-DD-DD-DD', HostId='server_id1')\n", (1481, 1584), False, 'import mock\n'), ((2050, 2157), 'mock.Mock', 'mock.Mock', ([], {'Id': '"""target_device_id1"""', 'TargetPortId': '"""target_port_id1"""', 'InitiatorPortId': '"""initiator_port_id1"""'}), "(Id='target_device_id1', TargetPortId='target_port_id1',\n InitiatorPortId='initiator_port_id1')\n", (2059, 2157), False, 'import mock\n'), ((2187, 2294), 'mock.Mock', 'mock.Mock', ([], {'Id': '"""target_device_id2"""', 'TargetPortId': '"""target_port_id2"""', 'InitiatorPortId': '"""initiator_port_id1"""'}), "(Id='target_device_id2', TargetPortId='target_port_id2',\n InitiatorPortId='initiator_port_id1')\n", (2196, 
2294), False, 'import mock\n'), ((2324, 2431), 'mock.Mock', 'mock.Mock', ([], {'Id': '"""target_device_id3"""', 'TargetPortId': '"""target_port_id2"""', 'InitiatorPortId': '"""initiator_port_id1"""'}), "(Id='target_device_id3', TargetPortId='target_port_id2',\n InitiatorPortId='initiator_port_id1')\n", (2333, 2431), False, 'import mock\n'), ((2461, 2568), 'mock.Mock', 'mock.Mock', ([], {'Id': '"""target_device_id4"""', 'TargetPortId': '"""target_port_id2"""', 'InitiatorPortId': '"""initiator_port_id2"""'}), "(Id='target_device_id4', TargetPortId='target_port_id2',\n InitiatorPortId='initiator_port_id2')\n", (2470, 2568), False, 'import mock\n'), ((3066, 3115), 'cinder.volume.drivers.datacore.fc.FibreChannelVolumeDriver', 'fc.FibreChannelVolumeDriver', ([], {'configuration': 'config'}), '(configuration=config)\n', (3093, 3115), False, 'from cinder.volume.drivers.datacore import fc\n'), ((4496, 4530), 'cinder.tests.unit.volume.drivers.datacore.test_datacore_driver.VOLUME.copy', 'test_datacore_driver.VOLUME.copy', ([], {}), '()\n', (4528, 4530), False, 'from cinder.tests.unit.volume.drivers.datacore import test_datacore_driver\n'), ((6521, 6555), 'cinder.tests.unit.volume.drivers.datacore.test_datacore_driver.VOLUME.copy', 'test_datacore_driver.VOLUME.copy', ([], {}), '()\n', (6553, 6555), False, 'from cinder.tests.unit.volume.drivers.datacore import test_datacore_driver\n'), ((8232, 8266), 'cinder.tests.unit.volume.drivers.datacore.test_datacore_driver.VOLUME.copy', 'test_datacore_driver.VOLUME.copy', ([], {}), '()\n', (8264, 8266), False, 'from cinder.tests.unit.volume.drivers.datacore import test_datacore_driver\n'), ((9162, 9196), 'cinder.tests.unit.volume.drivers.datacore.test_datacore_driver.VOLUME.copy', 'test_datacore_driver.VOLUME.copy', ([], {}), '()\n', (9194, 9196), False, 'from cinder.tests.unit.volume.drivers.datacore import test_datacore_driver\n'), ((9884, 9918), 'cinder.tests.unit.volume.drivers.datacore.test_datacore_driver.VOLUME.copy', 
'test_datacore_driver.VOLUME.copy', ([], {}), '()\n', (9916, 9918), False, 'from cinder.tests.unit.volume.drivers.datacore import test_datacore_driver\n'), ((1719, 1736), 'mock.Mock', 'mock.Mock', ([], {'Quad': '(4)'}), '(Quad=4)\n', (1728, 1736), False, 'import mock\n'), ((1814, 1831), 'mock.Mock', 'mock.Mock', ([], {'Quad': '(3)'}), '(Quad=3)\n', (1823, 1831), False, 'import mock\n'), ((1909, 1926), 'mock.Mock', 'mock.Mock', ([], {'Quad': '(2)'}), '(Quad=2)\n', (1918, 1926), False, 'import mock\n'), ((2004, 2021), 'mock.Mock', 'mock.Mock', ([], {'Quad': '(1)'}), '(Quad=1)\n', (2013, 2021), False, 'import mock\n')] |
from celery import task
from django.core.mail import send_mail
@task
def send_email(subject, message, from_email, recipient_list):
    """Send an email asynchronously using a celery worker.

    Takes the same arguments as django's ``send_mail`` function.
    """
    send_mail(subject, message, from_email, recipient_list)
@task
def update_object(index, instance, using=None):
    """Asynchronously refresh the search-index entry for ``instance``."""
    index.update_object(instance, using=using)
@task
def remove_object(index, instance, using=None):
    """Asynchronously remove the search-index entry for ``instance``."""
    index.remove_object(instance, using=using)
"django.core.mail.send_mail"
] | [((252, 307), 'django.core.mail.send_mail', 'send_mail', (['subject', 'message', 'from_email', 'recipient_list'], {}), '(subject, message, from_email, recipient_list)\n', (261, 307), False, 'from django.core.mail import send_mail\n')] |
"""General project util functions"""
from typing import Callable
import inspect
import time
from functools import wraps
from sys import getsizeof
def timeit(method: Callable) -> Callable:
    """Decorator that measures how long *method* takes to run.

    If the wrapped call receives a ``log_time`` dict keyword argument, the
    elapsed seconds are stored in it under the key given by ``log_name``
    (defaulting to the lower-cased function name). Otherwise the timing is
    printed to stdout.

    Args:
        method (Callable): the function to be timed.

    Returns:
        Callable: a wrapper with the same signature as *method*.
    """
    @wraps(method)
    def timed(*args, **kw):
        start = time.perf_counter()
        result = method(*args, **kw)
        elapsed = time.perf_counter() - start
        if "log_time" in kw:
            label = kw.get("log_name", method.__name__.lower())
            kw["log_time"][label] = elapsed
        else:
            print("%r %2.5f s\n" % (method.__name__, elapsed))
        return result

    return timed
def human_readable_size(num: int, suffix: str = "B") -> str:
    """
    Convert a number of bytes into human readable format.

    This function is meant as a helper function for `get_byte_size`.

    Args:
        num (int): The number of bytes to convert
        suffix (str, optional): The suffix to use for bytes. Defaults to 'B'.

    Returns:
        str: A human readable version of the number of bytes.
    """
    assert num >= 0, "Size cannot be negative."
    for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
        if num < 1024:
            return f"{num:.0f} {unit}{suffix}"
        num /= 1024.0
    # Fix: the format spec was "1f" (field width 1, default 6-decimal
    # precision, e.g. "2.000000 YB"); ".1f" gives the intended one decimal.
    return f"{num:.1f} Y{suffix}"
def calculate_byte_size_recursively(obj: object, seen: set = None) -> int:
    """
    Recursively calculate the in-memory size of an object in bytes.

    Adapted from https://github.com/bosswissam/pysize and meant as a helper
    for `get_byte_size`. ``sys.getsizeof`` alone is shallow — it ignores
    member objects — hence the recursive traversal here. Each distinct
    object is counted exactly once via the ``seen`` id-set, which also
    protects against self-referential structures.

    Args:
        obj (object): The python object to measure.
        seen (set, optional): Ids of objects already counted; used by the
            recursive calls. Leave as ``None`` at the top level.

    Returns:
        int: The total size of the object graph in bytes.
    """
    total = getsizeof(obj)
    if seen is None:
        # Top-level call: start with an empty set of visited object ids.
        seen = set()
    identity = id(obj)
    if identity in seen:
        # Already counted elsewhere in the graph — avoid double counting.
        return 0
    # Mark as visited *before* recursing so cycles terminate.
    seen.add(identity)
    if hasattr(obj, "__dict__"):
        # Class instances: find the class in the MRO that owns __dict__ and
        # recurse into the instance attributes.
        for klass in obj.__class__.__mro__:
            if "__dict__" in klass.__dict__:
                descriptor = klass.__dict__["__dict__"]
                if inspect.isgetsetdescriptor(descriptor) or \
                        inspect.ismemberdescriptor(descriptor):
                    total += calculate_byte_size_recursively(obj.__dict__, seen)
                break
    if isinstance(obj, dict):
        # Dictionaries: count both keys and values.
        for key in obj.keys():
            total += calculate_byte_size_recursively(key, seen)
        for value in obj.values():
            total += calculate_byte_size_recursively(value, seen)
    elif hasattr(obj, "__iter__") and not isinstance(obj, (str, bytes, bytearray)):
        # Other iterables; str/bytes/bytearray are excluded because they
        # implement __iter__ but their elements are already included in
        # getsizeof above.
        for item in obj:
            total += calculate_byte_size_recursively(item, seen)
    if hasattr(obj, "__slots__"):
        # Objects may define __slots__ in addition to __dict__.
        for slot in obj.__slots__:
            if hasattr(obj, slot):
                total += calculate_byte_size_recursively(getattr(obj, slot), seen)
    return total
def get_byte_size(obj: object) -> str:
    """
    Return the human readable size of a python object in bytes.

    Args:
        obj (object): The python object to analyse

    Returns:
        str: Human readable string with the size of the object
    """
    total_bytes = calculate_byte_size_recursively(obj)
    return human_readable_size(total_bytes)
| [
"sys.getsizeof",
"inspect.isgetsetdescriptor",
"time.perf_counter",
"functools.wraps",
"inspect.ismemberdescriptor"
] | [((1020, 1033), 'functools.wraps', 'wraps', (['method'], {}), '(method)\n', (1025, 1033), False, 'from functools import wraps\n'), ((2874, 2888), 'sys.getsizeof', 'getsizeof', (['obj'], {}), '(obj)\n', (2883, 2888), False, 'from sys import getsizeof\n'), ((1075, 1094), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1092, 1094), False, 'import time\n'), ((1145, 1164), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1162, 1164), False, 'import time\n'), ((3490, 3519), 'inspect.isgetsetdescriptor', 'inspect.isgetsetdescriptor', (['d'], {}), '(d)\n', (3516, 3519), False, 'import inspect\n'), ((3523, 3552), 'inspect.ismemberdescriptor', 'inspect.ismemberdescriptor', (['d'], {}), '(d)\n', (3549, 3552), False, 'import inspect\n')] |
#!/usr/bin/env python3
import sqlite3
db = sqlite3.connect('taginfo-db.db')
c = db.cursor()

# Fetch the 50 most common shop=* tags, dropping the uninformative
# 'yes'/'no' values, and sort the remainder alphabetically.
query = "select key,value from tags where key='shop' order by count_all desc limit 50"
keyvals = sorted([key, value]
                 for key, value in c.execute(query)
                 if value not in ('yes', 'no'))

# Build the mapost TYPE stanzas and the matching whitelist in one pass.
mapost_parts = []
whitelist_parts = []
for key, value in keyvals:
    tag = key + '_' + value
    mapost_parts.append(' TYPE ' + tag + '\n')
    mapost_parts.append(' = NODE AREA ("%s"=="%s")\n' % (key, value))
    mapost_parts.append(' {Name, NameAlt}\n ADDRESS POI\n GROUP ' + key + '\n\n')
    whitelist_parts.append(tag + '\n')

mapost = "".join(mapost_parts)
whitelist = "".join(whitelist_parts)

print(mapost)
print(whitelist)
| [
"sqlite3.connect"
] | [((45, 77), 'sqlite3.connect', 'sqlite3.connect', (['"""taginfo-db.db"""'], {}), "('taginfo-db.db')\n", (60, 77), False, 'import sqlite3\n')] |
#!/usr/bin/env python
"""
Generate sample olfactory model stimulus.
"""
import numpy as np
import h5py
osn_num = 1375  # number of olfactory sensory neurons
dt = 1e-4       # time step
Ot = 2000       # number of data points during reset period
Rt = 1000       # number of data points during odor delivery period
I = 0.5195      # amplitude of odorant concentration

# Stimulus waveform: alternating off (level 0) and on (level I) segments,
# each given as (level, number of samples).
_segments = ((0.0, 500), (I, 5000), (0.0, 4500), (I, 1000),
             (0.0, 1000), (I, 1500), (0.0, 500))
u = np.concatenate([level * np.ones(length, np.float64)
                    for level, length in _segments])
Nt = u.size

# Replicate the common waveform across every OSN -> shape (Nt, osn_num).
u_all = np.transpose(np.kron(np.ones((osn_num, 1)), u))

with h5py.File('olfactory_input.h5', 'w') as f:
    f.create_dataset('array', (Nt, osn_num),
                     dtype=np.float64,
                     data=u_all)
| [
"numpy.zeros",
"numpy.ones",
"numpy.concatenate",
"h5py.File"
] | [((412, 437), 'numpy.zeros', 'np.zeros', (['(500)', 'np.float64'], {}), '(500, np.float64)\n', (420, 437), True, 'import numpy as np\n'), ((478, 504), 'numpy.zeros', 'np.zeros', (['(4500)', 'np.float64'], {}), '(4500, np.float64)\n', (486, 504), True, 'import numpy as np\n'), ((543, 569), 'numpy.zeros', 'np.zeros', (['(1000)', 'np.float64'], {}), '(1000, np.float64)\n', (551, 569), True, 'import numpy as np\n'), ((610, 635), 'numpy.zeros', 'np.zeros', (['(500)', 'np.float64'], {}), '(500, np.float64)\n', (618, 635), True, 'import numpy as np\n'), ((772, 823), 'numpy.concatenate', 'np.concatenate', (['(u_1, u_2, u_3, u_4, u_5, u_6, u_7)'], {}), '((u_1, u_2, u_3, u_4, u_5, u_6, u_7))\n', (786, 823), True, 'import numpy as np\n'), ((446, 471), 'numpy.ones', 'np.ones', (['(5000)', 'np.float64'], {}), '(5000, np.float64)\n', (453, 471), True, 'import numpy as np\n'), ((512, 537), 'numpy.ones', 'np.ones', (['(1000)', 'np.float64'], {}), '(1000, np.float64)\n', (519, 537), True, 'import numpy as np\n'), ((578, 603), 'numpy.ones', 'np.ones', (['(1500)', 'np.float64'], {}), '(1500, np.float64)\n', (585, 603), True, 'import numpy as np\n'), ((904, 940), 'h5py.File', 'h5py.File', (['"""olfactory_input.h5"""', '"""w"""'], {}), "('olfactory_input.h5', 'w')\n", (913, 940), False, 'import h5py\n'), ((871, 892), 'numpy.ones', 'np.ones', (['(osn_num, 1)'], {}), '((osn_num, 1))\n', (878, 892), True, 'import numpy as np\n')] |
# tag::MYMAX_TYPES[]
from typing import Protocol, Any, TypeVar, overload, Callable, Iterable, Union
class _Comparable(Protocol):
    """Structural type for anything that supports the ``<`` operator."""
    def __lt__(self, other: Any) -> bool: ...
_T = TypeVar('_T')  # unconstrained item type
_CT = TypeVar('_CT', bound=_Comparable)  # item type supporting <
_DT = TypeVar('_DT')  # type of the `default` fallback value
# Sentinel distinguishing "no default given" from an explicit default=None.
MISSING = object()
EMPTY_MSG = 'max() arg is an empty sequence'
# Overloads mirror the builtin max(): two-or-more positional arguments vs.
# a single iterable, each with and without a key function / default.
@overload
def max(__arg1: _CT, __arg2: _CT, *_args: _CT, key: None = ...) -> _CT:
    ...
@overload
def max(__arg1: _T, __arg2: _T, *_args: _T, key: Callable[[_T], _CT]) -> _T:
    ...
@overload
def max(__iterable: Iterable[_CT], *, key: None = ...) -> _CT:
    ...
@overload
def max(__iterable: Iterable[_T], *, key: Callable[[_T], _CT]) -> _T:
    ...
@overload
def max(__iterable: Iterable[_CT], *, key: None = ...,
        default: _DT) -> Union[_CT, _DT]:
    ...
@overload
def max(__iterable: Iterable[_T], *, key: Callable[[_T], _CT],
        default: _DT) -> Union[_T, _DT]:
    ...
# end::MYMAX_TYPES[]
# tag::MYMAX[]
def max(first, *args, key=None, default=MISSING):
    """Reimplementation of the builtin ``max``.

    Accepts either a single iterable or two-plus positional arguments, an
    optional one-argument *key* function, and an optional *default* that is
    returned when the (single) iterable is empty. With no items and no
    default, raises ``ValueError``.
    """
    if args:
        # max(a, b, ...) form: `first` is already the initial candidate.
        series = iter(args)
        candidate = first
    else:
        # max(iterable) form: take the first item as the candidate.
        series = iter(first)
        try:
            candidate = next(series)
        except StopIteration:
            if default is not MISSING:
                return default
            raise ValueError(EMPTY_MSG) from None
    keyfn = (lambda item: item) if key is None else key
    best_key = keyfn(candidate)
    for current in series:
        current_key = keyfn(current)
        if best_key < current_key:
            candidate, best_key = current, current_key
    return candidate
# end::MYMAX[] | [
"typing.TypeVar"
] | [((182, 195), 'typing.TypeVar', 'TypeVar', (['"""_T"""'], {}), "('_T')\n", (189, 195), False, 'from typing import Protocol, Any, TypeVar, overload, Callable, Iterable, Union\n'), ((202, 235), 'typing.TypeVar', 'TypeVar', (['"""_CT"""'], {'bound': '_Comparable'}), "('_CT', bound=_Comparable)\n", (209, 235), False, 'from typing import Protocol, Any, TypeVar, overload, Callable, Iterable, Union\n'), ((242, 256), 'typing.TypeVar', 'TypeVar', (['"""_DT"""'], {}), "('_DT')\n", (249, 256), False, 'from typing import Protocol, Any, TypeVar, overload, Callable, Iterable, Union\n')] |
from django.db import models
import datetime
from django.contrib.auth.models import (
BaseUserManager, AbstractBaseUser, Group, PermissionsMixin
)
class MyUserManager(BaseUserManager):
    """Manager that knows how to create regular and admin users."""

    def create_user(self, username, password=None):
        """Create, password-hash and persist a regular user."""
        new_user = self.model(username=username)
        new_user.set_password(password)
        new_user.save(using=self._db)
        return new_user

    def create_superuser(self, username, password):
        """Create a user and promote it to admin/superuser."""
        superuser = self.create_user(username, password)
        superuser.is_admin = True
        superuser.is_superuser = True
        superuser.save(using=self._db)
        return superuser
class MyUser(AbstractBaseUser, PermissionsMixin):
    """Custom user model authenticated by username only."""

    username = models.CharField(max_length=20, unique=True)
    # Flag for non-human (machine/virtual) accounts.
    is_virtual = models.BooleanField(default=False)
    is_active = models.BooleanField(default=True)
    is_admin = models.BooleanField(default=False)

    objects = MyUserManager()

    USERNAME_FIELD = 'username'
    # Fix: Django's AbstractBaseUser contract expects REQUIRED_FIELDS
    # (plural); the misspelled REQUIRED_FIELD was a dead attribute that
    # never overrode anything. It is kept as an alias for backward
    # compatibility with any external readers.
    REQUIRED_FIELDS = []
    REQUIRED_FIELD = REQUIRED_FIELDS

    def __str__(self):
        return self.username

    def get_full_name(self):
        """Django hook: the long identifier for the user."""
        return self.username

    def get_short_name(self):
        """Django hook: the short identifier for the user."""
        return self.username

    @property
    def is_staff(self):
        # The admin site treats staff == admin for this model.
        return self.is_admin
"django.db.models.CharField",
"django.db.models.BooleanField"
] | [((618, 662), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'unique': '(True)'}), '(max_length=20, unique=True)\n', (634, 662), False, 'from django.db import models\n'), ((682, 716), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (701, 716), False, 'from django.db import models\n'), ((732, 765), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (751, 765), False, 'from django.db import models\n'), ((778, 812), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (797, 812), False, 'from django.db import models\n')] |
import pytest
from numpy.testing import assert_almost_equal, assert_array_equal, \
assert_array_almost_equal
from ctapipe.calib.camera.r1 import (
CameraR1CalibratorFactory,
HESSIOR1Calibrator,
TargetIOR1Calibrator,
NullR1Calibrator
)
from ctapipe.io.eventsource import EventSource
from ctapipe.io.simteleventsource import SimTelEventSource
from ctapipe.io.targetioeventsource import TargetIOEventSource
from ctapipe.utils import get_dataset_path
def test_hessio_r1_calibrator(example_event):
    """The HESSIO calibrator must produce the known R1 sample value."""
    tel_id = 11
    HESSIOR1Calibrator().calibrate(example_event)
    waveform = example_event.r1.tel[tel_id].waveform
    assert_almost_equal(waveform[0, 0, 0], -0.091, 3)
def test_null_r1_calibrator(example_event):
    """The null calibrator must copy R0 samples to R1 unchanged."""
    tel_id = 11
    NullR1Calibrator().calibrate(example_event)
    assert_array_equal(example_event.r0.tel[tel_id].waveform,
                       example_event.r1.tel[tel_id].waveform)
def test_targetio_calibrator():
    # target_calib is an optional dependency; skip this test when absent.
    pytest.importorskip("target_calib")
    url_r0 = get_dataset_path("targetmodule_r0.tio")
    url_r1 = get_dataset_path("targetmodule_r1.tio")
    pedpath = get_dataset_path("targetmodule_ped.tcal")
    source_r0 = TargetIOEventSource(input_url=url_r0)
    source_r1 = TargetIOEventSource(input_url=url_r1)
    r1c = CameraR1CalibratorFactory.produce(eventsource=source_r0)
    event_r0 = source_r0._get_event_by_index(0)
    event_r1 = source_r1._get_event_by_index(0)
    # Without a pedestal file, calibration leaves R1 identical to R0.
    r1c.calibrate(event_r0)
    assert_array_equal(event_r0.r0.tel[0].waveform,
                       event_r0.r1.tel[0].waveform)
    # With a pedestal file, the calibrated R0 event must match the
    # reference R1 file produced offline.
    r1c = CameraR1CalibratorFactory.produce(
        eventsource=source_r0,
        pedestal_path=pedpath
    )
    r1c.calibrate(event_r0)
    assert_array_almost_equal(event_r0.r1.tel[0].waveform,
                              event_r1.r1.tel[0].waveform, 1)
def test_targetio_calibrator_wrong_file(example_event):
    """Calibrating a non-TargetIO event must raise a ValueError."""
    pytest.importorskip("target_calib")
    calibrator = TargetIOR1Calibrator()
    with pytest.raises(ValueError):
        calibrator.calibrate(example_event)
def test_check_r0_exists(example_event):
    """check_r0_exists reflects whether the R0 waveform is populated."""
    tel_id = 11
    calibrator = HESSIOR1Calibrator()
    assert calibrator.check_r0_exists(example_event, tel_id) is True
    example_event.r0.tel[tel_id].waveform = None
    assert calibrator.check_r0_exists(example_event, tel_id) is False
def test_factory_from_product():
    """The factory must honour an explicitly requested product name."""
    cases = (("NullR1Calibrator", NullR1Calibrator),
             ("HESSIOR1Calibrator", HESSIOR1Calibrator))
    for product, expected_cls in cases:
        calibrator = CameraR1CalibratorFactory.produce(product=product)
        assert isinstance(calibrator, expected_cls)
def test_factory_default():
    """Without arguments the factory falls back to the null calibrator."""
    assert isinstance(CameraR1CalibratorFactory.produce(), NullR1Calibrator)
def test_factory_from_eventsource():
    """A simtel event source should select the HESSIO calibrator."""
    source = SimTelEventSource(
        input_url=get_dataset_path("gamma_test.simtel.gz"))
    calibrator = CameraR1CalibratorFactory.produce(eventsource=source)
    assert isinstance(calibrator, HESSIOR1Calibrator)
def test_factory_from_eventsource_override():
    """An explicit product overrides the event-source-based choice."""
    source = SimTelEventSource(
        input_url=get_dataset_path("gamma_test.simtel.gz"))
    calibrator = CameraR1CalibratorFactory.produce(eventsource=source,
                                                  product="NullR1Calibrator")
    assert isinstance(calibrator, NullR1Calibrator)
class UnknownEventSource(EventSource):
    """Minimal working EventSource that claims compatibility with nothing."""

    @staticmethod
    def is_compatible(file_path):
        # Never matches, so the factory cannot map it to a calibrator.
        return False

    def _generator(self):
        return range(len(self.input_url))
def test_factory_from_unknown_eventsource():
    """Unknown event sources should fall back to the null calibrator."""
    source = UnknownEventSource(
        input_url=get_dataset_path("gamma_test.simtel.gz"))
    calibrator = CameraR1CalibratorFactory.produce(eventsource=source)
    assert isinstance(calibrator, NullR1Calibrator)
| [
"ctapipe.utils.get_dataset_path",
"numpy.testing.assert_array_almost_equal",
"ctapipe.calib.camera.r1.CameraR1CalibratorFactory.produce",
"ctapipe.calib.camera.r1.TargetIOR1Calibrator",
"numpy.testing.assert_almost_equal",
"pytest.importorskip",
"ctapipe.io.simteleventsource.SimTelEventSource",
"ctapi... | [((549, 569), 'ctapipe.calib.camera.r1.HESSIOR1Calibrator', 'HESSIOR1Calibrator', ([], {}), '()\n', (567, 569), False, 'from ctapipe.calib.camera.r1 import CameraR1CalibratorFactory, HESSIOR1Calibrator, TargetIOR1Calibrator, NullR1Calibrator\n'), ((660, 703), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['r1[0, 0, 0]', '(-0.091)', '(3)'], {}), '(r1[0, 0, 0], -0.091, 3)\n', (679, 703), False, 'from numpy.testing import assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((783, 801), 'ctapipe.calib.camera.r1.NullR1Calibrator', 'NullR1Calibrator', ([], {}), '()\n', (799, 801), False, 'from ctapipe.calib.camera.r1 import CameraR1CalibratorFactory, HESSIOR1Calibrator, TargetIOR1Calibrator, NullR1Calibrator\n'), ((938, 964), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['r0', 'r1'], {}), '(r0, r1)\n', (956, 964), False, 'from numpy.testing import assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((1003, 1038), 'pytest.importorskip', 'pytest.importorskip', (['"""target_calib"""'], {}), "('target_calib')\n", (1022, 1038), False, 'import pytest\n'), ((1052, 1091), 'ctapipe.utils.get_dataset_path', 'get_dataset_path', (['"""targetmodule_r0.tio"""'], {}), "('targetmodule_r0.tio')\n", (1068, 1091), False, 'from ctapipe.utils import get_dataset_path\n'), ((1105, 1144), 'ctapipe.utils.get_dataset_path', 'get_dataset_path', (['"""targetmodule_r1.tio"""'], {}), "('targetmodule_r1.tio')\n", (1121, 1144), False, 'from ctapipe.utils import get_dataset_path\n'), ((1159, 1200), 'ctapipe.utils.get_dataset_path', 'get_dataset_path', (['"""targetmodule_ped.tcal"""'], {}), "('targetmodule_ped.tcal')\n", (1175, 1200), False, 'from ctapipe.utils import get_dataset_path\n'), ((1218, 1255), 'ctapipe.io.targetioeventsource.TargetIOEventSource', 'TargetIOEventSource', ([], {'input_url': 'url_r0'}), '(input_url=url_r0)\n', (1237, 1255), False, 'from ctapipe.io.targetioeventsource import TargetIOEventSource\n'), 
((1272, 1309), 'ctapipe.io.targetioeventsource.TargetIOEventSource', 'TargetIOEventSource', ([], {'input_url': 'url_r1'}), '(input_url=url_r1)\n', (1291, 1309), False, 'from ctapipe.io.targetioeventsource import TargetIOEventSource\n'), ((1321, 1377), 'ctapipe.calib.camera.r1.CameraR1CalibratorFactory.produce', 'CameraR1CalibratorFactory.produce', ([], {'eventsource': 'source_r0'}), '(eventsource=source_r0)\n', (1354, 1377), False, 'from ctapipe.calib.camera.r1 import CameraR1CalibratorFactory, HESSIOR1Calibrator, TargetIOR1Calibrator, NullR1Calibrator\n'), ((1508, 1584), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['event_r0.r0.tel[0].waveform', 'event_r0.r1.tel[0].waveform'], {}), '(event_r0.r0.tel[0].waveform, event_r0.r1.tel[0].waveform)\n', (1526, 1584), False, 'from numpy.testing import assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((1619, 1698), 'ctapipe.calib.camera.r1.CameraR1CalibratorFactory.produce', 'CameraR1CalibratorFactory.produce', ([], {'eventsource': 'source_r0', 'pedestal_path': 'pedpath'}), '(eventsource=source_r0, pedestal_path=pedpath)\n', (1652, 1698), False, 'from ctapipe.calib.camera.r1 import CameraR1CalibratorFactory, HESSIOR1Calibrator, TargetIOR1Calibrator, NullR1Calibrator\n'), ((1753, 1844), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['event_r0.r1.tel[0].waveform', 'event_r1.r1.tel[0].waveform', '(1)'], {}), '(event_r0.r1.tel[0].waveform, event_r1.r1.tel[0].\n waveform, 1)\n', (1778, 1844), False, 'from numpy.testing import assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((1932, 1967), 'pytest.importorskip', 'pytest.importorskip', (['"""target_calib"""'], {}), "('target_calib')\n", (1951, 1967), False, 'import pytest\n'), ((1978, 2000), 'ctapipe.calib.camera.r1.TargetIOR1Calibrator', 'TargetIOR1Calibrator', ([], {}), '()\n', (1998, 2000), False, 'from ctapipe.calib.camera.r1 import CameraR1CalibratorFactory, HESSIOR1Calibrator, 
TargetIOR1Calibrator, NullR1Calibrator\n'), ((2150, 2170), 'ctapipe.calib.camera.r1.HESSIOR1Calibrator', 'HESSIOR1Calibrator', ([], {}), '()\n', (2168, 2170), False, 'from ctapipe.calib.camera.r1 import CameraR1CalibratorFactory, HESSIOR1Calibrator, TargetIOR1Calibrator, NullR1Calibrator\n'), ((2412, 2473), 'ctapipe.calib.camera.r1.CameraR1CalibratorFactory.produce', 'CameraR1CalibratorFactory.produce', ([], {'product': '"""NullR1Calibrator"""'}), "(product='NullR1Calibrator')\n", (2445, 2473), False, 'from ctapipe.calib.camera.r1 import CameraR1CalibratorFactory, HESSIOR1Calibrator, TargetIOR1Calibrator, NullR1Calibrator\n'), ((2557, 2620), 'ctapipe.calib.camera.r1.CameraR1CalibratorFactory.produce', 'CameraR1CalibratorFactory.produce', ([], {'product': '"""HESSIOR1Calibrator"""'}), "(product='HESSIOR1Calibrator')\n", (2590, 2620), False, 'from ctapipe.calib.camera.r1 import CameraR1CalibratorFactory, HESSIOR1Calibrator, TargetIOR1Calibrator, NullR1Calibrator\n'), ((2736, 2771), 'ctapipe.calib.camera.r1.CameraR1CalibratorFactory.produce', 'CameraR1CalibratorFactory.produce', ([], {}), '()\n', (2769, 2771), False, 'from ctapipe.calib.camera.r1 import CameraR1CalibratorFactory, HESSIOR1Calibrator, TargetIOR1Calibrator, NullR1Calibrator\n'), ((2877, 2917), 'ctapipe.utils.get_dataset_path', 'get_dataset_path', (['"""gamma_test.simtel.gz"""'], {}), "('gamma_test.simtel.gz')\n", (2893, 2917), False, 'from ctapipe.utils import get_dataset_path\n'), ((2936, 2972), 'ctapipe.io.simteleventsource.SimTelEventSource', 'SimTelEventSource', ([], {'input_url': 'dataset'}), '(input_url=dataset)\n', (2953, 2972), False, 'from ctapipe.io.simteleventsource import SimTelEventSource\n'), ((2990, 3048), 'ctapipe.calib.camera.r1.CameraR1CalibratorFactory.produce', 'CameraR1CalibratorFactory.produce', ([], {'eventsource': 'eventsource'}), '(eventsource=eventsource)\n', (3023, 3048), False, 'from ctapipe.calib.camera.r1 import CameraR1CalibratorFactory, HESSIOR1Calibrator, 
TargetIOR1Calibrator, NullR1Calibrator\n'), ((3165, 3205), 'ctapipe.utils.get_dataset_path', 'get_dataset_path', (['"""gamma_test.simtel.gz"""'], {}), "('gamma_test.simtel.gz')\n", (3181, 3205), False, 'from ctapipe.utils import get_dataset_path\n'), ((3224, 3260), 'ctapipe.io.simteleventsource.SimTelEventSource', 'SimTelEventSource', ([], {'input_url': 'dataset'}), '(input_url=dataset)\n', (3241, 3260), False, 'from ctapipe.io.simteleventsource import SimTelEventSource\n'), ((3278, 3369), 'ctapipe.calib.camera.r1.CameraR1CalibratorFactory.produce', 'CameraR1CalibratorFactory.produce', ([], {'eventsource': 'eventsource', 'product': '"""NullR1Calibrator"""'}), "(eventsource=eventsource, product=\n 'NullR1Calibrator')\n", (3311, 3369), False, 'from ctapipe.calib.camera.r1 import CameraR1CalibratorFactory, HESSIOR1Calibrator, TargetIOR1Calibrator, NullR1Calibrator\n'), ((3731, 3771), 'ctapipe.utils.get_dataset_path', 'get_dataset_path', (['"""gamma_test.simtel.gz"""'], {}), "('gamma_test.simtel.gz')\n", (3747, 3771), False, 'from ctapipe.utils import get_dataset_path\n'), ((3845, 3903), 'ctapipe.calib.camera.r1.CameraR1CalibratorFactory.produce', 'CameraR1CalibratorFactory.produce', ([], {'eventsource': 'eventsource'}), '(eventsource=eventsource)\n', (3878, 3903), False, 'from ctapipe.calib.camera.r1 import CameraR1CalibratorFactory, HESSIOR1Calibrator, TargetIOR1Calibrator, NullR1Calibrator\n'), ((2010, 2035), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2023, 2035), False, 'import pytest\n')] |
from __future__ import annotations
from copy import deepcopy
from dataclasses import dataclass, field
from typing import Optional
from models.backyard import Backyard
from models.heater import Heater
@dataclass
class Room:
    """A rectangular room of the simulated house.

    Geometry (``width``/``height``/``xPos``/``yPos``) is expressed in pixels
    and converted to metres through ``toMeterScale``; the thermal fields feed
    a simple heat-capacity model (``mass = density * volume``).
    """
    id: int
    name: str
    title: str
    # Temperature bands -- presumably [low, high] ranges in degrees Celsius;
    # TODO(review): confirm units/shape against the simulation configuration.
    coldThreshold: list[float]
    optimalThreshold: list[float]
    warmThreshold: list[float]
    hotThreshold: list[float]
    cooldownTemperature: float
    # Pixel-based geometry; scaled by toMeterScale for physical quantities.
    width: int
    height: int
    xPos: int
    yPos: int
    wallHeight: float
    density: float # [g/m^3]
    specificHeat: float # [J/(g*k)]
    heater: Heater
    toMeterScale: float
    currentTemperature: Optional[float] = 21
    targetTemperature: Optional[float] = 21
    owners: Optional[list[str]] = field(default_factory=list)
    numberOfPeople: Optional[int] = 0
    people: Optional[list[int]] = field(default_factory=list)
    probabilityWeight: Optional[float] = 1
    # Adjacency lists per compass direction; each entry holds the live
    # neighbour object plus its name, shared wall length and (rooms only) id.
    neighbours: Optional[dict[str, list[dict[str, int | Room | Backyard]]]] = field(default_factory=lambda: {
        'south': [],
        'north': [],
        'west': [],
        'east': []
    })
    def __post_init__(self):
        """Derive cached geometry/thermal quantities from the raw fields."""
        self.total_wall_length = 2 * (self.width + self.height)
        self.area = self.__get_area()
        self.volume = self.__get_volume()
        self.mass = self.density * self.volume
        self.space_between_rooms = 20 # Has to be passed like this because it is in pixels.
    def as_dict(self):
        """Return a serialisable dict of the room state.

        Neighbour entries are deep-copied and stripped of the live
        'neighbour' object references so the result can be serialized.
        NOTE(review): the owners list is exported under the singular key
        'owner' -- confirm consumers expect that spelling.
        """
        neighbours = deepcopy(self.neighbours)
        for neighbours_per_site in neighbours.values():
            if not neighbours_per_site:
                continue
            for neighbour_data in neighbours_per_site:
                neighbour_data.pop('neighbour', None)
        return {
            'id': self.id,
            'name': self.name,
            'title': self.title,
            'coldThreshold': self.coldThreshold,
            'optimalThreshold': self.optimalThreshold,
            'warmThreshold': self.warmThreshold,
            'hotThreshold': self.hotThreshold,
            'cooldownTemperature': self.cooldownTemperature,
            'currentTemperature': self.currentTemperature,
            'owner': self.owners,
            'numberOfPeople': self.numberOfPeople,
            'people': self.people,
            'neighbours': neighbours
        }
    def add_person(self, person_id: int):
        """Register one more occupant identified by *person_id*."""
        self.numberOfPeople += 1
        self.people.append(person_id)
    def remove_person(self, person_id: int):
        """Remove occupant *person_id* (ValueError if not present)."""
        self.numberOfPeople -= 1
        self.people.remove(person_id)
    def set_room_neighbour(self, site: str, neighbour: Room | Backyard):
        """Append *neighbour* to the adjacency list of compass *site*."""
        common_wall_length = self.__get_common_wall_length(site, neighbour)
        self.neighbours[site].append({
            'neighbour': neighbour,
            'name': neighbour.name,
            'commonWallLength': common_wall_length
        })
        # Only real rooms carry an id; backyards do not.
        if isinstance(neighbour, Room):
            self.neighbours[site][-1]['roomId'] = neighbour.id
    def set_backyard_as_lacking_neighbours(self, backyard: Backyard):
        """Attach *backyard* on every side that has no neighbour yet."""
        for site in self.neighbours:
            if not self.neighbours[site]:
                self.set_room_neighbour(site, backyard)
    def check_if_room_is_a_vertical_neighbour(self, room: Room):
        """Register *room* as a north/south neighbour when the rooms share a
        horizontal wall within ``space_between_rooms`` pixels.

        NOTE(review): 'north' corresponds to *room* lying at smaller y, i.e.
        screen coordinates with y growing downwards -- confirm.
        """
        if abs(self.xPos - room.xPos) <= self.space_between_rooms or abs(
                self.xPos + self.width - room.xPos - room.width) <= self.space_between_rooms:
            if abs(self.yPos - room.height - room.yPos) <= self.space_between_rooms:
                self.set_room_neighbour('north', room)
            elif abs(self.yPos + self.height - room.yPos) <= self.space_between_rooms:
                self.set_room_neighbour('south', room)
    def check_if_room_is_a_horizontal_neighbour(self, room: Room):
        """Register *room* as an east/west neighbour when the rooms share a
        vertical wall within ``space_between_rooms`` pixels."""
        if abs(self.yPos - room.yPos) <= self.space_between_rooms or abs(
                self.yPos + self.height - room.yPos - room.height) <= self.space_between_rooms:
            if abs(self.xPos + self.width - room.xPos) <= self.space_between_rooms:
                self.set_room_neighbour('east', room)
            elif abs(self.xPos - room.width - room.xPos) <= self.space_between_rooms:
                self.set_room_neighbour('west', room)
    def __get_common_wall_length(self, site: str, neighbour: Room | Backyard):
        """Length (pixels) of the wall shared with *neighbour* on *site*.

        A Backyard spans the whole wall; for a Room the overlap is
        approximated by the shorter of the two wall lengths.
        """
        if site not in self.neighbours.keys():
            raise KeyError(f'Neighbour site should be one of: {self.neighbours.keys()}')
        if isinstance(neighbour, Backyard):
            if site in ['east', 'west']:
                return self.height
            elif site in ['north', 'south']:
                return self.width
        elif isinstance(neighbour, Room):
            if site in ['east', 'west']:
                return min([self.height, neighbour.height])
            elif site in ['north', 'south']:
                return min([self.width, neighbour.width])
        else:
            raise TypeError('Not allowed type of neighbour provided!')
    def __get_area(self):
        # Pixel area scaled twice by toMeterScale -> square metres.
        return self.toMeterScale ** 2 * self.width * self.height
    def __get_volume(self):
        # Floor area times wall height -> cubic metres.
        return self.__get_area() * self.wallHeight
| [
"dataclasses.field",
"copy.deepcopy"
] | [((739, 766), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (744, 766), False, 'from dataclasses import dataclass, field\n'), ((839, 866), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (844, 866), False, 'from dataclasses import dataclass, field\n'), ((988, 1074), 'dataclasses.field', 'field', ([], {'default_factory': "(lambda : {'south': [], 'north': [], 'west': [], 'east': []})"}), "(default_factory=lambda : {'south': [], 'north': [], 'west': [],\n 'east': []})\n", (993, 1074), False, 'from dataclasses import dataclass, field\n'), ((1464, 1489), 'copy.deepcopy', 'deepcopy', (['self.neighbours'], {}), '(self.neighbours)\n', (1472, 1489), False, 'from copy import deepcopy\n')] |
"""
"""
import os.path as op
from glob import glob
from os import mkdir
from shutil import copyfile
def make_image_file():
    """Build the FSL FEAT group-level design file for the visual analysis.

    Collects the HCP working-memory cope images (0back - fixation contrast),
    appends the group-level entries to the ``design.fsf`` template, writes
    the result under ``/scratch/tsalo006/visual/`` and copies it into the
    current working directory.
    """
    template_file = "design.fsf"
    # Per-input template snippets (one set appended for every cope file)
    gp_mem = "# Group membership for input {0}\nset fmri(groupmem.{0}) 1\n"
    hi_thing = "# Higher-level EV value for EV 1 and input {0}\nset fmri(evg{0}.1) 1\n"
    f_thing = '# 4D AVW data or FEAT directory ({n})\nset feat_files({n}) "{f}"\n'
    # Whole-design snippets (appended exactly once)
    n_fls = "# Number of first-level analyses\nset fmri(multiple) {0}\n"
    out = '# Output directory\nset fmri(outputdir) "{0}"\n'
    n_vols = "# Total volumes\nset fmri(npts) {0}\n"
    # Locate the per-subject FEAT directories
    in_dir = "/home/data/hcp/"
    subdir = "MNINonLinear/Results/tfMRI_WM/tfMRI_WM_hp200_s4_level2vol.feat"
    subject_ids = sorted(
        s for s in (op.basename(p) for p in glob(op.join(in_dir, "*"))) if s.isdigit()
    )
    feat_dirs = [
        op.join(in_dir, sid, subdir)
        for sid in subject_ids
        if op.isdir(op.join(in_dir, sid, subdir))
    ]
    n = len(feat_dirs)
    # 0back - fixation
    cope_files = [op.join(fd, "cope10.feat/stats/cope1.nii.gz") for fd in feat_dirs]
    with open(template_file, "r") as fo:
        base = fo.read()
    out_dir = "/scratch/tsalo006/visual/"
    if not op.isdir(out_dir):
        mkdir(out_dir)
    # Assemble the full design text, then write it once
    parts = [base, n_fls.format(n), "\n", out.format(out_dir), "\n", n_vols.format(n), "\n"]
    for i, f in enumerate(cope_files):
        parts += [gp_mem.format(i + 1), "\n",
                  hi_thing.format(i + 1), "\n",
                  f_thing.format(f=f, n=i + 1), "\n"]
    design_path = op.join(out_dir, "visual_power_analysis_design.fsf")
    with open(design_path, "w") as fo:
        fo.write("".join(parts))
    copyfile(
        design_path,
        "visual_power_analysis_design.fsf",
    )
def _build_group_design_text(base, cope_files, out_dir):
    """Return the full FSF design text: *base* plus group-level entries.

    Appends the number of inputs, the output directory, the volume count and
    one (group-membership, EV value, input file) entry per cope file.
    """
    # Per-input template snippets
    gp_mem = "# Group membership for input {0}\nset fmri(groupmem.{0}) 1\n"
    hi_thing = "# Higher-level EV value for EV 1 and input {0}\nset fmri(evg{0}.1) 1\n"
    f_thing = '# 4D AVW data or FEAT directory ({n})\nset feat_files({n}) "{f}"\n'
    # Whole-design snippets
    n_fls = "# Number of first-level analyses\nset fmri(multiple) {0}\n"
    out = '# Output directory\nset fmri(outputdir) "{0}"\n'
    n_vols = "# Total volumes\nset fmri(npts) {0}\n"
    n = len(cope_files)
    parts = [base, n_fls.format(n), "\n", out.format(out_dir), "\n", n_vols.format(n), "\n"]
    for i, f in enumerate(cope_files):
        parts += [gp_mem.format(i + 1), "\n",
                  hi_thing.format(i + 1), "\n",
                  f_thing.format(f=f, n=i + 1), "\n"]
    return "".join(parts)


def _write_group_design(feat_dirs, cope_relpath, out_dir, fsf_name):
    """Assemble one group design file, write it to *out_dir* and copy it
    into the current working directory."""
    cope_files = [op.join(fd, cope_relpath) for fd in feat_dirs]
    with open("design.fsf", "r") as fo:
        base = fo.read()
    if not op.isdir(out_dir):
        mkdir(out_dir)
    fsf_path = op.join(out_dir, fsf_name)
    with open(fsf_path, "w") as fo:
        fo.write(_build_group_design_text(base, cope_files, out_dir))
    copyfile(fsf_path, fsf_name)


def make_fingertapping_files():
    """Build the FSL FEAT group-level design files for the motor task.

    Creates one design per hand from the HCP motor-task FEAT directories:
    left hand (cope 10, LH-AVG) and right hand (cope 12, RH-AVG).  The
    previously duplicated per-hand code paths are factored into
    ``_write_group_design``; the generated files are byte-identical.
    """
    in_dir = "/home/data/hcp/"
    subdir = "MNINonLinear/Results/tfMRI_MOTOR/tfMRI_MOTOR_hp200_s4_level2vol.feat"
    subjects = [op.basename(s) for s in glob(op.join(in_dir, "*"))]
    subjects = sorted([s for s in subjects if s.isdigit()])
    feat_dirs = []
    for s in subjects:
        feat_dir = op.join(in_dir, s, subdir)
        if op.isdir(feat_dir):
            feat_dirs.append(feat_dir)
    # Contrast 10 is LH-AVG; contrast 12 is RH-AVG
    _write_group_design(feat_dirs, "cope10.feat/stats/cope1.nii.gz",
                        "/scratch/tsalo006/motor-lh/", "motor_lh_power_analysis_design.fsf")
    _write_group_design(feat_dirs, "cope12.feat/stats/cope1.nii.gz",
                        "/scratch/tsalo006/motor-rh/", "motor_rh_power_analysis_design.fsf")
| [
"os.mkdir",
"os.path.isdir",
"os.path.join",
"os.path.basename"
] | [((765, 785), 'os.path.join', 'op.join', (['in_dir', '"""*"""'], {}), "(in_dir, '*')\n", (772, 785), True, 'import os.path as op\n'), ((803, 817), 'os.path.basename', 'op.basename', (['s'], {}), '(s)\n', (814, 817), True, 'import os.path as op\n'), ((958, 984), 'os.path.join', 'op.join', (['in_dir', 's', 'subdir'], {}), '(in_dir, s, subdir)\n', (965, 984), True, 'import os.path as op\n'), ((996, 1014), 'os.path.isdir', 'op.isdir', (['feat_dir'], {}), '(feat_dir)\n', (1004, 1014), True, 'import os.path as op\n'), ((1120, 1165), 'os.path.join', 'op.join', (['fd', '"""cope10.feat/stats/cope1.nii.gz"""'], {}), "(fd, 'cope10.feat/stats/cope1.nii.gz')\n", (1127, 1165), True, 'import os.path as op\n'), ((1306, 1323), 'os.path.isdir', 'op.isdir', (['out_dir'], {}), '(out_dir)\n', (1314, 1323), True, 'import os.path as op\n'), ((1333, 1347), 'os.mkdir', 'mkdir', (['out_dir'], {}), '(out_dir)\n', (1338, 1347), False, 'from os import mkdir\n'), ((1839, 1891), 'os.path.join', 'op.join', (['out_dir', '"""visual_power_analysis_design.fsf"""'], {}), "(out_dir, 'visual_power_analysis_design.fsf')\n", (1846, 1891), True, 'import os.path as op\n'), ((2622, 2642), 'os.path.join', 'op.join', (['in_dir', '"""*"""'], {}), "(in_dir, '*')\n", (2629, 2642), True, 'import os.path as op\n'), ((2660, 2674), 'os.path.basename', 'op.basename', (['s'], {}), '(s)\n', (2671, 2674), True, 'import os.path as op\n'), ((2872, 2898), 'os.path.join', 'op.join', (['in_dir', 's', 'subdir'], {}), '(in_dir, s, subdir)\n', (2879, 2898), True, 'import os.path as op\n'), ((2910, 2928), 'os.path.isdir', 'op.isdir', (['feat_dir'], {}), '(feat_dir)\n', (2918, 2928), True, 'import os.path as op\n'), ((3027, 3072), 'os.path.join', 'op.join', (['fd', '"""cope10.feat/stats/cope1.nii.gz"""'], {}), "(fd, 'cope10.feat/stats/cope1.nii.gz')\n", (3034, 3072), True, 'import os.path as op\n'), ((3215, 3232), 'os.path.isdir', 'op.isdir', (['out_dir'], {}), '(out_dir)\n', (3223, 3232), True, 'import os.path as op\n'), 
((3242, 3256), 'os.mkdir', 'mkdir', (['out_dir'], {}), '(out_dir)\n', (3247, 3256), False, 'from os import mkdir\n'), ((3750, 3804), 'os.path.join', 'op.join', (['out_dir', '"""motor_lh_power_analysis_design.fsf"""'], {}), "(out_dir, 'motor_lh_power_analysis_design.fsf')\n", (3757, 3804), True, 'import os.path as op\n'), ((3894, 3939), 'os.path.join', 'op.join', (['fd', '"""cope12.feat/stats/cope1.nii.gz"""'], {}), "(fd, 'cope12.feat/stats/cope1.nii.gz')\n", (3901, 3939), True, 'import os.path as op\n'), ((4082, 4099), 'os.path.isdir', 'op.isdir', (['out_dir'], {}), '(out_dir)\n', (4090, 4099), True, 'import os.path as op\n'), ((4109, 4123), 'os.mkdir', 'mkdir', (['out_dir'], {}), '(out_dir)\n', (4114, 4123), False, 'from os import mkdir\n'), ((4617, 4671), 'os.path.join', 'op.join', (['out_dir', '"""motor_rh_power_analysis_design.fsf"""'], {}), "(out_dir, 'motor_rh_power_analysis_design.fsf')\n", (4624, 4671), True, 'import os.path as op\n'), ((1727, 1779), 'os.path.join', 'op.join', (['out_dir', '"""visual_power_analysis_design.fsf"""'], {}), "(out_dir, 'visual_power_analysis_design.fsf')\n", (1734, 1779), True, 'import os.path as op\n'), ((3636, 3690), 'os.path.join', 'op.join', (['out_dir', '"""motor_lh_power_analysis_design.fsf"""'], {}), "(out_dir, 'motor_lh_power_analysis_design.fsf')\n", (3643, 3690), True, 'import os.path as op\n'), ((4503, 4557), 'os.path.join', 'op.join', (['out_dir', '"""motor_rh_power_analysis_design.fsf"""'], {}), "(out_dir, 'motor_rh_power_analysis_design.fsf')\n", (4510, 4557), True, 'import os.path as op\n')] |
####
#
# The MIT License (MIT)
#
# Copyright 2021 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
####
import os
import argparse
import pandas as pd
import numpy as np
import sqlite3
import logging
import gzip
from tqdm import tqdm
# ================
# Setup the Logger
# Module-level logger with a dedicated stream handler; propagation is
# disabled so records are not duplicated by ancestor loggers.
LOGGER = logging.getLogger("Get CFM-ID Candidates")
LOGGER.setLevel(logging.INFO)
LOGGER.propagate = False
CH = logging.StreamHandler()
CH.setLevel(logging.INFO)
FORMATTER = logging.Formatter('[%(levelname)s] %(name)s : %(message)s')
CH.setFormatter(FORMATTER)
LOGGER.addHandler(CH)
# ================
# Supported ionization modes (negative / positive); used as dict keys below.
IONIZATION_MODES = ["neg", "pos"]
def fopener(fn: str, use_gzip=None):
    """Open *fn* for text writing, optionally gzip-compressed.

    Args:
        fn: Output file path.
        use_gzip: When True, open through ``gzip.open`` in text mode; when
            False, open a plain text file.  When None (default, backward
            compatible) fall back to the module-global CLI flag
            ``args.gzip``.

    Returns:
        A writable text file object.
    """
    if use_gzip is None:
        # Fallback kept for backward compatibility; previously the global
        # was read unconditionally, making the function fail with a
        # NameError whenever called before/without CLI argument parsing.
        use_gzip = args.gzip
    if use_gzip:
        return gzip.open(fn, "wt")
    return open(fn, "w")
if __name__ == "__main__":
    # Command-line interface
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("base_output_dir")
    arg_parser.add_argument("--massbank_db_fn", help="Filepath of the Massbank database.", default="./massbank.sqlite")
    arg_parser.add_argument("--gzip", action="store_true")
    arg_parser.add_argument("--store_candidates_separately", action="store_true")
    args = arg_parser.parse_args()
    # Read in training molecules (inchikeys) and their left-out cv-folds
    df_train = {}
    for imode in IONIZATION_MODES:
        df_train[imode] = pd.read_csv(os.path.join(args.base_output_dir, imode, "mol_list_cv.tsv"), sep="\t")
        # First InChIKey block (before the first '-') identifies the 2D skeleton
        df_train[imode]["INCHIKEY1"] = [ikey.split("-")[0] for ikey in df_train[imode]["INCHIKEY"]]
    # There is a candidate set for each CFM-ID model (10 CV folds per mode)
    candidates = {imode: [set() for _ in range(10)] for imode in IONIZATION_MODES}
    # Track which model was used for which spectrum
    df_spec2model = {imode: [] for imode in IONIZATION_MODES}
    # Connect to db
    conn = sqlite3.connect(args.massbank_db_fn)
    try:
        # Get all spectrum ids and the corresponding InChIKey(1)s
        rows = conn.execute(
            "SELECT accession, cid, inchikey1, precursor_type FROM scored_spectra_meta"
            " INNER JOIN molecules m on m.cid = scored_spectra_meta.molecule"
        ).fetchall()
        for idx, (acc, cid, ikey1, ptype) in tqdm(enumerate(rows), desc="Process spectra", total=len(rows)):
            # Determine ionization mode from the precursor-type charge sign
            if ptype.endswith("+"):
                imode = "pos"
            elif ptype.endswith("-"):
                imode = "neg"
            else:
                raise ValueError("Cannot determine ionization mode from precursor type: '%s'." % ptype)
            # Check for the spectrum, whether it is used for the CFM-ID training and if yes in which fold
            try:
                # NOTE: 'idx' is rebound here; if .index() raises, the
                # original enumerate index seeds the fallback RandomState.
                idx = df_train[imode]["INCHIKEY1"].tolist().index(ikey1)
                cv_fold = df_train[imode].iloc[idx]["CV"]
            except ValueError:
                cv_fold = np.random.RandomState(idx).randint(0, 10)  # Use a random fold as fallback
            # Get the candidates for the current spectrum
            for cid_can, smi_cnd in conn.execute(
                    "SELECT cid, smiles_iso FROM candidates_spectra "
                    " INNER JOIN molecules m ON m.cid = candidates_spectra.candidate"
                    " WHERE spectrum IS ?", (acc, )
            ):
                # Add the molecule and its isomeric SMILES representation to prediction list for the current model
                candidates[imode][cv_fold] |= {(cid_can, smi_cnd)}
            # Track spectra information and their corresponding models
            df_spec2model[imode].append((acc, cid, cv_fold, imode, ikey1))
    finally:
        conn.close()
    # Write out which model is used for which spectrum
    for imode in IONIZATION_MODES:
        pd.DataFrame(df_spec2model[imode], columns=["accession", "cid", "cv_fold", "ionization", "inchikey1"]) \
            .to_csv(os.path.join(args.base_output_dir, imode, "spec2model.tsv"), sep="\t", index=False)
    # Write out the model specific candidate sets
    if args.store_candidates_separately:
        # One small .cand file per candidate molecule and CV fold
        for imode in IONIZATION_MODES:
            for cv_fold in tqdm(range(10), desc="Write out candidate files (%s)" % imode):
                if len(candidates[imode][cv_fold]) > 0:
                    for cid, smi in candidates[imode][cv_fold]:
                        ofn = os.path.join(args.base_output_dir, imode, "%d__cv=%d.cand" % (cid, cv_fold))
                        with open(ofn, "w") as ofile:
                            ofile.write("%s %s\n" % (cid, smi))
    else:
        # One combined candidate list per CV fold, optionally gzip-compressed
        for imode in IONIZATION_MODES:
            for cv_fold in tqdm(range(10), desc="Write out candidate files (%s)" % imode):
                if len(candidates[imode][cv_fold]) > 0:
                    ofn = os.path.join(args.base_output_dir, imode, "candidates__cv=%d.csv" % cv_fold)
                    if args.gzip:
                        ofn += ".gz"
                    with fopener(ofn) as ofile:
                        for cid, smi in candidates[imode][cv_fold]:
                            ofile.write("%s %s\n" % (cid, smi))
| [
"logging.getLogger",
"logging.StreamHandler",
"sqlite3.connect",
"argparse.ArgumentParser",
"gzip.open",
"logging.Formatter",
"os.path.join",
"pandas.DataFrame",
"numpy.random.RandomState"
] | [((1309, 1351), 'logging.getLogger', 'logging.getLogger', (['"""Get CFM-ID Candidates"""'], {}), "('Get CFM-ID Candidates')\n", (1326, 1351), False, 'import logging\n'), ((1413, 1436), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1434, 1436), False, 'import logging\n'), ((1476, 1535), 'logging.Formatter', 'logging.Formatter', (['"""[%(levelname)s] %(name)s : %(message)s"""'], {}), "('[%(levelname)s] %(name)s : %(message)s')\n", (1493, 1535), False, 'import logging\n'), ((1822, 1847), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1845, 1847), False, 'import argparse\n'), ((2812, 2848), 'sqlite3.connect', 'sqlite3.connect', (['args.massbank_db_fn'], {}), '(args.massbank_db_fn)\n', (2827, 2848), False, 'import sqlite3\n'), ((1717, 1736), 'gzip.open', 'gzip.open', (['fn', '"""wt"""'], {}), "(fn, 'wt')\n", (1726, 1736), False, 'import gzip\n'), ((2356, 2416), 'os.path.join', 'os.path.join', (['args.base_output_dir', 'imode', '"""mol_list_cv.tsv"""'], {}), "(args.base_output_dir, imode, 'mol_list_cv.tsv')\n", (2368, 2416), False, 'import os\n'), ((4846, 4905), 'os.path.join', 'os.path.join', (['args.base_output_dir', 'imode', '"""spec2model.tsv"""'], {}), "(args.base_output_dir, imode, 'spec2model.tsv')\n", (4858, 4905), False, 'import os\n'), ((4721, 4827), 'pandas.DataFrame', 'pd.DataFrame', (['df_spec2model[imode]'], {'columns': "['accession', 'cid', 'cv_fold', 'ionization', 'inchikey1']"}), "(df_spec2model[imode], columns=['accession', 'cid', 'cv_fold',\n 'ionization', 'inchikey1'])\n", (4733, 4827), True, 'import pandas as pd\n'), ((5719, 5795), 'os.path.join', 'os.path.join', (['args.base_output_dir', 'imode', "('candidates__cv=%d.csv' % cv_fold)"], {}), "(args.base_output_dir, imode, 'candidates__cv=%d.csv' % cv_fold)\n", (5731, 5795), False, 'import os\n'), ((5302, 5378), 'os.path.join', 'os.path.join', (['args.base_output_dir', 'imode', "('%d__cv=%d.cand' % (cid, cv_fold))"], {}), "(args.base_output_dir, 
imode, '%d__cv=%d.cand' % (cid, cv_fold))\n", (5314, 5378), False, 'import os\n'), ((3860, 3886), 'numpy.random.RandomState', 'np.random.RandomState', (['idx'], {}), '(idx)\n', (3881, 3886), True, 'import numpy as np\n')] |
from collections import Counter, namedtuple
def print_freqs(freqs):
    """
    Write an easy-to-read dump of the arc-label frequencies extracted from
    the corpus to the file frequencies.txt.

    :return: None
    """
    with open('frequencies.txt', 'w') as out:
        for arc, counter in freqs.items():
            out.write(str(arc) + '\n')
            for label, count in counter.most_common():
                out.write('\t' + str(label) + ':\t' + str(count) + '\n')
### Declare useful named tuples
# A single dependency arc: (dependent POS, head POS)
Arc = namedtuple('Arc', 'dep_POS, head_POS')
# One token line of a CoNLL-U file, e.g.
# 1    From    from    ADP    IN    _    3    case    3:case    _
Conll = namedtuple('Conll', 'index, word, lemma, pos, og_pos, ignore, head_index, arc_label, enhanced_label, ignore_2')


def extract_freqs(file):
    '''
    Returns a dictionary of counters of dependency arc frequencies found in file
    First level of dict is arc head pairs, inner dict is frequencies of each label
    :return: Dict of Counters
    '''
    # Read the treebank and split it into sentence chunks
    with open(file, 'r', encoding='utf8') as handle:
        raw_sentences = handle.read().split('\n\n')
    # Parse every non-empty, non-comment line into a Conll named tuple
    parsed = [
        [Conll(*line.split('\t')) for line in chunk.split('\n')
         if line and not line.startswith('#')]
        for chunk in raw_sentences
    ]
    # Collect every observed label per (dependent POS, head POS) pair
    labels_by_arc = {}
    for sentence in parsed:
        for token in sentence:
            # skip empty/enhanced-dependency nodes (index like "3.1")
            if '.' in token.index:
                continue
            try:
                head_pos = sentence[int(token.head_index) - 1].pos
                labels_by_arc.setdefault(Arc(token.pos, head_pos), []).append(token.arc_label)
            except ValueError as e:
                print(sentence)
                print(e)
                print(token)
    # Turn each label list into a Counter of label frequencies
    return {arc: Counter(labels) for arc, labels in labels_by_arc.items()}
if __name__== "__main__":
    # Build and dump arc-label frequencies for the English-EWT UD training
    # treebank (path is relative to this script's expected working directory).
    freqs = extract_freqs('../ud-treebanks-v2.4/UD_English-EWT/en_ewt-ud-train.conllu')
    print_freqs(freqs)
"collections.Counter",
"collections.namedtuple"
] | [((506, 544), 'collections.namedtuple', 'namedtuple', (['"""Arc"""', '"""dep_POS, head_POS"""'], {}), "('Arc', 'dep_POS, head_POS')\n", (516, 544), False, 'from collections import Counter, namedtuple\n'), ((686, 806), 'collections.namedtuple', 'namedtuple', (['"""Conll"""', '"""index, word, lemma, pos, og_pos, ignore, head_index, arc_label, enhanced_label, ignore_2"""'], {}), "('Conll',\n 'index, word, lemma, pos, og_pos, ignore, head_index, arc_label, enhanced_label, ignore_2'\n )\n", (696, 806), False, 'from collections import Counter, namedtuple\n'), ((2281, 2291), 'collections.Counter', 'Counter', (['v'], {}), '(v)\n', (2288, 2291), False, 'from collections import Counter, namedtuple\n')] |
# -*- coding: utf-8 -*-
'''Module that defines classes and functions for Brillouin zone sampling
'''
import os
import re
from copy import deepcopy
import numpy as np
from mykit.core._control import (build_tag_map_obj, extract_from_tagdict,
parse_to_tagdict, prog_mapper, tags_mapping)
from mykit.core.log import Verbose
from mykit.core.numeric import Prec
# from mykit.core.utils import if_vec_same_direction
# Allowed pattern for a single kpoint symbol: one or two capital letters
KSYM_PATTERN = r'[A-Z]{1,2}'
# Full pattern for one continuous kpath segment string, e.g. "GM-W-X"
KPATH_PATTERN = r'^('+ KSYM_PATTERN + r'-)+' + KSYM_PATTERN + r'$'
class KmeshError(Exception):
    """Exception raised for errors in k-mesh and k-path handling."""
    pass
class kmesh_control(Verbose, prog_mapper):
    """Control the tags related to Brillouin-zone (k-mesh) sampling.

    Tags are stored internally under the "mykit" naming convention and
    mapped from/to program-specific names via the JSON metadata file.
    """
    _meta = os.path.join(os.path.dirname(__file__), 'metadata', 'kmeshmap.json')
    _tagMaps = build_tag_map_obj(_meta, "mykit", "json")
    _kmeshTagMaps = _tagMaps
    _kmeshValMaps = {}
    def __init__(self, progName, **kmargs):
        self._kmeshTags = {}
        self._parse_kmeshtags(progName, **kmargs)
    def parse_tags(self, progName, **kmtags):
        '''Parse tags named in *progName* convention into internal storage.'''
        self._parse_kmeshtags(progName, **kmtags)
    def _parse_kmeshtags(self, progName, **kmtags):
        # Nothing to do for an empty tag set
        if len(kmtags) == 0:
            return
        parse_to_tagdict(self._kmeshTags, self._kmeshTagMaps, progName, **kmtags)
    def delete_tags(self, progName, *tags):
        '''Delete tags named in *progName* convention from storage.'''
        self._pop_kmeshtags(progName, *tags)
    def pop_tags(self, progName, *tags):
        '''Pop and return the values of tags named in *progName* convention.'''
        return self._pop_kmeshtags(progName, *tags)
    def _pop_kmeshtags(self, progName, *tags):
        vals = self._kmeshtag_vals(progName, *tags, delete=True)
        return vals
    def _get_one_mykit_tag(self, kmTagName):
        return self._get_one_kmeshtag(kmTagName)
    def _get_one_kmeshtag(self, kmTagName):
        return self._kmeshTags.get(kmTagName, None)
    def tag_vals(self, progName, *tags):
        '''Return the values of tags named in *progName* convention.'''
        return self._kmeshtag_vals(progName, *tags)
    def _kmeshtag_vals(self, progName, *tags, delete=False):
        if len(tags) == 0:
            return []
        vals = extract_from_tagdict(kmesh_control, self._kmeshTags, progName, *tags, delete=delete)
        return vals
    @property
    def kmeshTags(self):
        '''dict: all currently stored k-mesh tags (mykit names).'''
        return self._kmeshTags
    @property
    def kmode(self):
        '''Value of the "kmode" tag, or None if unset.'''
        return self._kmeshTags.get("kmode")
    @property
    def kdiv(self):
        '''Value of the "div" tag, or None if unset.'''
        return self._kmeshTags.get("div")
    @classmethod
    def map_tags(cls, *tags, progFrom="mykit", progTo="mykit", getAll=False):
        '''Map tag names from *progFrom* to *progTo* naming convention.'''
        _pF = progFrom.lower()
        _pT = progTo.lower()
        # BUG FIX: _pF was previously passed twice (and _pT unused), so the
        # progTo argument was silently ignored by the mapping.
        return tags_mapping(cls._kmeshTagMaps, _pF, _pT, *tags, getAll=getAll)
def kpath_decoder(kpath):
    '''Decode a kpath string into a flat list of kpoint symbols.

    Entries at even and odd indices (from 0) are the starting and ending
    symbol of one line segment in reciprocal space, respectively.  Several
    continuous paths may be given, separated by whitespace; each must match
    ``KPATH_PATTERN``, otherwise `KmeshError` is raised.

    Args:
        kpath (str): the string containing kpoint symbols and
            representing a trajectory in reciprocal space

    Examples:
        >>> kpath_decoder("A-B-C D-E")
        ["A", "B", "B", "C", "D", "E"]
        >>> kpath_decoder("GM-W-X-L-GM-X")
        ["GM", "W", "W", "X", "X", "L", "L", "GM", "GM", "X"]
    '''
    try:
        lines = kpath.split()
    except (AttributeError, SyntaxError):
        raise KmeshError("Input kpath should be string: {}".format(kpath))
    # compiled pattern for one continuous path segment
    segPat = re.compile(KPATH_PATTERN)
    decoded = []
    for line in lines:
        if re.match(segPat, line) is None:
            raise KmeshError("Invalid kpath line string: {}".format(line))
        syms = line.split('-')
        # walk consecutive symbol pairs of this continuous path
        for st, ed in zip(syms[:-1], syms[1:]):
            if st == ed:
                raise KmeshError("kpath with zero length: {}-{}".format(st, ed))
            decoded.append(st)
            decoded.append(ed)
    return decoded
def kpath_encoder(ksyms):
    '''Encode a list/tuple of kpath symbols into a complete kpath string.

    Consecutive pairs of symbols form line segments; a segment that starts
    where the previous one ended is chained with "-", otherwise the segments
    are separated by a space.

    Args:
        ksyms (list or tuple): container of kpath symbols, must have an
            even length

    Raises:
        KmeshError: for non-list/tuple input, odd length, zero-length
            segments or invalid kpoint symbols.
    '''
    # Fixed: explicit checks instead of ``assert``, so the validation still
    # runs when Python is started with ``-O``.
    if not isinstance(ksyms, (list, tuple)):
        raise KmeshError("require list or tuple, received {}".format(type(ksyms)))
    if len(ksyms) % 2 != 0:
        raise KmeshError("require even length, received {}".format(len(ksyms)))
    symPat = re.compile(r'^' + KSYM_PATTERN + r'$')
    kpath = ''
    lastEnd = ''
    for iSeg in range(len(ksyms) // 2):
        start = ksyms[2 * iSeg]
        end = ksyms[2 * iSeg + 1]
        if start == end:
            raise KmeshError("kpath with zero length: {}-{}".format(start, end))
        for sym in (start, end):
            if not re.match(symPat, sym):
                raise KmeshError("Invalid kpoint symbol: {}".format(sym))
        if iSeg == 0:
            kpath = start + '-' + end
        elif start == lastEnd:
            # continuous with the previous segment: chain with "-"
            kpath += '-' + end
        else:
            # disjoint segment: separate with a space
            kpath += ' ' + start + '-' + end
        lastEnd = end
    return kpath
def _check_valid_ksym_coord_pair(ksym, coord):
    """Validate one (symbol, coordinate) pair.

    Raises KeyError for a bad symbol and ValueError for a coordinate that is
    not a 3-vector.
    """
    if not re.match(r"^" + KSYM_PATTERN + r"$", ksym):
        raise KeyError("Invalid kpoint symbol: {}".format(ksym))
    try:
        shape = np.shape(coord)
    except ValueError:
        # np.shape failing (ragged input) counts as an invalid coordinate
        shape = None
    if shape != (3,):
        raise ValueError("Invalid kpoint coordinate for symbol {}".format(ksym))
def _check_valid_kpath_dict(kpathDict):
    """Validate a kpath dictionary with exactly "symbols" and "coordinates" keys.

    Raises:
        TypeError: when ``kpathDict`` is not a dict.
        KeyError: when the required keys are missing or extra keys exist,
            or when a kpoint symbol is invalid.
        ValueError: when a coordinate is malformed.
    """
    # Fixed: explicit checks instead of ``assert``, so the validation still
    # runs when Python is started with ``-O``.
    if not isinstance(kpathDict, dict):
        raise TypeError("kpath must be dictionary.")
    if set(["symbols", "coordinates"]) != set(kpathDict.keys()):
        raise KeyError("\"symbols\", \"coordinates\" keys not found. Please check")
    # Fixed: removed the pointless catch-and-re-raise around this call; the
    # per-pair validation errors simply propagate.
    for (ksym, coord) in zip(kpathDict["symbols"], kpathDict["coordinates"]):
        _check_valid_ksym_coord_pair(ksym, coord)
def check_kvecs_form_kpath(kvec):
    '''Check if the kpoint vectors form several line segments in the reciprocal space

    Usually, the number of kpoints on one line segments is no less than 3.

    Args:
        kvec (array-like): the kpoint vectors to analysis, shape, (n,3)

    Returns:
        list, with tuple as members. Each tuple has 2 int members,
        the indices of kpoint vectors at the beginning and end of
        a line segment
    '''
    segs = []
    # Reject anything that is not an (n, 3) array-like; np.shape raising
    # TypeError on exotic input is treated as invalid as well.
    try:
        shape = np.shape(kvec)
        assert len(shape) == 2
        assert shape[1] == 3
    except (TypeError, AssertionError):
        return segs
    nkpt = shape[0]
    # a line segment needs at least 3 points
    if nkpt < 3:
        return segs
    _kvec = np.array(kvec, dtype=Prec._dtype)
    # difference vectors between consecutive kpoints
    dkvec = _kvec[1:, :] - _kvec[:-1, :]
    # Normalize each step; zero-length steps are replaced by a large dummy
    # vector so the dot products below never look collinear for them.
    n = np.linalg.norm(dkvec, axis=1)
    for i in range(nkpt-1):
        if np.isclose(n[i], 0):
            dkvec[i,:] = 1000.0
        else:
            dkvec[i,:] = dkvec[i,:]/n[i]
    # Dot product of consecutive unit steps: a value of 1 means three
    # consecutive kpoints are collinear.
    dp = np.sum(dkvec[:-1,:] * dkvec[1:,:], axis=1)
    # Scan for maximal runs of collinear points; [st, ed) brackets the
    # current candidate run.
    st = 0
    ed = 2
    while ed < nkpt:
        if not np.isclose(dp[ed-2], 1):
            # Direction changed: close the current run if it spans at least
            # 3 points, then start a new candidate at the bend.
            if ed - st > 2:
                segs.append((st, ed-1))
            st = ed - 1
        ed = ed + 1
    # close the final run after the scan
    if ed - st > 2:
        segs.append((st, ed-1))
    return segs
# The mapping from kpoint symbol to LaTeX commands.
# Fixed: raw strings keep the backslashes literal, so the
# anomalous-backslash-in-string pylint suppression is no longer needed.
KSYMBOL_LATEX = {
    "GM": r"$\Gamma$",
    "LM": r"$\lambda$",
}
| [
"numpy.isclose",
"re.compile",
"mykit.core._control.build_tag_map_obj",
"mykit.core._control.extract_from_tagdict",
"re.match",
"mykit.core._control.tags_mapping",
"numpy.array",
"numpy.sum",
"os.path.dirname",
"numpy.linalg.norm",
"mykit.core._control.parse_to_tagdict",
"numpy.shape"
] | [((777, 818), 'mykit.core._control.build_tag_map_obj', 'build_tag_map_obj', (['_meta', '"""mykit"""', '"""json"""'], {}), "(_meta, 'mykit', 'json')\n", (794, 818), False, 'from mykit.core._control import build_tag_map_obj, extract_from_tagdict, parse_to_tagdict, prog_mapper, tags_mapping\n'), ((3609, 3634), 're.compile', 're.compile', (['KPATH_PATTERN'], {}), '(KPATH_PATTERN)\n', (3619, 3634), False, 'import re\n'), ((4747, 4783), 're.compile', 're.compile', (["('^' + KSYM_PATTERN + '$')"], {}), "('^' + KSYM_PATTERN + '$')\n", (4757, 4783), False, 'import re\n'), ((7133, 7166), 'numpy.array', 'np.array', (['kvec'], {'dtype': 'Prec._dtype'}), '(kvec, dtype=Prec._dtype)\n', (7141, 7166), True, 'import numpy as np\n'), ((7244, 7273), 'numpy.linalg.norm', 'np.linalg.norm', (['dkvec'], {'axis': '(1)'}), '(dkvec, axis=1)\n', (7258, 7273), True, 'import numpy as np\n'), ((7430, 7474), 'numpy.sum', 'np.sum', (['(dkvec[:-1, :] * dkvec[1:, :])'], {'axis': '(1)'}), '(dkvec[:-1, :] * dkvec[1:, :], axis=1)\n', (7436, 7474), True, 'import numpy as np\n'), ((706, 731), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (721, 731), False, 'import os\n'), ((1233, 1306), 'mykit.core._control.parse_to_tagdict', 'parse_to_tagdict', (['self._kmeshTags', 'self._kmeshTagMaps', 'progName'], {}), '(self._kmeshTags, self._kmeshTagMaps, progName, **kmtags)\n', (1249, 1306), False, 'from mykit.core._control import build_tag_map_obj, extract_from_tagdict, parse_to_tagdict, prog_mapper, tags_mapping\n'), ((2056, 2144), 'mykit.core._control.extract_from_tagdict', 'extract_from_tagdict', (['kmesh_control', 'self._kmeshTags', 'progName', '*tags'], {'delete': 'delete'}), '(kmesh_control, self._kmeshTags, progName, *tags,\n delete=delete)\n', (2076, 2144), False, 'from mykit.core._control import build_tag_map_obj, extract_from_tagdict, parse_to_tagdict, prog_mapper, tags_mapping\n'), ((2588, 2651), 'mykit.core._control.tags_mapping', 'tags_mapping', (['cls._kmeshTagMaps', 
'_pF', '_pF', '*tags'], {'getAll': 'getAll'}), '(cls._kmeshTagMaps, _pF, _pF, *tags, getAll=getAll)\n', (2600, 2651), False, 'from mykit.core._control import build_tag_map_obj, extract_from_tagdict, parse_to_tagdict, prog_mapper, tags_mapping\n'), ((5445, 5485), 're.match', 're.match', (["('^' + KSYM_PATTERN + '$')", 'ksym'], {}), "('^' + KSYM_PATTERN + '$', ksym)\n", (5453, 5485), False, 'import re\n'), ((5579, 5594), 'numpy.shape', 'np.shape', (['coord'], {}), '(coord)\n', (5587, 5594), True, 'import numpy as np\n'), ((6928, 6942), 'numpy.shape', 'np.shape', (['kvec'], {}), '(kvec)\n', (6936, 6942), True, 'import numpy as np\n'), ((7313, 7332), 'numpy.isclose', 'np.isclose', (['n[i]', '(0)'], {}), '(n[i], 0)\n', (7323, 7332), True, 'import numpy as np\n'), ((3692, 3716), 're.match', 're.match', (['linePat', 'kline'], {}), '(linePat, kline)\n', (3700, 3716), False, 'import re\n'), ((7531, 7556), 'numpy.isclose', 'np.isclose', (['dp[ed - 2]', '(1)'], {}), '(dp[ed - 2], 1)\n', (7541, 7556), True, 'import numpy as np\n'), ((5052, 5074), 're.match', 're.match', (['symPat', 'ksym'], {}), '(symPat, ksym)\n', (5060, 5074), False, 'import re\n')] |
from pathlib import Path
from tqdm.auto import tqdm
import numpy as np
import pickle
import os
from astropy.table import Table
import pickle as pkl
from multiprocessing import Pool, Manager
from threading import Lock
from .cones import make_cone_density
from .utils import load_data
from .cones import make_cone
from .constants import x_isgri, x_picsit
class BaseDataset:
    """
    Base dataset class.

    Locates the on-disk ``data`` directory next to this module and provides
    pickle-based save/load of ``self.data``.  Subclasses implement
    :meth:`generate`.
    """
    def __init__(self):
        # data files live in a "data" directory next to this module
        self.basedir = Path(__file__).parent.joinpath('data')

    def generate(self, src_dir):
        """
        Generate the dataset from a source directory of ``*.npy`` files.

        :param src_dir: path to the directory of source files
        :raises NotImplementedError: always; subclasses must override
        """
        self.data = None
        # Fixed: removed the unreachable ``return self.data`` that followed
        # this unconditional raise.
        raise NotImplementedError("This is only the base class, supercharge this method please")

    def save(self, filename='basefile.pickle'):
        """Pickle ``self.data`` to ``<basedir>/<filename>`` and remember the path."""
        self.filepath = self.basedir.joinpath(filename)
        with open(self.filepath, 'wb') as file:
            pickle.dump(self.data, file)

    def load(self, filename=None):
        """Unpickle ``self.data`` from ``filename`` (or the last used path)."""
        if filename is not None:
            self.filepath = self.basedir.joinpath(filename)
        with open(self.filepath, 'rb') as file:
            self.data = pickle.load(file)
"""Generation of the Cone Density Dataset with a single source
"""
class SingleSourceDensityDataset:
target_filename = "single_source_density_dataset.pkl"
source_directory = "save_Compton"
max_threads = 1
n = 100
def __init__(self, filename=None):
if filename is not None:
self.filename = filename
pass
def generate(self):
"""Create the datafile
"""
# get cone density data for all files in dataset
manager = Manager()
data = manager.list()
labels = manager.list()
lock = Lock()
def get_data(filename):
for i in range(self.n):
print("Loading from {} {}".format(filename, i))
if filename.endswith(".npy"):
_, theta_source, _, phi_source = filename.replace(".npy", "").split("_")
lock.acquire()
labels.append([float(theta_source), float(phi_source)])
data.append(make_cone_density(theta_source, phi_source, x_isgri, x_picsit, progress=False,
n_events=[100, 2000]))
lock.release()
if len(data) % 100 == 0:
print("Aquiring lock")
lock.acquire()
# load data already available
x, y = pkl.load(open(self.target_filename))
new_x, new_y = np.array(list(data)), np.array(list(labels))
x = np.concatenate((x, new_x), axis=0)
y = np.concatenate((y, new_y), axis=0)
pkl.dump((x, y), open(self.target_filename, "wb"))
# clear the data and label lists
data.clear()
labels.clear()
lock.release()
print("Realeased lock")
with Pool(self.max_threads, maxtasksperchild=10) as p:
for t in p.imap(get_data, os.listdir("save_Compton"), chunksize=365):
pass
@staticmethod
def load(filename=None):
"""Load the dataset from the pickle file
"""
if filename is not None:
return pkl.load(open(SingleSourceDensityDataset.target_filename))
return pkl.load(open(SingleSourceDensityDataset.target_filename))
| [
"os.listdir",
"pickle.dump",
"pathlib.Path",
"threading.Lock",
"pickle.load",
"multiprocessing.Pool",
"numpy.concatenate",
"multiprocessing.Manager"
] | [((1710, 1719), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (1717, 1719), False, 'from multiprocessing import Pool, Manager\n'), ((1797, 1803), 'threading.Lock', 'Lock', ([], {}), '()\n', (1801, 1803), False, 'from threading import Lock\n'), ((964, 992), 'pickle.dump', 'pickle.dump', (['self.data', 'file'], {}), '(self.data, file)\n', (975, 992), False, 'import pickle\n'), ((1194, 1211), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (1205, 1211), False, 'import pickle\n'), ((3122, 3165), 'multiprocessing.Pool', 'Pool', (['self.max_threads'], {'maxtasksperchild': '(10)'}), '(self.max_threads, maxtasksperchild=10)\n', (3126, 3165), False, 'from multiprocessing import Pool, Manager\n'), ((3210, 3236), 'os.listdir', 'os.listdir', (['"""save_Compton"""'], {}), "('save_Compton')\n", (3220, 3236), False, 'import os\n'), ((462, 476), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (466, 476), False, 'from pathlib import Path\n'), ((2743, 2777), 'numpy.concatenate', 'np.concatenate', (['(x, new_x)'], {'axis': '(0)'}), '((x, new_x), axis=0)\n', (2757, 2777), True, 'import numpy as np\n'), ((2802, 2836), 'numpy.concatenate', 'np.concatenate', (['(y, new_y)'], {'axis': '(0)'}), '((y, new_y), axis=0)\n', (2816, 2836), True, 'import numpy as np\n')] |
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
import numpy as np
import pandas as pd
import pickle as pi
class Classifier:
    """Trains a selection of scikit-learn classifiers and collects their
    test-set accuracies.
    """

    def __init__(self):
        # collected results: one [name, accuracy, fitted estimator] per model
        self.ergebnis = []

    def train_models(self, X_train, X_test, y_train, y_test, models):
        """Fit every model named in ``models`` and score it on the test set.

        :param models: iterable of model keys: 'knn', 'dt', 'rf', 'svm', 'mlp'
        :return: list of [model name, test accuracy, fitted estimator]
        """
        # NOTE: the loop variable is kept on ``self`` for backward
        # compatibility with callers that read ``clf.model`` afterwards.
        for self.model in models:
            # -----------------------
            # k-nearest neighbours
            # -----------------------
            if self.model == 'knn':
                # k=7 was chosen offline via an elbow search over k=1..39.
                # Fixed: removed the dead search loop that refit 39
                # throw-away models on every call and discarded the result.
                knnclf = KNeighborsClassifier(n_neighbors=7)
                knnclf.fit(X_train, y_train)
                score = knnclf.score(X_test, y_test)
                self.ergebnis.append(['knn-classifier', score, knnclf])
            # -----------------------
            # decision tree
            # -----------------------
            elif self.model == 'dt':
                # grid search over split criterion, depth and min samples
                dt = DecisionTreeClassifier()
                tree_para = {'criterion': ['gini', 'entropy'],
                             'max_depth': [i for i in range(1, 20)],
                             'min_samples_split': [i for i in range(2, 20)]}
                grd_clf = GridSearchCV(dt, tree_para, cv=5)
                grd_clf.fit(X_train, y_train)
                # keep the best tree found by the grid search
                dt_clf = grd_clf.best_estimator_
                score = dt_clf.score(X_test, y_test)
                self.ergebnis.append(['decision tree', score, dt_clf])
            # -----------------------
            # random forest
            # -----------------------
            elif self.model == 'rf':
                rf = RandomForestClassifier(n_estimators=100)
                rf.fit(X_train, y_train)
                score = rf.score(X_test, y_test)
                self.ergebnis.append(['random forest', score, rf])
            # -----------------------
            # support vector machine
            # -----------------------
            elif self.model == 'svm':
                svm = SVC(kernel='poly')
                svm.fit(X_train, y_train)
                score = svm.score(X_test, y_test)
                self.ergebnis.append(['support vector machine', score, svm])
            # -----------------------
            # multi-layer perceptron
            # -----------------------
            elif self.model == 'mlp':
                mlp = MLPClassifier(hidden_layer_sizes=[100, 100], max_iter=5000, solver='sgd',
                                    learning_rate='adaptive', learning_rate_init=0.01,
                                    n_iter_no_change=200, early_stopping=True)
                mlp.fit(X_train, y_train)
                score = mlp.score(X_test, y_test)
                self.ergebnis.append(['multi-layer perceptron', score, mlp])
                print("iterations: {}; layers: {}; loss: {}".format(mlp.n_iter_, mlp.n_layers_, mlp.loss_))
                # Fixed: removed the unused ``epochs`` array and the
                # commented-out plotting code that consumed it.
        return self.ergebnis
"sklearn.model_selection.GridSearchCV",
"numpy.mean",
"sklearn.neural_network.MLPClassifier",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.ensemble.RandomForestClassifier",
"numpy.linspace",
"sklearn.svm.SVC"
] | [((1092, 1127), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(7)'}), '(n_neighbors=7)\n', (1112, 1127), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((832, 867), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'i'}), '(n_neighbors=i)\n', (852, 867), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((1722, 1746), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (1744, 1746), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((1950, 1983), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['dt', 'tree_para'], {'cv': '(5)'}), '(dt, tree_para, cv=5)\n', (1962, 1983), False, 'from sklearn.model_selection import GridSearchCV\n'), ((996, 1021), 'numpy.mean', 'np.mean', (['(pred_i != y_test)'], {}), '(pred_i != y_test)\n', (1003, 1021), True, 'import numpy as np\n'), ((2560, 2600), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(100)'}), '(n_estimators=100)\n', (2582, 2600), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2964, 2982), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""poly"""'}), "(kernel='poly')\n", (2967, 2982), False, 'from sklearn.svm import SVC\n'), ((3305, 3480), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'hidden_layer_sizes': '[100, 100]', 'max_iter': '(5000)', 'solver': '"""sgd"""', 'learning_rate': '"""adaptive"""', 'learning_rate_init': '(0.01)', 'n_iter_no_change': '(200)', 'early_stopping': '(True)'}), "(hidden_layer_sizes=[100, 100], max_iter=5000, solver='sgd',\n learning_rate='adaptive', learning_rate_init=0.01, n_iter_no_change=200,\n early_stopping=True)\n", (3318, 3480), False, 'from sklearn.neural_network import MLPClassifier\n'), ((3790, 3830), 'numpy.linspace', 'np.linspace', (['(1)', 'mlp.n_iter_', 'mlp.n_iter_'], {}), '(1, mlp.n_iter_, mlp.n_iter_)\n', (3801, 3830), True, 'import numpy 
as np\n')] |
def brute_force_root_finder(f, a, b, n):
    """Find approximate roots of ``f`` on [a, b] by sampling ``n`` points.

    The interval is sampled uniformly; a root is reported at every exact
    zero of a sample and, via linear interpolation, inside every
    subinterval where the samples change sign.

    :param f: vectorised function of one variable (called with a numpy array)
    :param a: left end of the interval
    :param b: right end of the interval
    :param n: number of sample points
    :return: list of approximate roots, in increasing order
    """
    from numpy import linspace
    x = linspace(a, b, n)
    y = f(x)
    roots = []
    for i in range(n-1):
        if y[i]*y[i+1] < 0:
            # sign change: interpolate the crossing linearly
            root = x[i] - (x[i+1] - x[i])/(y[i+1] - y[i])*y[i]
            roots.append(root)
        elif y[i] == 0:
            roots.append(x[i])
    # Fixed: the loop never inspects the last sample, so a root falling
    # exactly on the right endpoint ``b`` used to be missed.
    if n > 0 and y[n-1] == 0:
        roots.append(x[n-1])
    return roots
def demo():
    """Locate the roots of exp(-x^2)*cos(4x) on [0, 4] and print them."""
    from numpy import exp, cos

    def damped_oscillation(x):
        return exp(-x**2)*cos(4*x)

    found = brute_force_root_finder(damped_oscillation, 0, 4, 1001)
    if not found:
        print('Could not find any roots')
    else:
        print(found)
if __name__ == '__main__':
    # run the demonstration only when executed as a script, not on import
    demo()
| [
"numpy.exp",
"numpy.linspace",
"numpy.cos"
] | [((82, 99), 'numpy.linspace', 'linspace', (['a', 'b', 'n'], {}), '(a, b, n)\n', (90, 99), False, 'from numpy import linspace\n'), ((491, 503), 'numpy.exp', 'exp', (['(-x ** 2)'], {}), '(-x ** 2)\n', (494, 503), False, 'from numpy import exp, cos\n'), ((502, 512), 'numpy.cos', 'cos', (['(4 * x)'], {}), '(4 * x)\n', (505, 512), False, 'from numpy import exp, cos\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# Build the list of sample file names.
items = ['KMS1.kmch.pos.out_dE_{}.mx'.format(i) for i in range(20)]

# Shuffle the list elements.
import random
random.shuffle(items)
print(items)

# Plain lexicographic sorting does not order the numeric suffixes correctly.
print(sorted(items))
print()
def get_number_1(x):
    """Extract the trailing integer from names like 'KMS1.kmch.pos.out_dE_7.mx'
    by string splitting."""
    stem = x.split('.')[-2]
    return int(stem.rsplit('_', 1)[-1])
def get_number_2(x):
    """Extract the trailing integer using a regular expression.

    Fixed: the pattern is now a raw string with escaped dots, so ``\\d`` is a
    real digit class and ``.`` no longer matches arbitrary characters; a
    non-matching name now raises a descriptive ValueError instead of an
    AttributeError on ``None``.
    """
    import re
    match = re.search(r'KMS1\.kmch\.pos\.out_dE_(\d+)\.mx', x)
    if match is None:
        raise ValueError('unrecognised file name: {}'.format(x))
    return int(match.group(1))
# Numeric-aware sorting via the key functions orders the names correctly.
print(sorted(items, key=get_number_1))
print(sorted(items, key=get_number_2))
| [
"random.shuffle",
"re.search"
] | [((203, 224), 'random.shuffle', 'random.shuffle', (['items'], {}), '(items)\n', (217, 224), False, 'import random\n'), ((422, 468), 're.search', 're.search', (['"""KMS1.kmch.pos.out_dE_(\\\\d+).mx"""', 'x'], {}), "('KMS1.kmch.pos.out_dE_(\\\\d+).mx', x)\n", (431, 468), False, 'import re\n')] |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a ``chart_position`` field to every chart plugin model and relax
    ``legend_position`` to a blank-able CharField.

    All six chart models receive identical field definitions, so the
    repetitive operation list is generated instead of written out by hand;
    the resulting operations are identical to the expanded form.
    """

    dependencies = [
        ('djangocms_charts', '0001_initial'),
    ]

    # suffixes of the chart models that all receive the same treatment
    CHART_MODELS = ['bar', 'doughnut', 'line', 'pie', 'polar', 'radar']

    operations = [
        migrations.AddField(
            model_name='chartjs{}model'.format(suffix),
            name='chart_position',
            field=models.CharField(max_length=100, verbose_name='Chart Position', blank=True),
        )
        for suffix in CHART_MODELS
    ] + [
        migrations.AlterField(
            model_name='chartjs{}model'.format(suffix),
            name='legend_position',
            field=models.CharField(max_length=100, verbose_name='Legend Position', blank=True),
        )
        for suffix in CHART_MODELS
    ]
| [
"django.db.models.CharField"
] | [((303, 378), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""Chart Position"""', 'blank': '(True)'}), "(max_length=100, verbose_name='Chart Position', blank=True)\n", (319, 378), False, 'from django.db import migrations, models\n'), ((520, 595), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""Chart Position"""', 'blank': '(True)'}), "(max_length=100, verbose_name='Chart Position', blank=True)\n", (536, 595), False, 'from django.db import migrations, models\n'), ((733, 808), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""Chart Position"""', 'blank': '(True)'}), "(max_length=100, verbose_name='Chart Position', blank=True)\n", (749, 808), False, 'from django.db import migrations, models\n'), ((945, 1020), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""Chart Position"""', 'blank': '(True)'}), "(max_length=100, verbose_name='Chart Position', blank=True)\n", (961, 1020), False, 'from django.db import migrations, models\n'), ((1159, 1234), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""Chart Position"""', 'blank': '(True)'}), "(max_length=100, verbose_name='Chart Position', blank=True)\n", (1175, 1234), False, 'from django.db import migrations, models\n'), ((1373, 1448), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""Chart Position"""', 'blank': '(True)'}), "(max_length=100, verbose_name='Chart Position', blank=True)\n", (1389, 1448), False, 'from django.db import migrations, models\n'), ((1588, 1664), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""Legend Position"""', 'blank': '(True)'}), "(max_length=100, verbose_name='Legend Position', blank=True)\n", (1604, 1664), False, 'from django.db import migrations, 
models\n'), ((1809, 1885), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""Legend Position"""', 'blank': '(True)'}), "(max_length=100, verbose_name='Legend Position', blank=True)\n", (1825, 1885), False, 'from django.db import migrations, models\n'), ((2026, 2102), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""Legend Position"""', 'blank': '(True)'}), "(max_length=100, verbose_name='Legend Position', blank=True)\n", (2042, 2102), False, 'from django.db import migrations, models\n'), ((2242, 2318), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""Legend Position"""', 'blank': '(True)'}), "(max_length=100, verbose_name='Legend Position', blank=True)\n", (2258, 2318), False, 'from django.db import migrations, models\n'), ((2460, 2536), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""Legend Position"""', 'blank': '(True)'}), "(max_length=100, verbose_name='Legend Position', blank=True)\n", (2476, 2536), False, 'from django.db import migrations, models\n'), ((2678, 2754), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""Legend Position"""', 'blank': '(True)'}), "(max_length=100, verbose_name='Legend Position', blank=True)\n", (2694, 2754), False, 'from django.db import migrations, models\n')] |
import geo.geo_utils
import geo.raster_lookup
from progress.null_callback import NullCallback
from progress.progress import Progress
import glob
import numpy as np
class Heightmap:
    """In-memory elevation grid sampled from a raster elevation source."""

    def __init__(self):
        # flat list of elevation samples, filled as index = x + y*width
        self.pixels = []
        # numpy array of the samples; set by createFromRaster/loadFromFile
        self.heightmap = None
        # elevation substituted where the raster reports no data
        self.nodata_fillin = 0
        # diagnostics counters, reported by getStatistics()
        self.out_of_bounds_count = 0
        self.nodata_count = 0

    def createFromRaster(self,
                         raster_lookup,
                         geo_transform,
                         heightmap_size,
                         progress_callback=NullCallback()):
        """Sample ``raster_lookup`` over a heightmap_size[0] x heightmap_size[1]
        grid and store the result in ``self.heightmap``.

        ``geo_transform`` maps pixel (x, y) to a geographic location and the
        ``progress_callback`` is invoked once per pixel with a Progress
        object.  Returns ``self`` for chaining.
        """
        pixel_count = heightmap_size[0] * heightmap_size[1]
        self.pixels = [0 for i in range(pixel_count)]
        for y in range(heightmap_size[1]):
            for x in range(heightmap_size[0]):
                geo_pos = geo_transform.transformPixelLocationToGeoLocation(x, y)
                # row-major flat index: x varies fastest
                pixel_index = x + y*heightmap_size[0]
                if raster_lookup.locationInBounds(geo_pos[0], geo_pos[1]):
                    elevation = raster_lookup.getElevationAtPosition(geo_pos[0], geo_pos[1])
                    if elevation is not None:
                        self.pixels[pixel_index] = elevation
                    else:
                        # in bounds but no measurement: use the fill-in value
                        self.pixels[pixel_index] = self.nodata_fillin
                        self.nodata_count += 1
                else:
                    # out of bounds: the pixel keeps its initial 0 value
                    self.out_of_bounds_count += 1
                progress_callback(Progress(progress=pixel_index + 1,
                                           message="Creating heightmap",
                                           max_progress=heightmap_size[0] * heightmap_size[1],))
        # NOTE(review): pixels are indexed x + y*heightmap_size[0], but the
        # flat list is reshaped straight to heightmap_size; if heightmap_size
        # is (width, height) the resulting axes are transposed relative to
        # the fill order — confirm the intended convention with callers.
        raster_matrix = np.array(self.pixels).reshape(heightmap_size)
        self.heightmap = raster_matrix
        return self

    def pixelCount(self):
        # total number of samples in the current heightmap
        return self.heightmap.shape[0] * self.heightmap.shape[1]

    def getStatistics(self):
        """Return nodata/out-of-bounds percentages of the last raster run."""
        return {
            'out_of_bounds_percentage':
            100.0 * float(self.out_of_bounds_count) / self.pixelCount(),
            'nodata_percentage':
            100.0 * float(self.nodata_count) / self.pixelCount()
        }

    def loadFromFile(self, file_name):
        """Load a saved heightmap (.npy) and rebuild the flat pixel list."""
        self.heightmap = np.load(file_name)
        self.pixels = list(self.heightmap.reshape(self.heightmap.shape[0] * self.heightmap.shape[1]))
        return self

    def writeToFile(self, file_name):
        """Save the heightmap with numpy.save; raises if nothing is loaded."""
        if self.heightmap is None or len(self.pixels) <= 0:
            raise Exception("Heigtmap is not loaded")
        np.save(file_name, self.heightmap)

    def getWidth(self):
        # NOTE(review): assumes axis 0 is the width — see the reshape note
        # in createFromRaster.
        return self.heightmap.shape[0]

    def getHeight(self):
        return self.heightmap.shape[1]

    def getHeightmap(self):
        return self.heightmap
| [
"progress.null_callback.NullCallback",
"numpy.array",
"numpy.load",
"progress.progress.Progress",
"numpy.save"
] | [((557, 571), 'progress.null_callback.NullCallback', 'NullCallback', ([], {}), '()\n', (569, 571), False, 'from progress.null_callback import NullCallback\n'), ((2163, 2181), 'numpy.load', 'np.load', (['file_name'], {}), '(file_name)\n', (2170, 2181), True, 'import numpy as np\n'), ((2466, 2500), 'numpy.save', 'np.save', (['file_name', 'self.heightmap'], {}), '(file_name, self.heightmap)\n', (2473, 2500), True, 'import numpy as np\n'), ((1624, 1645), 'numpy.array', 'np.array', (['self.pixels'], {}), '(self.pixels)\n', (1632, 1645), True, 'import numpy as np\n'), ((1440, 1560), 'progress.progress.Progress', 'Progress', ([], {'progress': '(pixel_index + 1)', 'message': '"""Creating heightmap"""', 'max_progress': '(heightmap_size[0] * heightmap_size[1])'}), "(progress=pixel_index + 1, message='Creating heightmap',\n max_progress=heightmap_size[0] * heightmap_size[1])\n", (1448, 1560), False, 'from progress.progress import Progress\n')] |
#!/usr/bin/env python
# coding: utf_8
import os
import csv, sqlite3
import unicodedata
import pdb
# 0 全国地方公共団体コード
# 1 旧郵便番号
# 2 郵便番号
# 3 都道府県名
# 4 市区町村名
# 5 町域名
# 6 都道府県名
# 7 市区町村名
# 8 町域名
# 9 一町域が二以上の郵便番号で表される場合の表示 (注3) (「1」は該当、「0」は該当せず)
# 10 小字毎に番地が起番されている町域の表示 (注4) (「1」は該当、「0」は該当せず)
# 11 丁目を有する町域の場合の表示 (「1」は該当、「0」は該当せず)
# 12 一つの郵便番号で二以上の町域を表す場合の表示 (注5) (「1」は該当、「0」は該当せず)
# 13 更新の表示(注6)(「0」は変更なし、「1」は変更あり、「2」廃止(廃止データのみ使用))
# 14 変更理由(「0」は変更なし、「1」市政・区政・町政・分区・政令指定都市施行、「2」住居表示の実施、「3」区画整理、「4」郵便区調整等、「5」訂正、「6」廃止(廃止データのみ使用))
class PostalCode:
    """Build and query a SQLite database of Japanese postal codes from the
    Japan Post ``KEN_ALL.CSV`` file, and enrich CSV files with full
    addresses looked up by postal code.
    """

    @staticmethod
    def make_db(
        ken_all_csv="./assets/KEN_ALL.CSV", postalcode_sqlite3="./postalcode.sqlite3"
    ):
        """(Re)create ``postalcode_sqlite3`` from ``ken_all_csv`` (Shift_JIS)."""
        if os.path.isfile(postalcode_sqlite3):
            os.remove(postalcode_sqlite3)
        # open the SQLite3 database
        conn = sqlite3.connect(postalcode_sqlite3)
        c = conn.cursor()
        # create the lookup table
        c.execute(
            """CREATE TABLE zip (
            postc text, pref text, prefr text, city text, cityr text, addr text, addrr text)"""
        )
        c.execute("begin")
        # open the CSV file
        with open(ken_all_csv, "rt", encoding="Shift_JIS") as fp:
            reader = csv.reader(fp)
            # process one row at a time
            for row in reader:
                postc = row[2]  # postal code
                prefr = unicodedata.normalize("NFKC", row[3])  # prefecture (reading)
                cityr = unicodedata.normalize("NFKC", row[4])  # city (reading)
                addrr = unicodedata.normalize("NFKC", row[5])  # town area (reading)
                pref = row[6]  # prefecture
                city = row[7]  # city
                addr = row[8]  # town area
                if addr == "以下に掲載がない場合":
                    # "if not listed below" placeholder -> store empty town area
                    addr = ""
                # insert via a parameterised statement
                c.execute(
                    """INSERT INTO zip (postc,pref,prefr,city,cityr,addr,addrr)
                    VALUES(?,?,?,?,?,?,?)""",
                    (postc, pref, prefr, city, cityr, addr, addrr),
                )
        c.execute("commit")
        conn.close()

    @staticmethod
    def add_fulladdr(
        incomplete_csv="./incomplete.csv",
        postalcode_sqlite3="./postalcode.sqlite3",
        output_csv="./output.csv",
    ):
        """Copy ``incomplete_csv`` to ``output_csv``, filling in the
        prefecture/city/town-area columns looked up from the ``postc``
        column.  Returns False when the database file does not exist.
        """
        if not os.path.isfile(postalcode_sqlite3):
            return False
        # open the SQLite3 database
        with sqlite3.connect(postalcode_sqlite3) as conn:
            c = conn.cursor()
            # open the input CSV file
            with open(
                incomplete_csv, "r", encoding="utf_8_sig", errors="", newline=""
            ) as fp:
                with open(output_csv, "w", encoding="utf_8_sig") as csvoutput:
                    reader = csv.DictReader(
                        fp,
                        delimiter=",",
                        quotechar='"',
                        doublequote=True,
                        skipinitialspace=True,
                    )
                    # move the address columns to the front of the header
                    fn = reader.fieldnames.copy()
                    colmns = [
                        "postc",
                        "都道府県",
                        "市区町村",
                        "町域",
                        "番地",
                        "建物名等",
                    ]
                    for cn in colmns:
                        if fn.count(cn):
                            fn.remove(cn)
                    fn = colmns + fn
                    writer = csv.DictWriter(csvoutput, fieldnames=fn)
                    writer.writeheader()
                    # process one row at a time, filling in the address
                    for i, row in enumerate(reader):
                        row["都道府県"], row["市区町村"], row["町域"] = PostalCode.get_detail(
                            c, row["postc"]
                        )
                        writer.writerow(row)

    @staticmethod
    def get_detail(cursor, postc):
        """Return (prefecture, city, town area) for postal code ``postc``.

        Fixed: the lookup now uses a parameterised query instead of
        interpolating ``postc`` into the SQL text, which both prevents SQL
        injection and lets codes with leading zeros match the TEXT column.
        """
        cursor.execute("SELECT * FROM zip WHERE postc = ?", (postc,))
        hit = cursor.fetchone()
        if hit is None:
            print(f"Code: {postc} is invalid code!!")
            return ("郵便番号が正しくありません", "郵便番号が正しくありません", "郵便番号が正しくありません")
        return (hit[1], hit[3], hit[5])
def main():
    # build the postal-code database with the default input/output paths
    PostalCode.make_db()
if __name__ == "__main__":
main()
| [
"csv.DictWriter",
"csv.DictReader",
"sqlite3.connect",
"os.path.isfile",
"unicodedata.normalize",
"csv.reader",
"os.remove"
] | [((665, 699), 'os.path.isfile', 'os.path.isfile', (['postalcode_sqlite3'], {}), '(postalcode_sqlite3)\n', (679, 699), False, 'import os\n'), ((795, 830), 'sqlite3.connect', 'sqlite3.connect', (['postalcode_sqlite3'], {}), '(postalcode_sqlite3)\n', (810, 830), False, 'import csv, sqlite3\n'), ((713, 742), 'os.remove', 'os.remove', (['postalcode_sqlite3'], {}), '(postalcode_sqlite3)\n', (722, 742), False, 'import os\n'), ((1201, 1215), 'csv.reader', 'csv.reader', (['fp'], {}), '(fp)\n', (1211, 1215), False, 'import csv, sqlite3\n'), ((2246, 2280), 'os.path.isfile', 'os.path.isfile', (['postalcode_sqlite3'], {}), '(postalcode_sqlite3)\n', (2260, 2280), False, 'import os\n'), ((2349, 2384), 'sqlite3.connect', 'sqlite3.connect', (['postalcode_sqlite3'], {}), '(postalcode_sqlite3)\n', (2364, 2384), False, 'import csv, sqlite3\n'), ((1333, 1370), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFKC"""', 'row[3]'], {}), "('NFKC', row[3])\n", (1354, 1370), False, 'import unicodedata\n'), ((1409, 1446), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFKC"""', 'row[4]'], {}), "('NFKC', row[4])\n", (1430, 1446), False, 'import unicodedata\n'), ((1485, 1522), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFKC"""', 'row[5]'], {}), "('NFKC', row[5])\n", (1506, 1522), False, 'import unicodedata\n'), ((2709, 2802), 'csv.DictReader', 'csv.DictReader', (['fp'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'doublequote': '(True)', 'skipinitialspace': '(True)'}), '(fp, delimiter=\',\', quotechar=\'"\', doublequote=True,\n skipinitialspace=True)\n', (2723, 2802), False, 'import csv, sqlite3\n'), ((3421, 3461), 'csv.DictWriter', 'csv.DictWriter', (['csvoutput'], {'fieldnames': 'fn'}), '(csvoutput, fieldnames=fn)\n', (3435, 3461), False, 'import csv, sqlite3\n')] |
from distutils.core import setup
from Cython.Build import cythonize
# Build the Cython extension in place, as if the script had been invoked
# with ``python setup.py build_ext --inplace``.
setup(
    name = "tax",
    ext_modules = cythonize('tax.pyx'),
    script_name = 'setup.py',
    script_args = ['build_ext', '--inplace']
)
# Smoke-test the freshly built extension module.
import tax
import numpy as np
print(tax.tax(np.ones(10)))
| [
"Cython.Build.cythonize",
"numpy.ones"
] | [((112, 132), 'Cython.Build.cythonize', 'cythonize', (['"""tax.pyx"""'], {}), "('tax.pyx')\n", (121, 132), False, 'from Cython.Build import cythonize\n'), ((255, 266), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (262, 266), True, 'import numpy as np\n')] |
from i3pystatus import IntervalModule
from i3pystatus.core.util import internet, require
from datetime import datetime
from urllib.request import urlopen
import json
import re
# Endpoint templates; the %s placeholders are (api_key, extra feature
# string, location) and (api_key, feature, location) respectively.
GEOLOOKUP_URL = 'http://api.wunderground.com/api/%s/geolookup%s/q/%s.json'
STATION_QUERY_URL = 'http://api.wunderground.com/api/%s/%s/q/%s.json'
class Wunderground(IntervalModule):
'''
This module retrieves weather data using the Weather Underground API.
.. note::
A Weather Underground API key is required to use this module, you can
sign up for a developer API key free at
https://www.wunderground.com/weather/api/
A developer API key is allowed 500 queries per day, and no more than 10
in a given minute. Therefore, it is recommended to be conservative when
setting the update interval.
Valid values for ``location_code`` include:
* **State/City_Name** - CA/San_Francisco
* **Country/City** - France/Paris
* **Geolocation by IP** - autoip
* **Zip or Postal Code** - 60616
* **ICAO Airport Code** - icao:LAX
* **Latitude/Longitude** - 41.8301943,-87.6342619
* **Personal Weather Station (PWS)** - pws:KILCHICA30
When not using a ``pws`` or ``icao`` station ID, the location will be
queried, and the closest station will be used. For a list of PWS
station IDs, visit the following URL:
http://www.wunderground.com/weatherstation/ListStations.asp
.. _weather-usage-wunderground:
.. rubric:: Usage example
.. code-block:: python
from i3pystatus import Status
from i3pystatus.weather import wunderground
status = Status()
status.register(
'weather',
format='{condition} {current_temp}{temp_unit}{icon}[ Hi: {high_temp}] Lo: {low_temp}',
colorize=True,
backend=wunderground.Wunderground(
api_key='dbafe887d56ba4ad',
location_code='pws:MAT645',
units='imperial',
),
)
status.run()
See :ref:`here <weather-formatters>` for a list of formatters which can be
used.
'''
interval = 300
settings = (
('api_key', 'Weather Underground API key'),
('location_code', 'Location code from wunderground.com'),
('units', '\'metric\' or \'imperial\''),
('use_pws', 'Set to False to use only airport stations'),
('forecast', 'Set to ``True`` to check forecast (generates one '
'additional API request per weather update). If set to '
'``False``, then the ``low_temp`` and ``high_temp`` '
'formatters will be set to empty strings.'),
)
required = ('api_key', 'location_code')
api_key = None
location_code = None
units = 'metric'
use_pws = True
forecast = False
# These will be set once weather data has been checked
station_id = None
forecast_url = None
@require(internet)
def api_request(self, url):
'''
Execute an HTTP POST to the specified URL and return the content
'''
with urlopen(url) as content:
try:
content_type = dict(content.getheaders())['Content-Type']
charset = re.search(r'charset=(.*)', content_type).group(1)
except AttributeError:
charset = 'utf-8'
response = json.loads(content.read().decode(charset))
try:
raise Exception(response['response']['error']['description'])
except KeyError:
pass
return response
@require(internet)
def geolookup(self):
'''
Use the location_code to perform a geolookup and find the closest
station. If the location is a pws or icao station ID, no lookup will be
peformed.
'''
if self.station_id is None:
try:
for no_lookup in ('pws', 'icao'):
sid = self.location_code.partition(no_lookup + ':')[-1]
if sid:
self.station_id = self.location_code
return
except AttributeError:
# Numeric or some other type, either way we'll just stringify
# it below and perform a lookup.
pass
extra_opts = '/pws:0' if not self.use_pws else ''
api_url = GEOLOOKUP_URL % (self.api_key,
extra_opts,
self.location_code)
response = self.api_request(api_url)
station_type = 'pws' if self.use_pws else 'airport'
try:
stations = response['location']['nearby_weather_stations']
nearest = stations[station_type]['station'][0]
except (KeyError, IndexError):
raise Exception('No locations matched location_code %s'
% self.location_code)
if self.use_pws:
nearest_pws = nearest.get('id', '')
if not nearest_pws:
raise Exception('No id entry for station')
self.station_id = 'pws:%s' % nearest_pws
else:
nearest_airport = nearest.get('icao', '')
if not nearest_airport:
raise Exception('No icao entry for station')
self.station_id = 'icao:%s' % nearest_airport
@require(internet)
def get_forecast(self):
'''
If configured to do so, make an API request to retrieve the forecast
data for the configured/queried weather station, and return the low and
high temperatures. Otherwise, return two empty strings.
'''
if self.forecast:
query_url = STATION_QUERY_URL % (self.api_key,
'forecast',
self.station_id)
try:
response = self.api_request(query_url)['forecast']
response = response['simpleforecast']['forecastday'][0]
except (KeyError, IndexError, TypeError):
raise Exception('No forecast data found for %s' % self.station_id)
unit = 'celsius' if self.units == 'metric' else 'fahrenheit'
low_temp = response.get('low', {}).get(unit, '')
high_temp = response.get('high', {}).get(unit, '')
return low_temp, high_temp
else:
return '', ''
@require(internet)
def weather_data(self):
'''
Query the configured/queried station and return the weather data
'''
# If necessary, do a geolookup to set the station_id
self.geolookup()
query_url = STATION_QUERY_URL % (self.api_key,
'conditions',
self.station_id)
try:
response = self.api_request(query_url)['current_observation']
self.forecast_url = response.pop('ob_url', None)
except KeyError:
raise Exception('No weather data found for %s' % self.station_id)
low_temp, high_temp = self.get_forecast()
if self.units == 'metric':
temp_unit = 'c'
speed_unit = 'kph'
distance_unit = 'km'
pressure_unit = 'mb'
else:
temp_unit = 'f'
speed_unit = 'mph'
distance_unit = 'mi'
pressure_unit = 'in'
def _find(key, data=None):
data = data or response
return data.get(key, 'N/A')
try:
observation_time = int(_find('observation_epoch'))
except TypeError:
observation_time = 0
return dict(
city=_find('city', response['observation_location']),
condition=_find('weather'),
observation_time=datetime.fromtimestamp(observation_time),
current_temp=_find('temp_' + temp_unit),
low_temp=low_temp,
high_temp=high_temp,
temp_unit='°' + temp_unit.upper(),
feelslike=_find('feelslike_' + temp_unit),
dewpoint=_find('dewpoint_' + temp_unit),
wind_speed=_find('wind_' + speed_unit),
wind_unit=speed_unit,
wind_direction=_find('wind_dir'),
wind_gust=_find('wind_gust_' + speed_unit),
pressure=_find('pressure_' + pressure_unit),
pressure_unit=pressure_unit,
pressure_trend=_find('pressure_trend'),
visibility=_find('visibility_' + distance_unit),
visibility_unit=distance_unit,
humidity=_find('relative_humidity').rstrip('%'),
uv_index=_find('uv'),
)
| [
"i3pystatus.core.util.require",
"datetime.datetime.fromtimestamp",
"urllib.request.urlopen",
"re.search"
] | [((3024, 3041), 'i3pystatus.core.util.require', 'require', (['internet'], {}), '(internet)\n', (3031, 3041), False, 'from i3pystatus.core.util import internet, require\n'), ((3690, 3707), 'i3pystatus.core.util.require', 'require', (['internet'], {}), '(internet)\n', (3697, 3707), False, 'from i3pystatus.core.util import internet, require\n'), ((5561, 5578), 'i3pystatus.core.util.require', 'require', (['internet'], {}), '(internet)\n', (5568, 5578), False, 'from i3pystatus.core.util import internet, require\n'), ((6632, 6649), 'i3pystatus.core.util.require', 'require', (['internet'], {}), '(internet)\n', (6639, 6649), False, 'from i3pystatus.core.util import internet, require\n'), ((3184, 3196), 'urllib.request.urlopen', 'urlopen', (['url'], {}), '(url)\n', (3191, 3196), False, 'from urllib.request import urlopen\n'), ((8037, 8077), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['observation_time'], {}), '(observation_time)\n', (8059, 8077), False, 'from datetime import datetime\n'), ((3326, 3365), 're.search', 're.search', (['"""charset=(.*)"""', 'content_type'], {}), "('charset=(.*)', content_type)\n", (3335, 3365), False, 'import re\n')] |
#!/usr/bin/python
"""
Train multihead-classifier with triplet loss
"""
from __future__ import print_function, division
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.contrib.layers import fully_connected
from tensorflow.contrib.rnn import GRUCell
from tensorflow.python.ops.rnn import bidirectional_dynamic_rnn as bi_rnn
from utils import get_vocabulary_size, batch_generator
from words_encoder import WordsEncoder
# Embeddings params
NUM_WORDS = 10000
EMBEDDING_DIM = 100
# Model params
HIDDEN_SIZE = 150
HEAD_SIZE = 50
KEEP_PROB = 0.8
LEARNING_RATE = 1e-3
# Triplet loss params
MARGIN = 1.
TRIPLET_LOSS_COEF = 0.01
# Training params
BATCH_SIZE = 256
NUM_EPOCHS = 10
DELTA = 0.5
MODEL_PATH = './model'
# Load the data set
df = pd.read_csv("tweets.csv")
num_classes = df.shape[1] - 1
X_train, X_test, y_train, y_test = train_test_split(df.request.values,
df.iloc[:,1:].values,
test_size=0.1,
stratify=df.iloc[:,1:].values.argmax(axis=1),
random_state=42)
# Sequences pre-processing
words_encoder = WordsEncoder()
words_encoder.fit(X_train)
X_train = words_encoder.transform(X_train)
X_test = words_encoder.transform(X_test)
vocabulary_size = get_vocabulary_size(X_train)
sequence_length = words_encoder.max_len
# Different placeholders
with tf.name_scope('Inputs'):
batch_ph = tf.placeholder(tf.int32, [None, sequence_length], name='batch_ph')
target_ph = tf.placeholder(tf.float32, [None, num_classes], name='target_ph')
seq_len_ph = tf.placeholder(tf.int32, [None], name='seq_len_ph')
keep_prob_ph = tf.placeholder(tf.float32, name='keep_prob_ph')
# Embedding layer
with tf.name_scope('Embedding_layer'):
embeddings_var = tf.Variable(tf.random_uniform([vocabulary_size, EMBEDDING_DIM], -1.0, 1.0), trainable=True)
# tf.summary.histogram('embeddings_var', embeddings_var)
batch_embedded = tf.nn.embedding_lookup(embeddings_var, batch_ph)
# (Bi-)RNN layer(-s)
_, rnn_outputs = bi_rnn(GRUCell(HIDDEN_SIZE), GRUCell(HIDDEN_SIZE),
inputs=batch_embedded, sequence_length=seq_len_ph, dtype=tf.float32)
# tf.summary.histogram('RNN_outputs', rnn_outputs)
rnn_outputs = tf.concat(rnn_outputs, 1)
# Multi-head layer
heads = []
with tf.name_scope('Multihead_layer'):
for _ in range(num_classes):
heads.append(fully_connected(rnn_outputs, HEAD_SIZE))
heads_concatenated = tf.concat(heads, axis=1)
# Triplet loss
with tf.name_scope("Triplet_loss"):
triplet_loss = []
for i, head in enumerate(heads):
# positive_mask = tf.equal(tf.squeeze(tf.slice(target_ph, [0, i], [-1, i + 1])), 1.)
positive_mask = tf.equal(target_ph[:, i], 1.)
negative_mask = tf.logical_not(positive_mask)
positive_mask.set_shape([None]) # Shape is required by tf.boolean_mask
negative_mask.set_shape([None])
anchor = tf.boolean_mask(head, positive_mask)
negative = tf.boolean_mask(head, negative_mask)
pos_indices = tf.random_uniform(
tf.shape(anchor)[0: 1],
minval=0,
maxval=tf.shape(anchor)[0],
dtype=tf.int32
)
neg_indices = tf.random_uniform(
tf.shape(anchor)[0: 1],
minval=0,
maxval=tf.shape(negative)[0],
dtype=tf.int32
)
positive = tf.gather(anchor, pos_indices)
negative = tf.gather(negative, neg_indices)
distance_positive = tf.norm(tf.subtract(anchor, positive), axis=1)
distance_negative = tf.norm(tf.subtract(anchor, negative), axis=1)
# triplet_loss += tf.reduce_mean(tf.maximum(0., MARGIN + distance_positive - distance_negative))
triplet_loss.append(tf.reduce_mean(tf.maximum(0., MARGIN + distance_positive - distance_negative)))
triplet_loss = tf.add_n(triplet_loss)
# Dropout
# drop = tf.nn.dropout(attention_output, keep_prob_ph)
# Fully connected layer
with tf.name_scope('Fully_connected_layer'):
logits = fully_connected(heads_concatenated, num_classes)
with tf.name_scope('Metrics'):
# Cross-entropy loss and optimizer initialization
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=target_ph))
tf.summary.scalar('loss', cross_entropy)
loss = cross_entropy + triplet_loss * TRIPLET_LOSS_COEF
optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE).minimize(loss)
# Accuracy metric
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(tf.nn.softmax(logits), 1),
tf.argmax(target_ph, 1)),
tf.float32))
tf.summary.scalar('accuracy', accuracy)
merged = tf.summary.merge_all()
# Batch generators
train_batch_generator = batch_generator(X_train, y_train, BATCH_SIZE)
test_batch_generator = batch_generator(X_test, y_test, BATCH_SIZE)
train_writer = tf.summary.FileWriter('./logdir/train', accuracy.graph)
test_writer = tf.summary.FileWriter('./logdir/test', accuracy.graph)
session_conf = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
saver = tf.train.Saver()
if __name__ == "__main__":
with tf.Session(config=session_conf) as sess:
sess.run(tf.global_variables_initializer())
print("Start learning...")
for epoch in range(NUM_EPOCHS):
loss_train = 0
loss_test = 0
accuracy_train = 0
accuracy_test = 0
print("epoch: {}\t".format(epoch), end="")
# Training
num_batches = X_train.shape[0] // BATCH_SIZE
for b in range(num_batches):
x_batch, y_batch = next(train_batch_generator)
seq_len = np.array([list(x).index(0) + 1 for x in x_batch]) # actual lengths of sequences
loss_tr, acc, _, summary = sess.run([loss, accuracy, optimizer, merged],
feed_dict={batch_ph: x_batch,
target_ph: y_batch,
seq_len_ph: seq_len,
keep_prob_ph: KEEP_PROB})
accuracy_train += acc
loss_train = loss_tr * DELTA + loss_train * (1 - DELTA)
train_writer.add_summary(summary, b + num_batches * epoch)
accuracy_train /= num_batches
# Testing
num_batches = X_test.shape[0] // BATCH_SIZE
for b in range(num_batches):
x_batch, y_batch = next(test_batch_generator)
seq_len = np.array([list(x).index(0) + 1 for x in x_batch]) # actual lengths of sequences
loss_test_batch, acc, summary = sess.run([loss, accuracy, merged],
feed_dict={batch_ph: x_batch,
target_ph: y_batch,
seq_len_ph: seq_len,
keep_prob_ph: 1.0})
accuracy_test += acc
loss_test += loss_test_batch
test_writer.add_summary(summary, b + num_batches * epoch)
accuracy_test /= num_batches
loss_test /= num_batches
print("loss: {:.3f}, val_loss: {:.3f}, acc: {:.3f}, val_acc: {:.3f}".format(
loss_train, loss_test, accuracy_train, accuracy_test
))
train_writer.close()
test_writer.close()
saver.save(sess, MODEL_PATH)
print("Run 'tensorboard --logdir=./logdir' to checkout tensorboard logs.")
| [
"words_encoder.WordsEncoder",
"tensorflow.equal",
"tensorflow.shape",
"pandas.read_csv",
"tensorflow.boolean_mask",
"tensorflow.logical_not",
"tensorflow.contrib.rnn.GRUCell",
"tensorflow.nn.softmax",
"tensorflow.GPUOptions",
"tensorflow.nn.embedding_lookup",
"utils.batch_generator",
"tensorfl... | [((820, 845), 'pandas.read_csv', 'pd.read_csv', (['"""tweets.csv"""'], {}), "('tweets.csv')\n", (831, 845), True, 'import pandas as pd\n'), ((1299, 1313), 'words_encoder.WordsEncoder', 'WordsEncoder', ([], {}), '()\n', (1311, 1313), False, 'from words_encoder import WordsEncoder\n'), ((1443, 1471), 'utils.get_vocabulary_size', 'get_vocabulary_size', (['X_train'], {}), '(X_train)\n', (1462, 1471), False, 'from utils import get_vocabulary_size, batch_generator\n'), ((2418, 2443), 'tensorflow.concat', 'tf.concat', (['rnn_outputs', '(1)'], {}), '(rnn_outputs, 1)\n', (2427, 2443), True, 'import tensorflow as tf\n'), ((4932, 4954), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (4952, 4954), True, 'import tensorflow as tf\n'), ((4999, 5044), 'utils.batch_generator', 'batch_generator', (['X_train', 'y_train', 'BATCH_SIZE'], {}), '(X_train, y_train, BATCH_SIZE)\n', (5014, 5044), False, 'from utils import get_vocabulary_size, batch_generator\n'), ((5068, 5111), 'utils.batch_generator', 'batch_generator', (['X_test', 'y_test', 'BATCH_SIZE'], {}), '(X_test, y_test, BATCH_SIZE)\n', (5083, 5111), False, 'from utils import get_vocabulary_size, batch_generator\n'), ((5128, 5183), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""./logdir/train"""', 'accuracy.graph'], {}), "('./logdir/train', accuracy.graph)\n", (5149, 5183), True, 'import tensorflow as tf\n'), ((5198, 5252), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""./logdir/test"""', 'accuracy.graph'], {}), "('./logdir/test', accuracy.graph)\n", (5219, 5252), True, 'import tensorflow as tf\n'), ((5339, 5355), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (5353, 5355), True, 'import tensorflow as tf\n'), ((1543, 1566), 'tensorflow.name_scope', 'tf.name_scope', (['"""Inputs"""'], {}), "('Inputs')\n", (1556, 1566), True, 'import tensorflow as tf\n'), ((1583, 1649), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, 
sequence_length]'], {'name': '"""batch_ph"""'}), "(tf.int32, [None, sequence_length], name='batch_ph')\n", (1597, 1649), True, 'import tensorflow as tf\n'), ((1666, 1731), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, num_classes]'], {'name': '"""target_ph"""'}), "(tf.float32, [None, num_classes], name='target_ph')\n", (1680, 1731), True, 'import tensorflow as tf\n'), ((1749, 1800), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {'name': '"""seq_len_ph"""'}), "(tf.int32, [None], name='seq_len_ph')\n", (1763, 1800), True, 'import tensorflow as tf\n'), ((1820, 1867), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""keep_prob_ph"""'}), "(tf.float32, name='keep_prob_ph')\n", (1834, 1867), True, 'import tensorflow as tf\n'), ((1892, 1924), 'tensorflow.name_scope', 'tf.name_scope', (['"""Embedding_layer"""'], {}), "('Embedding_layer')\n", (1905, 1924), True, 'import tensorflow as tf\n'), ((2121, 2169), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embeddings_var', 'batch_ph'], {}), '(embeddings_var, batch_ph)\n', (2143, 2169), True, 'import tensorflow as tf\n'), ((2216, 2236), 'tensorflow.contrib.rnn.GRUCell', 'GRUCell', (['HIDDEN_SIZE'], {}), '(HIDDEN_SIZE)\n', (2223, 2236), False, 'from tensorflow.contrib.rnn import GRUCell\n'), ((2238, 2258), 'tensorflow.contrib.rnn.GRUCell', 'GRUCell', (['HIDDEN_SIZE'], {}), '(HIDDEN_SIZE)\n', (2245, 2258), False, 'from tensorflow.contrib.rnn import GRUCell\n'), ((2480, 2512), 'tensorflow.name_scope', 'tf.name_scope', (['"""Multihead_layer"""'], {}), "('Multihead_layer')\n", (2493, 2512), True, 'import tensorflow as tf\n'), ((2634, 2658), 'tensorflow.concat', 'tf.concat', (['heads'], {'axis': '(1)'}), '(heads, axis=1)\n', (2643, 2658), True, 'import tensorflow as tf\n'), ((2680, 2709), 'tensorflow.name_scope', 'tf.name_scope', (['"""Triplet_loss"""'], {}), "('Triplet_loss')\n", (2693, 2709), True, 'import tensorflow as tf\n'), ((4041, 4063), 
'tensorflow.add_n', 'tf.add_n', (['triplet_loss'], {}), '(triplet_loss)\n', (4049, 4063), True, 'import tensorflow as tf\n'), ((4160, 4198), 'tensorflow.name_scope', 'tf.name_scope', (['"""Fully_connected_layer"""'], {}), "('Fully_connected_layer')\n", (4173, 4198), True, 'import tensorflow as tf\n'), ((4213, 4261), 'tensorflow.contrib.layers.fully_connected', 'fully_connected', (['heads_concatenated', 'num_classes'], {}), '(heads_concatenated, num_classes)\n', (4228, 4261), False, 'from tensorflow.contrib.layers import fully_connected\n'), ((4268, 4292), 'tensorflow.name_scope', 'tf.name_scope', (['"""Metrics"""'], {}), "('Metrics')\n", (4281, 4292), True, 'import tensorflow as tf\n'), ((4461, 4501), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'cross_entropy'], {}), "('loss', cross_entropy)\n", (4478, 4501), True, 'import tensorflow as tf\n'), ((4882, 4921), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""accuracy"""', 'accuracy'], {}), "('accuracy', accuracy)\n", (4899, 4921), True, 'import tensorflow as tf\n'), ((1959, 2021), 'tensorflow.random_uniform', 'tf.random_uniform', (['[vocabulary_size, EMBEDDING_DIM]', '(-1.0)', '(1.0)'], {}), '([vocabulary_size, EMBEDDING_DIM], -1.0, 1.0)\n', (1976, 2021), True, 'import tensorflow as tf\n'), ((2887, 2917), 'tensorflow.equal', 'tf.equal', (['target_ph[:, i]', '(1.0)'], {}), '(target_ph[:, i], 1.0)\n', (2895, 2917), True, 'import tensorflow as tf\n'), ((2941, 2970), 'tensorflow.logical_not', 'tf.logical_not', (['positive_mask'], {}), '(positive_mask)\n', (2955, 2970), True, 'import tensorflow as tf\n'), ((3109, 3145), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['head', 'positive_mask'], {}), '(head, positive_mask)\n', (3124, 3145), True, 'import tensorflow as tf\n'), ((3165, 3201), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['head', 'negative_mask'], {}), '(head, negative_mask)\n', (3180, 3201), True, 'import tensorflow as tf\n'), ((3575, 3605), 'tensorflow.gather', 'tf.gather', 
(['anchor', 'pos_indices'], {}), '(anchor, pos_indices)\n', (3584, 3605), True, 'import tensorflow as tf\n'), ((3625, 3657), 'tensorflow.gather', 'tf.gather', (['negative', 'neg_indices'], {}), '(negative, neg_indices)\n', (3634, 3657), True, 'import tensorflow as tf\n'), ((4383, 4455), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'logits', 'labels': 'target_ph'}), '(logits=logits, labels=target_ph)\n', (4422, 4455), True, 'import tensorflow as tf\n'), ((5296, 5328), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'allow_growth': '(True)'}), '(allow_growth=True)\n', (5309, 5328), True, 'import tensorflow as tf\n'), ((5393, 5424), 'tensorflow.Session', 'tf.Session', ([], {'config': 'session_conf'}), '(config=session_conf)\n', (5403, 5424), True, 'import tensorflow as tf\n'), ((2568, 2607), 'tensorflow.contrib.layers.fully_connected', 'fully_connected', (['rnn_outputs', 'HEAD_SIZE'], {}), '(rnn_outputs, HEAD_SIZE)\n', (2583, 2607), False, 'from tensorflow.contrib.layers import fully_connected\n'), ((3695, 3724), 'tensorflow.subtract', 'tf.subtract', (['anchor', 'positive'], {}), '(anchor, positive)\n', (3706, 3724), True, 'import tensorflow as tf\n'), ((3770, 3799), 'tensorflow.subtract', 'tf.subtract', (['anchor', 'negative'], {}), '(anchor, negative)\n', (3781, 3799), True, 'import tensorflow as tf\n'), ((4580, 4631), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'LEARNING_RATE'}), '(learning_rate=LEARNING_RATE)\n', (4602, 4631), True, 'import tensorflow as tf\n'), ((5451, 5484), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5482, 5484), True, 'import tensorflow as tf\n'), ((3255, 3271), 'tensorflow.shape', 'tf.shape', (['anchor'], {}), '(anchor)\n', (3263, 3271), True, 'import tensorflow as tf\n'), ((3431, 3447), 'tensorflow.shape', 'tf.shape', (['anchor'], {}), '(anchor)\n', (3439, 3447), True, 'import tensorflow 
as tf\n'), ((3957, 4020), 'tensorflow.maximum', 'tf.maximum', (['(0.0)', '(MARGIN + distance_positive - distance_negative)'], {}), '(0.0, MARGIN + distance_positive - distance_negative)\n', (3967, 4020), True, 'import tensorflow as tf\n'), ((4801, 4824), 'tensorflow.argmax', 'tf.argmax', (['target_ph', '(1)'], {}), '(target_ph, 1)\n', (4810, 4824), True, 'import tensorflow as tf\n'), ((3320, 3336), 'tensorflow.shape', 'tf.shape', (['anchor'], {}), '(anchor)\n', (3328, 3336), True, 'import tensorflow as tf\n'), ((3496, 3514), 'tensorflow.shape', 'tf.shape', (['negative'], {}), '(negative)\n', (3504, 3514), True, 'import tensorflow as tf\n'), ((4727, 4748), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (4740, 4748), True, 'import tensorflow as tf\n')] |
from .utils import *
def test_g2p():
output = get_tmp_out()
input = os.path.join(dir_path, 'test_data', 'ex2.bcf')
test_args = dict(
no_warnings=True,
input=input,
output=output,
ped=os.path.join(dir_path, "test_data", "test.ped"),
de_novo=True,
biallelic=True,
csq=['default'],
check_g2p_consequence=True,
check_g2p_inheritance=True,
g2p=os.path.join(dir_path, "test_data", "test_g2p.csv")
)
results, expected = run_args(test_args, output,
sys._getframe().f_code.co_name)
assert_equal(results, expected)
os.remove(output)
def test_g2p_snpeff():
output = get_tmp_out()
input = os.path.join(dir_path, 'test_data', 'ex2.snpeff.bcf')
test_args = dict(
no_warnings=True,
snpeff=True,
input=input,
output=output,
ped=os.path.join(dir_path, "test_data", "test.ped"),
de_novo=True,
biallelic=True,
csq=['default'],
check_g2p_consequence=True,
check_g2p_inheritance=True,
g2p=os.path.join(dir_path, "test_data", "test_g2p.csv")
)
results, expected = run_args(test_args, output, 'test_g2p')
assert_equal(results, expected)
os.remove(output)
if __name__ == '__main__':
import nose
nose.run(defaultTest=__name__)
| [
"nose.run"
] | [((1341, 1371), 'nose.run', 'nose.run', ([], {'defaultTest': '__name__'}), '(defaultTest=__name__)\n', (1349, 1371), False, 'import nose\n')] |
import turtle
wn=turtle.Screen()
alex=turtle.Turtle()
alex.forward(50)
alex.left(90)
alex.forward(30)
wn.mainloop() | [
"turtle.Screen",
"turtle.Turtle"
] | [((17, 32), 'turtle.Screen', 'turtle.Screen', ([], {}), '()\n', (30, 32), False, 'import turtle\n'), ((38, 53), 'turtle.Turtle', 'turtle.Turtle', ([], {}), '()\n', (51, 53), False, 'import turtle\n')] |
import datetime
from django.core.management.base import BaseCommand
from data_import.models import DataFileKey
class Command(BaseCommand):
"""
A management command for expunging expired keys
"""
help = "Expunge expired keys"
def handle(self, *args, **options):
self.stdout.write("Expunging expired keys")
now = datetime.datetime.utcnow()
# Note: astimezone reapplies the timezone so that django doesn't
# complain
six_hours_ago = (now - datetime.timedelta(hours=6)).astimezone()
keys = DataFileKey.objects.filter(created__lte=six_hours_ago)
num_deletes = keys.delete()[0]
self.stdout.write("Removed {0} keys".format(num_deletes))
| [
"datetime.timedelta",
"data_import.models.DataFileKey.objects.filter",
"datetime.datetime.utcnow"
] | [((353, 379), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (377, 379), False, 'import datetime\n'), ((561, 615), 'data_import.models.DataFileKey.objects.filter', 'DataFileKey.objects.filter', ([], {'created__lte': 'six_hours_ago'}), '(created__lte=six_hours_ago)\n', (587, 615), False, 'from data_import.models import DataFileKey\n'), ((504, 531), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': '(6)'}), '(hours=6)\n', (522, 531), False, 'import datetime\n')] |
import getpass
import telnetlib
port_num = str(input("Enter the Number of Port and type: "))
HOST = "10.1.1.1"
user = input("\nEnter The Username: ")
password = getpass.getpass()
tn = telnetlib.Telnet(HOST)
tn.read_until(b"Username: ")
tn.write(user.encode('ascii') + b"\n")
if password:
tn.read_until(b"Password: ")
tn.write(password.encode('ascii') + b"\n")
tn.write(b"enable \n")
tn.write(b"123\n")
tn.write(b"config t \n")
tn.write(b"hostname " + user.encode('ascii') + b"\n")
tn.write(b"banner motd #Hello!# \n")
tn.write(b"interface " + str(port_num).encode('ascii') + b"\n")
# tn.write(b"interface s2/0 \n")
tn.write(b"no shutdown \n")
tn.write(b"ip address 10.1.2.1 255.255.255.0 \n")
tn.write(b"exit \n")
tn.write(b"exit \n") # To exit from Global Configration mode
tn.write(b"exit \n") # To exit from execuite mode so it will close the Session
tn.write(b"exit \n") # To exit from the terminal (cmd)
print("\nDone")
input("Just Press Enter To Exit!")
| [
"getpass.getpass",
"telnetlib.Telnet"
] | [((175, 192), 'getpass.getpass', 'getpass.getpass', ([], {}), '()\n', (190, 192), False, 'import getpass\n'), ((202, 224), 'telnetlib.Telnet', 'telnetlib.Telnet', (['HOST'], {}), '(HOST)\n', (218, 224), False, 'import telnetlib\n')] |
import argparse
import os
argparser = argparse.ArgumentParser()
argparser.add_argument("--dataset_names", default="all", type=str) # "all" or names joined by comma
argparser.add_argument("--dataset_path", default="DATASET/odinw", type=str)
args = argparser.parse_args()
root = "https://vlpdatasets.blob.core.windows.net/odinw/odinw/odinw_35"
all_datasets = ["AerialMaritimeDrone", "AmericanSignLanguageLetters", "Aquarium", "BCCD", "ChessPieces", "CottontailRabbits", "DroneControl", "EgoHands", "HardHatWorkers", "MaskWearing", "MountainDewCommercial", "NorthAmericaMushrooms", "OxfordPets", "PKLot", "Packages", "PascalVOC", "Raccoon", "ShellfishOpenImages", "ThermalCheetah", "UnoCards", "VehiclesOpenImages", "WildfireSmoke", "boggleBoards", "brackishUnderwater", "dice", "openPoetryVision", "pistols", "plantdoc", "pothole", "selfdrivingCar", "thermalDogsAndPeople", "vector", "websiteScreenshots"]
datasets_to_download = []
if args.dataset_names == "all":
datasets_to_download = all_datasets
else:
datasets_to_download = args.dataset_names.split(",")
for dataset in datasets_to_download:
if dataset in all_datasets:
print("Downloading dataset: ", dataset)
os.system("wget " + root + "/" + dataset + ".zip" + " -O " + args.dataset_path + "/" + dataset + ".zip")
os.system("unzip " + args.dataset_path + "/" + dataset + ".zip -d " + args.dataset_path)
os.system("rm " + args.dataset_path + "/" + dataset + ".zip")
else:
print("Dataset not found: ", dataset)
| [
"os.system",
"argparse.ArgumentParser"
] | [((39, 64), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (62, 64), False, 'import argparse\n'), ((1195, 1304), 'os.system', 'os.system', (["('wget ' + root + '/' + dataset + '.zip' + ' -O ' + args.dataset_path + '/' +\n dataset + '.zip')"], {}), "('wget ' + root + '/' + dataset + '.zip' + ' -O ' + args.\n dataset_path + '/' + dataset + '.zip')\n", (1204, 1304), False, 'import os\n'), ((1308, 1401), 'os.system', 'os.system', (["('unzip ' + args.dataset_path + '/' + dataset + '.zip -d ' + args.dataset_path)"], {}), "('unzip ' + args.dataset_path + '/' + dataset + '.zip -d ' + args.\n dataset_path)\n", (1317, 1401), False, 'import os\n'), ((1405, 1466), 'os.system', 'os.system', (["('rm ' + args.dataset_path + '/' + dataset + '.zip')"], {}), "('rm ' + args.dataset_path + '/' + dataset + '.zip')\n", (1414, 1466), False, 'import os\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from iso3166 import countries
import pycountry_convert as pc
import pycountry
import re
from datetime import datetime
from statsmodels.distributions.empirical_distribution import ECDF
# Paths to the input datasets (relative to this script's location).
FINAL_DATAFRAME = '../aggregate_data/final_dataframe.csv'  # concatenation of 5 datasets
PATH_RIPE_RIS_PEERS = '../../Datasets/RIPE_RIS_peers/list_of_RIPE_RIS_peers.json'  # RIPE RIS route collectors' peers
RIPE_ATLAS_PROBES = '../../Datasets/RIPE_Atlas_probes/bq_results.json'  # RIPE Atlas probe metadata (JSON lines)
ROUTEVIEWS_PEERS = '../../Datasets/RouteViews_peers/RouteViews-Peering-1_11_21.csv'  # RouteViews collectors' peers
def read_ripe_peers():
    """
    Load the list of RIPE RIS peer monitors from PATH_RIPE_RIS_PEERS.

    :return: A dataframe with one column 'ASn' that contains the unique
             ASNs of all RIPE RIS peer-monitors.
    """
    data_ripe = pd.read_json(PATH_RIPE_RIS_PEERS, typ='dictionary')
    # Deduplicate the peer ASNs. pd.DataFrame cannot be built directly from
    # a set (modern pandas raises TypeError: "Set type is unordered"), so
    # materialize a sorted list, which also gives a deterministic row order.
    unique_peers = sorted(set(data_ripe.values))
    return pd.DataFrame(unique_peers, columns=['ASn'])
def take_unique_ATLAS_ASNs():
    """
    Load the RIPE Atlas probe metadata and collect the ASNs of connected probes.

    Probes with status other than 'Connected' are ignored; a probe contributes
    both its IPv4 ASN (asn_v4) and IPv6 ASN (asn_v6) when present.

    :return: A dataframe with one column 'Atlas_ASN' containing the unique,
             non-NaN ASNs hosting at least one connected probe.
    """
    data = pd.read_json(RIPE_ATLAS_PROBES, lines=True)
    connected = data[data['status'] == 'Connected']
    # Union of the ASNs seen over IPv4 and IPv6, dropping NaN placeholders
    # (probes that lack one of the address families).
    union = {asn for asn in set(connected['asn_v4']) | set(connected['asn_v6'])
             if pd.notna(asn)}
    # pd.DataFrame cannot be built directly from a set (modern pandas raises
    # TypeError: "Set type is unordered"); sort for a deterministic row order.
    atlas_dataframe = pd.DataFrame(sorted(union), columns=['Atlas_ASN'])
    return atlas_dataframe
def take_unique_RouteViews_ASNs():
    """
    Load the RouteViews peering dataset and collect the unique peer ASNs.

    :return: A dataframe with one column 'RouteViews_ASn' containing the
             unique AS numbers of the RouteViews peers.
    """
    data = pd.read_csv(ROUTEVIEWS_PEERS, sep=',')
    # pd.DataFrame cannot be built directly from a set (modern pandas raises
    # TypeError: "Set type is unordered"); sort for a deterministic row order.
    unique_asns = sorted(set(data['ASNUMBER']))
    route_views_dataframe = pd.DataFrame(unique_asns, columns=['RouteViews_ASn'])
    return route_views_dataframe
def read_final_dataframe():
    """
    Read the aggregated dataset (the concatenation of 5 datasets) from disk.

    :return: the final dataframe, with the 'prb_id' column kept as strings
    """
    return pd.read_csv(FINAL_DATAFRAME, sep=',', dtype={"prb_id": "string"})
def call_categorize(final_df, current_df, type):
    """
    Run categorize_features once per column of final_df.

    :param final_df: A dataframe that is created from the concatenation of 5 datasets
    :param current_df: Contains the unique AS numbers of RIPE RIS or the AS numbers of RIPE ATLAS probes
    :param type: Can take 2 values ('Ripe_Ris_monitors', 'Ripe_Atlas_probes');
                 the name shadows the builtin but is kept for caller compatibility
    """
    for col in final_df.columns:
        col_dtype = final_df.dtypes[col]
        categorize_features(final_df, current_df, col_dtype, col, type)
def call_categorize_all(final_df, ripe, atlas, route):
    """
    For every column of the merged dataframe, draw one figure comparing the
    distribution over all ASes against the RIPE RIS, RIPE Atlas and RouteViews
    monitor sets, and save it as '<column>All.png'.

    :param final_df: Dataframe combining all datasets
    :param ripe: One-column ('ASn') dataframe of RIPE RIS peer ASNs
    :param atlas: One-column ('ASn') dataframe of RIPE Atlas probe ASNs
    :param route: One-column ('ASn') dataframe of RouteViews peer ASNs
    """
    for column_name in final_df.columns:
        dataTypeObj = final_df.dtypes[column_name]
        # Deep copies so categorize_features_all's in-place coercions cannot
        # leak into final_df between monitor sets.
        dcopy = final_df.copy(deep=True)
        dcopy1 = final_df.copy(deep=True)
        dcopy2 = final_df.copy(deep=True)
        if column_name == 'peeringDB_created':
            # peeringDB_created is deliberately skipped in the all-sets comparison.
            pass
        elif column_name == 'is_personal_AS':
            # NOTE(review): all three calls reuse the same copy `dcopy` here,
            # while the 'AS_rank_iso' branch uses dcopy/dcopy1/dcopy2 —
            # confirm the reuse is intentional.
            y1 = categorize_features_all(dcopy, ripe, dataTypeObj, column_name)
            y2 = categorize_features_all(dcopy, atlas, dataTypeObj, column_name)
            y3 = categorize_features_all(dcopy, route, dataTypeObj, column_name)
            x = final_df[column_name].fillna(0)
            x = x.astype(str)
            plt.hist([x, y1, y2, y3], density=True, bins=2, histtype='bar',
                     align='left',
                     label=['All_ASes', 'Ripe Ris', 'Atlas', 'RouteViews'])
            plt.legend(prop={'size': 10})
            plt.ylabel('CDF')
            plt.ylim(0, 1)
            plt.suptitle('Feature: ' + str(column_name), fontsize=14)
            plt.xticks(rotation='vertical')
            plt.tight_layout()
            plt.savefig(str(column_name) + 'All' + f'.png')
            plt.show()
        elif column_name == 'AS_rank_iso':
            y1 = categorize_features_all(dcopy, ripe, dataTypeObj, column_name)
            y2 = categorize_features_all(dcopy1, atlas, dataTypeObj, column_name)
            y3 = categorize_features_all(dcopy2, route, dataTypeObj, column_name)
            # Replace ISO codes with continent names before plotting all ASes.
            final_df[column_name] = convert_country_to_continent(final_df)
            x = final_df[column_name].dropna()
            x = x.astype(str)
            plt.hist([x, y1, y2, y3], density=True, bins=abs(final_df[column_name].nunique()), histtype='bar',
                     align='left',
                     label=['All_ASes', 'Ripe Ris', 'Atlas', 'RouteViews'])
            plt.legend(prop={'size': 10})
            plt.ylabel('CDF')
            plt.ylim(0, 1)
            plt.suptitle('Feature: ' + str(column_name), fontsize=14)
            plt.xticks(rotation='vertical')
            plt.tight_layout()
            plt.savefig(str(column_name) + 'All' + f'.png')
            plt.show()
        elif dataTypeObj == np.int64 or dataTypeObj == np.float64:
            # Numeric features: overlay the four empirical CDFs.
            x1, y1 = categorize_features_all(dcopy, ripe, dataTypeObj, column_name)
            x2, y2 = categorize_features_all(dcopy, atlas, dataTypeObj, column_name)
            x3, y3 = categorize_features_all(dcopy, route, dataTypeObj, column_name)
            x = final_df[column_name].dropna()
            final_cdf = ECDF(x)
            plt.plot(final_cdf.x, final_cdf.y, label='All_ASes')
            plt.plot(x1, y1, label='<NAME>')
            plt.plot(x2, y2, label='Atlas')
            plt.plot(x3, y3, label='RouteViews')
            plt.title('Feature: ' + str(column_name), fontsize=14)
            plt.legend()
            plt.tight_layout()
            plt.savefig(str(column_name) + 'All' + f'.png')
            plt.show()
        elif dataTypeObj == np.object:
            # NOTE(review): np.object was removed in numpy 1.24 — this comparison
            # raises AttributeError on modern numpy; builtin `object` is the
            # supported spelling.
            # Categorical features: grouped relative-frequency bar chart.
            temp = final_df[column_name].dropna()
            x = temp.unique()
            x = x.astype(str)
            y0 = temp
            y0_counts = y0.value_counts()
            y1 = categorize_features_all(dcopy, ripe, dataTypeObj, column_name)
            c1 = 0
            c2 = 0
            c3 = 0
            # Count 'nan' strings per monitor set so they can be excluded from
            # the denominators when normalising frequencies below.
            for i in y1:
                if i == 'nan':
                    c1 = c1 + 1
            y1_counts = y1.value_counts()
            y2 = categorize_features_all(dcopy, atlas, dataTypeObj, column_name)
            for i in y2:
                if i == 'nan':
                    c2 = c2 + 1
            y2_counts = y2.value_counts()
            y3 = categorize_features_all(dcopy, route, dataTypeObj, column_name)
            for i in y3:
                if i == 'nan':
                    c3 = c3 + 1
            y3_counts = y3.value_counts()
            y0_list = []
            y1_list = []
            y2_list = []
            y3_list = []
            # Relative frequency of every category for each of the four groups.
            for item in x:
                y0_value = 0
                y1_value = 0
                y2_value = 0
                y3_value = 0
                if item in y0_counts:
                    y0_value = y0_counts[item]
                if item in y1_counts:
                    y1_value = y1_counts[item]
                if item in y2_counts:
                    y2_value = y2_counts[item]
                if item in y3_counts:
                    y3_value = y3_counts[item]
                y0_list.append(y0_value/len(y0))
                y1_list.append(y1_value/(len(y1) - c1))
                y2_list.append(y2_value/(len(y2) - c2))
                y3_list.append(y3_value/(len(y3) - c3))
            bar_width = 0.2
            # Four side-by-side bars per category, offset by bar_width.
            x_1 = np.arange(len(x))
            x_2 = [x + bar_width for x in x_1]
            x_3 = [x + bar_width for x in x_2]
            x_4 = [x + bar_width for x in x_3]
            plt.bar(x_1, y0_list, label='All ASes', width=bar_width)
            plt.bar(x_2, y1_list, label='Ripe Ris', width=bar_width)
            plt.bar(x_3, y2_list, label='ATLAS', width=bar_width)
            plt.bar(x_4, y3_list, label='RouteView', width=bar_width)
            plt.legend(prop={'size': 10})
            plt.ylabel('CDF')
            plt.ylim(0, 1)
            plt.suptitle('Feature: ' + str(column_name), fontsize=14)
            plt.xticks([r + bar_width for r in range(len(x))], x, rotation='vertical')
            plt.tight_layout()
            plt.savefig(str(column_name) + 'All' + f'.png')
            plt.show()
        plt.close()
def convert_to_numerical(data):
    """
    The function subtracts the created year of peeringDB from the current year.
    :param data: It contains all features from 3 different datasets
    :return: A numerical feature containing the above described subtraction
    """
    current_year = int(datetime.today().year)
    # Normalise to string, extract the 4-digit year, then compute the age.
    created_years = data['peeringDB_created'].astype('str').apply(keep_number)
    data['peeringDB_created'] = created_years.apply(lambda year: current_year - int(year))
    return data['peeringDB_created']
def keep_number(text):
    """
    Extract the 4-digit year from a peeringDB timestamp string.

    :param text: example 2005-06-10T02:28:32Z
    :return: Only the year --> 2005 (or '0000' for the missing-value marker '0')
    """
    if text != '0':
        # Concatenate every 4-digit run found in the string.
        return "".join(re.findall(r'\d{4}', text))
    return '0000'
def country_flag(data):
    """
    :param data: A row that carries a 2-letter country code in 'AS_rank_iso'
    :return: The full country name for the code, or 'Unknown Code' when the
             code is not a known ISO alpha-2 value
    """
    known_alpha_2 = {country.alpha2 for country in countries}
    iso_code = data['AS_rank_iso']
    if iso_code not in known_alpha_2:
        return 'Unknown Code'
    return pycountry.countries.get(alpha_2=iso_code).name
def country_to_continent(country_name):
    """
    This function takes as input a country name and returns the continent that
    the given country belongs.

    :param country_name: Contains the name of a country
    :return: The continent name, or np.nan when the country cannot be resolved
    """
    try:
        country_alpha2 = pc.country_name_to_country_alpha2(country_name)
        country_continent_code = pc.country_alpha2_to_continent_code(country_alpha2)
        country_continent_name = pc.convert_continent_code_to_continent_name(country_continent_code)
        return country_continent_name
    except (KeyError, TypeError):
        # pycountry_convert raises KeyError for unknown names (e.g. the
        # 'Unknown Code' placeholder) and TypeError for non-string input such
        # as NaN. The previous bare `except:` also swallowed KeyboardInterrupt
        # and SystemExit, which made the loop impossible to interrupt.
        return np.nan
def convert_country_to_continent(data):
    """
    The function converts iso = alpha_2 (example: US) to the whole name of the
    country and then to its continent. Needs (import iso3166)

    :param data: Contains a dataframe combining 4 datasets
    :return: The continent for each country (Series aligned with data's index)
    """
    # First map the 2-letter code to a full country name, row by row.
    data['AS_rank_iso'] = data.apply(country_flag, axis=1)
    # Element-wise map keeps the original index aligned. The previous
    # implementation rebuilt the column via positional lookups
    # (data['AS_rank_iso'][i]) and a fresh RangeIndex DataFrame, which only
    # worked for the default RangeIndex and silently produced NaNs otherwise.
    data['AS_rank_iso'] = data['AS_rank_iso'].map(country_to_continent)
    return data['AS_rank_iso']
def categorize_features(data, current, type, feature, type_of_monitors):
    """
    Pre-process one column of the merged dataframe and draw the matching plot
    (CDF for numeric features, histogram for categorical ones) comparing all
    ASes against a single monitor set.

    :param data: Dataframe combining all datasets; some columns are coerced in place
    :param current: Unique AS numbers ('ASn' column) of the monitor set
    :param type: pandas dtype of the column being plotted
    :param feature: Name of the column being plotted
    :param type_of_monitors: Legend label / filename suffix for the monitor set
    """
    if type == np.int64 or type == np.float64:
        if feature == 'peeringDB_info_prefixes4':
            data['peeringDB_info_prefixes4'] = data.peeringDB_info_prefixes4.fillna(0)
            data['peeringDB_info_prefixes4'] = data.peeringDB_info_prefixes4.astype('Int64')
            cdf_plot(current, data, feature, type_of_monitors)
        elif feature == 'peeringDB_info_prefixes6':
            data['peeringDB_info_prefixes6'] = data.peeringDB_info_prefixes6.fillna(0)
            data['peeringDB_info_prefixes6'] = data.peeringDB_info_prefixes6.astype('Int64')
            cdf_plot(current, data, feature, type_of_monitors)
        elif feature == 'peeringDB_ix_count':
            data['peeringDB_ix_count'] = data.peeringDB_ix_count.fillna(0)
            data['peeringDB_ix_count'] = data.peeringDB_ix_count.astype('Int64')
            cdf_plot(current, data, feature, type_of_monitors)
        elif feature == 'peeringDB_fac_count':
            data['peeringDB_fac_count'] = data.peeringDB_fac_count.fillna(0)
            data['peeringDB_fac_count'] = data.peeringDB_fac_count.astype('Int64')
            cdf_plot(current, data, feature, type_of_monitors)
        elif feature == 'is_personal_AS':
            data['is_personal_AS'] = data['is_personal_AS'].replace('', np.nan)
            data['is_personal_AS'] = data.is_personal_AS.astype('Int64')
            histogram_plot(current, data, feature, type_of_monitors)
        elif feature == 'has_atlas_probe':
            data['has_atlas_probe'] = data.has_atlas_probe.fillna(0)
            data['has_atlas_probe'] = data.has_atlas_probe.astype('Int64')
            histogram_plot(current, data, feature, type_of_monitors)
        else:
            cdf_plot(current, data, feature, type_of_monitors)
        # cdf_subplot(ripe, data, feature)
    elif type == object:
        # Fix: `np.object` was deprecated in numpy 1.20 and removed in 1.24, so
        # the old `type == np.object` raised AttributeError on modern numpy.
        # Comparing a pandas dtype against the builtin `object` is equivalent.
        if feature == 'AS_rank_iso':
            data['AS_rank_iso'] = convert_country_to_continent(data)
            histogram_plot(current, data, feature, type_of_monitors)
        elif feature == 'peeringDB_created':
            data['peeringDB_created'] = data.peeringDB_created.fillna(0)
            data['peeringDB_created'] = convert_to_numerical(data)
            cdf_plot(current, data, feature, type_of_monitors)
        elif feature == 'peeringDB_info_type':
            histogram_plot(current, data, feature, type_of_monitors)
        elif feature == 'AS_rank_source':
            data['AS_rank_source'].fillna(np.nan, inplace=True)
            histogram_plot(current, data, feature, type_of_monitors)
        else:
            histogram_plot(current, data, feature, type_of_monitors)
def categorize_features_all(data, current, type, feature):
    """
    Pre-process one column and return its plot-ready representation for a
    single monitor set.

    :param data: Dataframe combining all datasets; some columns are coerced in place
    :param current: Unique AS numbers ('ASn' column) of the monitor set
    :param type: pandas dtype of the column
    :param feature: Name of the column
    :return: (x, y) ECDF arrays for CDF features, or a string Series for
             histogram features
    """
    if type == np.int64 or type == np.float64:
        if feature == 'peeringDB_info_prefixes4':
            data['peeringDB_info_prefixes4'] = data.peeringDB_info_prefixes4.fillna(0)
            data['peeringDB_info_prefixes4'] = data.peeringDB_info_prefixes4.astype('Int64')
            x, y = cdf_plot_all(current, data, feature)
            return x, y
        elif feature == 'peeringDB_info_prefixes6':
            data['peeringDB_info_prefixes6'] = data.peeringDB_info_prefixes6.fillna(0)
            data['peeringDB_info_prefixes6'] = data.peeringDB_info_prefixes6.astype('Int64')
            x, y = cdf_plot_all(current, data, feature)
            return x, y
        elif feature == 'peeringDB_ix_count':
            data['peeringDB_ix_count'] = data.peeringDB_ix_count.fillna(0)
            data['peeringDB_ix_count'] = data.peeringDB_ix_count.astype('Int64')
            x, y = cdf_plot_all(current, data, feature)
            return x, y
        elif feature == 'peeringDB_fac_count':
            data['peeringDB_fac_count'] = data.peeringDB_fac_count.fillna(0)
            data['peeringDB_fac_count'] = data.peeringDB_fac_count.astype('Int64')
            x, y = cdf_plot_all(current, data, feature)
            return x, y
        elif feature == 'AS_hegemony':
            data['AS_hegemony'] = data.AS_hegemony.replace('', np.nan)
            data['AS_hegemony'] = data.AS_hegemony.astype(float)
            x, y = cdf_plot_all(current, data, feature)
            return x, y
        elif feature == 'is_personal_AS':
            data['is_personal_AS'] = data['is_personal_AS'].replace('', np.nan)
            data['is_personal_AS'] = data.is_personal_AS.astype('string')
            y = histogram_plot_all(current, data, feature)
            return y
        elif feature == 'has_atlas_probe':
            data['has_atlas_probe'] = data.has_atlas_probe.fillna(0)
            data['has_atlas_probe'] = data.has_atlas_probe.astype('Int64')
            y = histogram_plot_all(current, data, feature)
            return y
        else:
            x, y = cdf_plot_all(current, data, feature)
            return x, y
    elif type == object:
        # Fix: `np.object` was deprecated in numpy 1.20 and removed in 1.24, so
        # the old `type == np.object` raised AttributeError on modern numpy.
        # Comparing a pandas dtype against the builtin `object` is equivalent.
        if feature == 'AS_rank_iso':
            data['AS_rank_iso'] = convert_country_to_continent(data)
            y = histogram_plot_all(current, data, feature)
            return y
        elif feature == 'peeringDB_created':
            data['peeringDB_created'] = data.peeringDB_created.fillna(0)
            data['peeringDB_created'] = convert_to_numerical(data)
            x, y = cdf_plot_all(current, data, feature)
            return x, y
        elif feature == 'peeringDB_info_type':
            y = histogram_plot_all(current, data, feature)
            return y
        elif feature == 'AS_rank_source':
            data['AS_rank_source'].fillna(np.nan, inplace=True)
            y = histogram_plot_all(current, data, feature)
            return y
        else:
            y = histogram_plot_all(current, data, feature)
            return y
def cdf_plot_all(unique_monitors, final, feature):
    """
    Compute the ECDF of `feature` over the monitor set and set the axis scale.

    :param unique_monitors: Contains the unique AS numbers of RIPE RIS or the AS numbers of RIPE ATLAS probes
    :param final: Contains a dataframe combining 4 datasets
    :param feature: Is the column name of final
    :return: The (x, y) arrays of the monitors' ECDF
    """
    merged_data = pd.merge(unique_monitors, final, on='ASn', how='inner')
    merged_data.sort_values('ASn', inplace=True)
    merged_data.drop_duplicates(subset='ASn', keep=False, inplace=True)
    merged_data.sort_values(feature, inplace=True)
    ripe_cdf = ECDF(merged_data[feature].dropna())
    plt.ylabel('CDF')
    # Heavy-tailed features are shown on a logarithmic x-axis.
    log_scale_features = {
        'AS_rank_numberAddresses', 'AS_rank_numberAsns', 'AS_rank_numberPrefixes',
        'AS_rank_peer', 'AS_rank_provider', 'AS_rank_total',
        'ASn', 'AS_rank_customer', 'peeringDB_info_prefixes4',
        'peeringDB_info_prefixes6', 'peeringDB_ix_count', 'peeringDB_fac_count',
        'peeringDB_created', 'AS_hegemony',
    }
    plt.xscale('log' if feature in log_scale_features else 'linear')
    return ripe_cdf.x, ripe_cdf.y
def histogram_plot_all(unique_monitors, final, feature):
    """
    Join the monitor set with the full dataframe and return the monitors'
    values of `feature` as strings, ready for a categorical histogram.

    :param unique_monitors: One-column ('ASn') dataframe of monitor ASNs
    :param final: Contains a dataframe combining 3 datasets
    :param feature: Is the column name of final
    """
    # Without dropna we pass all arguments except one (NaN) and the plots are all wrong
    joined = pd.merge(unique_monitors, final, on=['ASn'], how='inner')
    return joined[feature].astype(str)
def cdf_plot(unique_monitors, final, feature, monitors_origin):
    """
    Plot the ECDF of `feature` for all ASes and for one monitor set, then save
    the figure as '<feature><monitors_origin>.png'.

    :param unique_monitors: Contains the unique AS numbers of RIPE RIS or the AS numbers of RIPE ATLAS probes
    :param final: Contains a dataframe combining 4 datasets
    :param feature: Is the column name of final
    :param monitors_origin: Legend label / filename suffix for the monitor set
    """
    all_values = final[feature].dropna()
    final_cdf = ECDF(all_values)
    plt.plot(final_cdf.x, final_cdf.y, label='All_ASes')
    merged_data = pd.merge(unique_monitors, final, on='ASn', how='inner')
    merged_data.sort_values('ASn', inplace=True)
    merged_data.drop_duplicates(subset='ASn', keep=False, inplace=True)
    merged_data.sort_values(feature, inplace=True)
    ripe_cdf = ECDF(merged_data[feature].dropna())
    plt.plot(ripe_cdf.x, ripe_cdf.y, label=monitors_origin)
    plt.ylabel('CDF')
    # Heavy-tailed features are shown on a logarithmic x-axis.
    log_scale_features = {
        'AS_rank_numberAddresses', 'AS_rank_numberAsns', 'AS_rank_numberPrefixes',
        'AS_rank_peer', 'AS_rank_provider', 'AS_rank_total',
        'ASn', 'AS_rank_customer', 'peeringDB_info_prefixes4',
        'peeringDB_info_prefixes6', 'peeringDB_ix_count', 'peeringDB_fac_count',
        'peeringDB_created',
    }
    plt.xscale('log' if feature in log_scale_features else 'linear')
    plt.title('Feature: ' + str(feature), fontsize=14)
    plt.legend()
    plt.tight_layout()
    plt.savefig(str(feature) + str(monitors_origin) + f'.png')
    plt.show()
def histogram_plot(unique_monitors, final, feature, monitors_origin):
    """
    Draw a side-by-side categorical histogram of `feature` for all ASes versus
    one monitor set and save it as '<feature><monitors_origin>.png'.

    :param unique_monitors: One-column ('ASn') dataframe of monitor ASNs
    :param final: Contains a dataframe combining 3 datasets
    :param feature: Is the column name of final
    :param monitors_origin: Legend label / filename suffix for the monitor set
    """
    # Without dropna we pass all arguments except one (NaN) and the plots are all wrong
    all_values = final[feature].dropna().astype(str)
    merged_data = pd.merge(unique_monitors, final, on=['ASn'], how='inner')
    monitor_values = merged_data[feature].astype(str)
    plt.hist((all_values, monitor_values), density=True, bins=final[feature].nunique(),
             histtype='bar', align='left',
             label=['All_ASes', monitors_origin],
             color=['blue', 'orange'])
    plt.legend(prop={'size': 10})
    plt.ylabel('CDF')
    plt.ylim(0, 1)
    plt.suptitle('Feature: ' + str(feature), fontsize=14)
    plt.xticks(rotation='vertical')
    plt.tight_layout()
    plt.savefig(str(feature) + str(monitors_origin) + f'.png')
    plt.show()
def plot_analysis(dataset):
    """
    Run the per-feature comparison plots for each requested monitor dataset.

    :param dataset: It contains all the datasets that will be plot
    """
    final_dataframe = read_final_dataframe()
    final_dataframe.rename(columns={'ASN': 'ASn'}, inplace=True)
    for dataset_name in dataset:
        # Fresh copy per dataset so in-place coercions never accumulate.
        working_copy = final_dataframe.copy(deep=True)
        if dataset_name == 'Ripe_Ris_monitors':
            monitors = read_ripe_peers()
            call_categorize(working_copy, monitors, 'RIPE_RIS_peers')
        elif dataset_name == 'Ripe_Atlas_probes':
            monitors = take_unique_ATLAS_ASNs()
            monitors.rename(columns={'Atlas_ASN': 'ASn'}, inplace=True)
            call_categorize(working_copy, monitors, 'RIPE_ATLAS_probes')
        elif dataset_name == 'RouteViews_peers':
            monitors = take_unique_RouteViews_ASNs()
            monitors.rename(columns={'RouteViews_ASn': 'ASn'}, inplace=True)
            call_categorize(working_copy, monitors, 'RouteViews_peers')
        elif dataset_name == 'Compare_All':
            ripe = read_ripe_peers()
            atlas = take_unique_ATLAS_ASNs()
            atlas.rename(columns={'Atlas_ASN': 'ASn'}, inplace=True)
            route = take_unique_RouteViews_ASNs()
            route.rename(columns={'RouteViews_ASn': 'ASn'}, inplace=True)
            call_categorize_all(working_copy, ripe, atlas, route)
        else:
            raise Exception('Not defined type of dataset')
| [
"matplotlib.pyplot.hist",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"datetime.datetime.today",
"pycountry.countries.get",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"pycountry_convert.country_alpha2_to_continent_code",
"pandas.DataFrame",
"matplotlib.pyplot.ylim",
"pycountry_conve... | [((690, 741), 'pandas.read_json', 'pd.read_json', (['PATH_RIPE_RIS_PEERS'], {'typ': '"""dictionary"""'}), "(PATH_RIPE_RIS_PEERS, typ='dictionary')\n", (702, 741), True, 'import pandas as pd\n'), ((895, 952), 'pandas.DataFrame', 'pd.DataFrame', (['list_of_uniques_ripe_peers'], {'columns': "['ASn']"}), "(list_of_uniques_ripe_peers, columns=['ASn'])\n", (907, 952), True, 'import pandas as pd\n'), ((1088, 1131), 'pandas.read_json', 'pd.read_json', (['RIPE_ATLAS_PROBES'], {'lines': '(True)'}), '(RIPE_ATLAS_PROBES, lines=True)\n', (1100, 1131), True, 'import pandas as pd\n'), ((1346, 1365), 'pandas.DataFrame', 'pd.DataFrame', (['union'], {}), '(union)\n', (1358, 1365), True, 'import pandas as pd\n'), ((1563, 1601), 'pandas.read_csv', 'pd.read_csv', (['ROUTEVIEWS_PEERS'], {'sep': '""","""'}), "(ROUTEVIEWS_PEERS, sep=',')\n", (1574, 1601), True, 'import pandas as pd\n'), ((1664, 1682), 'pandas.DataFrame', 'pd.DataFrame', (['set1'], {}), '(set1)\n', (1676, 1682), True, 'import pandas as pd\n'), ((1908, 1973), 'pandas.read_csv', 'pd.read_csv', (['FINAL_DATAFRAME'], {'dtype': "{'prb_id': 'string'}", 'sep': '""","""'}), "(FINAL_DATAFRAME, dtype={'prb_id': 'string'}, sep=',')\n", (1919, 1973), True, 'import pandas as pd\n'), ((8532, 8548), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (8546, 8548), False, 'from datetime import datetime\n'), ((10449, 10497), 'pandas.DataFrame', 'pd.DataFrame', (['temp_list'], {'columns': "['AS_rank_iso']"}), "(temp_list, columns=['AS_rank_iso'])\n", (10461, 10497), True, 'import pandas as pd\n'), ((16671, 16726), 'pandas.merge', 'pd.merge', (['unique_monitors', 'final'], {'on': '"""ASn"""', 'how': '"""inner"""'}), "(unique_monitors, final, on='ASn', how='inner')\n", (16679, 16726), True, 'import pandas as pd\n'), ((16954, 16971), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""CDF"""'], {}), "('CDF')\n", (16964, 16971), True, 'import matplotlib.pyplot as plt\n'), ((17978, 18035), 'pandas.merge', 'pd.merge', 
(['unique_monitors', 'final'], {'on': "['ASn']", 'how': '"""inner"""'}), "(unique_monitors, final, on=['ASn'], how='inner')\n", (17986, 18035), True, 'import pandas as pd\n'), ((18439, 18446), 'statsmodels.distributions.empirical_distribution.ECDF', 'ECDF', (['x'], {}), '(x)\n', (18443, 18446), False, 'from statsmodels.distributions.empirical_distribution import ECDF\n'), ((18451, 18503), 'matplotlib.pyplot.plot', 'plt.plot', (['final_cdf.x', 'final_cdf.y'], {'label': '"""All_ASes"""'}), "(final_cdf.x, final_cdf.y, label='All_ASes')\n", (18459, 18503), True, 'import matplotlib.pyplot as plt\n'), ((18522, 18577), 'pandas.merge', 'pd.merge', (['unique_monitors', 'final'], {'on': '"""ASn"""', 'how': '"""inner"""'}), "(unique_monitors, final, on='ASn', how='inner')\n", (18530, 18577), True, 'import pandas as pd\n'), ((18805, 18860), 'matplotlib.pyplot.plot', 'plt.plot', (['ripe_cdf.x', 'ripe_cdf.y'], {'label': 'monitors_origin'}), '(ripe_cdf.x, ripe_cdf.y, label=monitors_origin)\n', (18813, 18860), True, 'import matplotlib.pyplot as plt\n'), ((18865, 18882), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""CDF"""'], {}), "('CDF')\n", (18875, 18882), True, 'import matplotlib.pyplot as plt\n'), ((19514, 19526), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (19524, 19526), True, 'import matplotlib.pyplot as plt\n'), ((19531, 19549), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (19547, 19549), True, 'import matplotlib.pyplot as plt\n'), ((19617, 19627), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19625, 19627), True, 'import matplotlib.pyplot as plt\n'), ((20065, 20122), 'pandas.merge', 'pd.merge', (['unique_monitors', 'final'], {'on': "['ASn']", 'how': '"""inner"""'}), "(unique_monitors, final, on=['ASn'], how='inner')\n", (20073, 20122), True, 'import pandas as pd\n'), ((20353, 20382), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'prop': "{'size': 10}"}), "(prop={'size': 10})\n", (20363, 20382), True, 'import 
matplotlib.pyplot as plt\n'), ((20387, 20404), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""CDF"""'], {}), "('CDF')\n", (20397, 20404), True, 'import matplotlib.pyplot as plt\n'), ((20409, 20423), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (20417, 20423), True, 'import matplotlib.pyplot as plt\n'), ((20486, 20517), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '"""vertical"""'}), "(rotation='vertical')\n", (20496, 20517), True, 'import matplotlib.pyplot as plt\n'), ((20522, 20540), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (20538, 20540), True, 'import matplotlib.pyplot as plt\n'), ((20608, 20618), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20616, 20618), True, 'import matplotlib.pyplot as plt\n'), ((8883, 8909), 're.findall', 're.findall', (['"""\\\\d{4}"""', 'text'], {}), "('\\\\d{4}', text)\n", (8893, 8909), False, 'import re\n'), ((9679, 9726), 'pycountry_convert.country_name_to_country_alpha2', 'pc.country_name_to_country_alpha2', (['country_name'], {}), '(country_name)\n', (9712, 9726), True, 'import pycountry_convert as pc\n'), ((9760, 9811), 'pycountry_convert.country_alpha2_to_continent_code', 'pc.country_alpha2_to_continent_code', (['country_alpha2'], {}), '(country_alpha2)\n', (9795, 9811), True, 'import pycountry_convert as pc\n'), ((9845, 9912), 'pycountry_convert.convert_continent_code_to_continent_name', 'pc.convert_continent_code_to_continent_name', (['country_continent_code'], {}), '(country_continent_code)\n', (9888, 9912), True, 'import pycountry_convert as pc\n'), ((17515, 17532), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (17525, 17532), True, 'import matplotlib.pyplot as plt\n'), ((17551, 17571), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""linear"""'], {}), "('linear')\n", (17561, 17571), True, 'import matplotlib.pyplot as plt\n'), ((19398, 19415), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), 
"('log')\n", (19408, 19415), True, 'import matplotlib.pyplot as plt\n'), ((19434, 19454), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""linear"""'], {}), "('linear')\n", (19444, 19454), True, 'import matplotlib.pyplot as plt\n'), ((1310, 1321), 'pandas.notna', 'pd.notna', (['x'], {}), '(x)\n', (1318, 1321), True, 'import pandas as pd\n'), ((9300, 9352), 'pycountry.countries.get', 'pycountry.countries.get', ([], {'alpha_2': "data['AS_rank_iso']"}), "(alpha_2=data['AS_rank_iso'])\n", (9323, 9352), False, 'import pycountry\n'), ((3202, 3339), 'matplotlib.pyplot.hist', 'plt.hist', (['[x, y1, y2, y3]'], {'density': '(True)', 'bins': '(2)', 'histtype': '"""bar"""', 'align': '"""left"""', 'label': "['All_ASes', 'Ripe Ris', 'Atlas', 'RouteViews']"}), "([x, y1, y2, y3], density=True, bins=2, histtype='bar', align=\n 'left', label=['All_ASes', 'Ripe Ris', 'Atlas', 'RouteViews'])\n", (3210, 3339), True, 'import matplotlib.pyplot as plt\n'), ((3389, 3418), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'prop': "{'size': 10}"}), "(prop={'size': 10})\n", (3399, 3418), True, 'import matplotlib.pyplot as plt\n'), ((3431, 3448), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""CDF"""'], {}), "('CDF')\n", (3441, 3448), True, 'import matplotlib.pyplot as plt\n'), ((3461, 3475), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (3469, 3475), True, 'import matplotlib.pyplot as plt\n'), ((3558, 3589), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '"""vertical"""'}), "(rotation='vertical')\n", (3568, 3589), True, 'import matplotlib.pyplot as plt\n'), ((3602, 3620), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3618, 3620), True, 'import matplotlib.pyplot as plt\n'), ((3693, 3703), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3701, 3703), True, 'import matplotlib.pyplot as plt\n'), ((4377, 4406), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'prop': "{'size': 10}"}), "(prop={'size': 10})\n", (4387, 4406), True, 
'import matplotlib.pyplot as plt\n'), ((4419, 4436), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""CDF"""'], {}), "('CDF')\n", (4429, 4436), True, 'import matplotlib.pyplot as plt\n'), ((4449, 4463), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (4457, 4463), True, 'import matplotlib.pyplot as plt\n'), ((4546, 4577), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '"""vertical"""'}), "(rotation='vertical')\n", (4556, 4577), True, 'import matplotlib.pyplot as plt\n'), ((4590, 4608), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4606, 4608), True, 'import matplotlib.pyplot as plt\n'), ((4681, 4691), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4689, 4691), True, 'import matplotlib.pyplot as plt\n'), ((5084, 5091), 'statsmodels.distributions.empirical_distribution.ECDF', 'ECDF', (['x'], {}), '(x)\n', (5088, 5091), False, 'from statsmodels.distributions.empirical_distribution import ECDF\n'), ((5104, 5156), 'matplotlib.pyplot.plot', 'plt.plot', (['final_cdf.x', 'final_cdf.y'], {'label': '"""All_ASes"""'}), "(final_cdf.x, final_cdf.y, label='All_ASes')\n", (5112, 5156), True, 'import matplotlib.pyplot as plt\n'), ((5169, 5201), 'matplotlib.pyplot.plot', 'plt.plot', (['x1', 'y1'], {'label': '"""<NAME>"""'}), "(x1, y1, label='<NAME>')\n", (5177, 5201), True, 'import matplotlib.pyplot as plt\n'), ((5214, 5245), 'matplotlib.pyplot.plot', 'plt.plot', (['x2', 'y2'], {'label': '"""Atlas"""'}), "(x2, y2, label='Atlas')\n", (5222, 5245), True, 'import matplotlib.pyplot as plt\n'), ((5258, 5294), 'matplotlib.pyplot.plot', 'plt.plot', (['x3', 'y3'], {'label': '"""RouteViews"""'}), "(x3, y3, label='RouteViews')\n", (5266, 5294), True, 'import matplotlib.pyplot as plt\n'), ((5374, 5386), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5384, 5386), True, 'import matplotlib.pyplot as plt\n'), ((5399, 5417), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5415, 5417), 
True, 'import matplotlib.pyplot as plt\n'), ((5490, 5500), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5498, 5500), True, 'import matplotlib.pyplot as plt\n'), ((7421, 7477), 'matplotlib.pyplot.bar', 'plt.bar', (['x_1', 'y0_list'], {'label': '"""All ASes"""', 'width': 'bar_width'}), "(x_1, y0_list, label='All ASes', width=bar_width)\n", (7428, 7477), True, 'import matplotlib.pyplot as plt\n'), ((7490, 7546), 'matplotlib.pyplot.bar', 'plt.bar', (['x_2', 'y1_list'], {'label': '"""Ripe Ris"""', 'width': 'bar_width'}), "(x_2, y1_list, label='Ripe Ris', width=bar_width)\n", (7497, 7546), True, 'import matplotlib.pyplot as plt\n'), ((7559, 7612), 'matplotlib.pyplot.bar', 'plt.bar', (['x_3', 'y2_list'], {'label': '"""ATLAS"""', 'width': 'bar_width'}), "(x_3, y2_list, label='ATLAS', width=bar_width)\n", (7566, 7612), True, 'import matplotlib.pyplot as plt\n'), ((7625, 7682), 'matplotlib.pyplot.bar', 'plt.bar', (['x_4', 'y3_list'], {'label': '"""RouteView"""', 'width': 'bar_width'}), "(x_4, y3_list, label='RouteView', width=bar_width)\n", (7632, 7682), True, 'import matplotlib.pyplot as plt\n'), ((7696, 7725), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'prop': "{'size': 10}"}), "(prop={'size': 10})\n", (7706, 7725), True, 'import matplotlib.pyplot as plt\n'), ((7738, 7755), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""CDF"""'], {}), "('CDF')\n", (7748, 7755), True, 'import matplotlib.pyplot as plt\n'), ((7768, 7782), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (7776, 7782), True, 'import matplotlib.pyplot as plt\n'), ((7952, 7970), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7968, 7970), True, 'import matplotlib.pyplot as plt\n'), ((8043, 8053), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8051, 8053), True, 'import matplotlib.pyplot as plt\n'), ((8066, 8077), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8075, 8077), True, 'import matplotlib.pyplot as plt\n')] |
import os, pdb
# ______________________________________NLPDV____________________________________
# _______________________________________________________________________
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from transformers import *
import _pickle as pkl
import shutil
import numpy as np
from tqdm import trange, tqdm
# _______________________________________________________________________
# ______________________________________NLPDV____________________________________
#gpu 0,2 on NLP9 are culprit gpu 3,4 on nlp8
# Experiment configuration: GPU ids, data location and hyper-parameters for the
# multi-domain POS-tagging runs driven by the loop below.
CUDA_VISIBLE_DEVICES = [0,1,3,4,5,6,7]
BASE_DATA_DIR = '/local/rizwan/UDTree/'
run_file = './examples/run_multi_domain_pos.py'
model_type = 'bert'
train_model_name_or_path = 'bert-base-multilingual-cased' # 'bert-large-uncased-whole-word-masking'
do_lower_case = False
num_train_epochs = 4.0
num_eval_epochs = 1.0
per_gpu_eval_batch_size = 32
per_gpu_train_batch_size = 32
learning_rate = 5e-5
max_seq_length = 128
fp16 = True
overwrite_cache = False
evaluate_during_training = True
#batch sizes: 8, 16, 32, 64, 128 (for max seq 128, max batch size is 32)
#learning rates: 3e-4, 1e-4, 5e-5, 3e-5, 2e-5
'''
Runs:
'''
# All Universal Dependencies treebanks usable as evaluation (target) tasks.
ALL_EVAL_TASKS = [
    'UD_ARABIC',
    'UD_BASQUE',
    'UD_BULGARIAN',
    'UD_CATALAN',
    'UD_CHINESE',
    'UD_CROATIAN',
    'UD_CZECH',
    'UD_DANISH',
    'UD_DUTCH',
    'UD_ENGLISH',
    'UD_FINNISH',
    'UD_FRENCH',
    'UD_GERMAN',
    'UD_HEBREW',
    'UD_HINDI',
    'UD_INDONESIAN',
    'UD_ITALIAN',
    'UD_JAPANESE',
    'UD_KOREAN',
    'UD_NORWEGIAN',
    'UD_PERSIAN',
    'UD_POLISH',
    'UD_PORTUGUESE',
    'UD_ROMANIAN',
    'UD_RUSSIAN',
    'UD_SERBIAN',
    'UD_SLOVAK',
    'UD_SLOVENIAN',
    'UD_SPANISH',
    'UD_SWEDISH',
    'UD_TURKISH']
# Per target task: indices (into the task's source-domain list) of domains to
# remove, as selected by the Shapley-value data-valuation analysis.
shpley_removals = {
    'UD_ARABIC': [0, 3, 7, 8, 11, 13, 16, 21, 29],
    'UD_BASQUE': [17, 19],
    'UD_BULGARIAN': [3, 19], #[3, 13, 17, 19],
    'UD_CATALAN': [ 0, 3, 17, 19, 20],
    'UD_CHINESE':[5, 13, 20, 25, 26],
    'UD_CROATIAN': [13, 17, 19],
    'UD_CZECH': [13, 17, 19],
    'UD_DANISH': [13],
    'UD_DUTCH': [17,19],
    'UD_ENGLISH': [0, 3, 5, 6, 8, 10, 11, 12, 13, 15, 16, 17, 18, 19, 20, 21, 23, 24, 25, 26, 29],
    'UD_FINNISH': [13, 17, 19],
    'UD_FRENCH': [17],
    'UD_GERMAN': [ 17, 19], # Try with [3, 5, 16, 17, 19, 20]
    'UD_HEBREW': [17],
    'UD_HINDI': [ 0, 17, 19],
    'UD_INDONESIAN': [ 0, 13, 17, 19],
    'UD_ITALIAN': [ 5, 17, 19, 20],
    'UD_JAPANESE': [19],
    'UD_KOREAN': [ 0, 13, 19],
    'UD_NORWEGIAN': [0, 13, 19],
    'UD_PERSIAN': [4, 17, 19],
    'UD_POLISH': [13, 17, 19],
    'UD_PORTUGUESE': [17],
    'UD_ROMANIAN': [ 13, 17, 19],
    'UD_RUSSIAN': [ 13, 17, 19],
    'UD_SERBIAN': [ 13, 17, 19],
    'UD_SLOVAK': [ 13, 17, 19],
    'UD_SLOVENIAN': [17],
    'UD_SPANISH':[5, 17, 19],
    'UD_SWEDISH':[17],
    'UD_TURKISH': [13, 17, 19]
}
# Per-task accuracy accumulators for the three compared setups.
all_acc_shapley = {eval_task_name:[] for eval_task_name in ALL_EVAL_TASKS}
all_acc_baseline = {eval_task_name:[] for eval_task_name in ALL_EVAL_TASKS}
all_acc_baseline_s = {eval_task_name:[] for eval_task_name in ALL_EVAL_TASKS}
# When not tuning, train for the full fixed number of epochs.
is_tune=True
BASELINES_S = 'baseline-s'
if not is_tune: num_train_epochs=4.0
for eval_task_name in ['UD_FINNISH']:
if len(shpley_removals[eval_task_name])<1: continue
for i in range(1):
seed = 43
np.random.seed(seed)
for is_few_shot in [False]:
best_shapley_learning_rate = None
best_shapley_per_gpu_train_batch_size = None
best_baseline_learning_rate = None
best_baseline_per_gpu_train_batch_size = None
best_baseline_s_learning_rate = None
best_baseline_s_per_gpu_train_batch_size = None
BEST_BASELINE_ACC = None
BEST_SHAPLEY_ACC = None
for is_Shapley in [ BASELINES_S,]:
best_learning_rate = None
best_per_gpu_train_batch_size = None
best_acc = -1
if BEST_BASELINE_ACC and BEST_SHAPLEY_ACC and BEST_BASELINE_ACC > BEST_SHAPLEY_ACC: continue
# _______________________________________________________________________
# ______________________________________NLPDV____________________________________
ALL_BINARY_TASKS = [
'UD_ARABIC',
'UD_BASQUE',
'UD_BULGARIAN',
'UD_CATALAN',
'UD_CHINESE',
'UD_CROATIAN',
'UD_CZECH',
'UD_DANISH',
'UD_DUTCH',
'UD_ENGLISH',
'UD_FINNISH',
'UD_FRENCH',
'UD_GERMAN',
'UD_HEBREW',
'UD_HINDI',
'UD_INDONESIAN',
'UD_ITALIAN',
'UD_JAPANESE',
'UD_KOREAN',
'UD_NORWEGIAN',
'UD_PERSIAN',
'UD_POLISH',
'UD_PORTUGUESE',
'UD_ROMANIAN',
'UD_RUSSIAN',
'UD_SERBIAN',
'UD_SLOVAK',
'UD_SLOVENIAN',
'UD_SPANISH',
'UD_SWEDISH',
'UD_TURKISH']
DOMAIN_TRANSFER = True
# _______________________________________________________________________
# ______________________________________NLPDV____________________________________
if eval_task_name in ALL_BINARY_TASKS: ALL_BINARY_TASKS.remove(eval_task_name)
if is_Shapley==BASELINES_S:
raddom_domains = np.random.choice(np.arange(len(ALL_BINARY_TASKS)), \
len(shpley_removals[eval_task_name]), replace=False)
learning_rates = [ 2e-5, 3e-5, 5e-5]
bz_szs = [ 16, 32]
for learning_rate in learning_rates:
for per_gpu_train_batch_size in bz_szs:
train_task_name = eval_task_name
if is_Shapley=='LOO': train_output_dir = 'temp/' + train_task_name + '_output_LOO_'+str(per_gpu_train_batch_size) + '_'+str(learning_rate) #+str(seed)+'/'
elif is_Shapley==True:
train_output_dir = 'temp/' + train_task_name + '_output_Shapley_'+str(per_gpu_train_batch_size) + '_'+str(learning_rate) #+str(seed)+'/'
elif is_Shapley == BASELINES_S:
train_output_dir = 'temp/' + train_task_name + '_output_baseline-s_' + str(
per_gpu_train_batch_size) + '_' + str(learning_rate) #+str(seed)+'/'
else:
train_output_dir = 'temp/' + train_task_name + '_output_baseline_'+str(per_gpu_train_batch_size) + '_'+str(learning_rate) #+str(seed)+'/'
eval_output_dir = train_output_dir +'/best'
train_data_dir = BASE_DATA_DIR
eval_data_dir = BASE_DATA_DIR
directory = eval_output_dir
if not os.path.exists(train_output_dir) :
os.makedirs(directory)
os.makedirs(os.path.join(directory, 'plots'))
if not os.path.exists(directory) :
os.makedirs(directory)
os.makedirs(os.path.join(directory, 'plots'))
def write_indices_to_delete(indices_to_delete_file_path, ids):
with open(indices_to_delete_file_path, "w") as writer:
print(f"***** Writing ids to {str(indices_to_delete_file_path)} *****", flush=True)
for id in ids:
writer.write("%s " % (id))
indices_to_delete_file_path = directory + '/indices_to_delete_file_path' + '.json'
if is_Shapley == True and eval_task_name != 'UD_TURKISH':
write_indices_to_delete(indices_to_delete_file_path, shpley_removals[eval_task_name])
if is_Shapley == BASELINES_S and eval_task_name != 'UD_TURKISH':
print('-eval_task_name: ', eval_task_name, flush=True)
print('raddom_removal_domains: ', raddom_domains,\
'shapley removals: ', shpley_removals[eval_task_name], flush=True)
write_indices_to_delete(indices_to_delete_file_path, raddom_domains )
# if is_Shapley == False and eval_task_name == 'UD_ENGLISH':
# write_indices_to_delete(indices_to_delete_file_path,\
# [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 28, 29])
# else is_Shapley: continue
run_command = "CUDA_VISIBLE_DEVICES=" + str(CUDA_VISIBLE_DEVICES[0])
for i in CUDA_VISIBLE_DEVICES[1:]:
run_command += ',' + str(i)
run_command += ' python '
if len(CUDA_VISIBLE_DEVICES) > 1: run_command += '-m torch.distributed.launch --nproc_per_node ' \
+ str(len(CUDA_VISIBLE_DEVICES))
run_command += ' ' + run_file + ' ' + ' --model_type ' + model_type + \
' --max_seq_length ' + str(max_seq_length) + ' --per_gpu_eval_batch_size=' + str(
per_gpu_eval_batch_size) + \
' --per_gpu_train_batch_size=' + str(per_gpu_train_batch_size) + ' --learning_rate ' + str(learning_rate) \
+ ' --overwrite_output_dir '
if do_lower_case: run_command += '--do_lower_case '
if fp16: run_command += ' --fp16 '
if overwrite_cache:
run_command += ' --overwrite_cache '
if evaluate_during_training: run_command += ' --evaluate_during_training '
# For training:
train_run_command = run_command + ' --do_train --task_name ' + train_task_name + \
' --data_dir ' + train_data_dir + ' --output_dir ' + \
train_output_dir + ' --model_name_or_path ' + train_model_name_or_path
if is_Shapley: train_run_command += ' --indices_to_delete_file_path ' + indices_to_delete_file_path
if is_few_shot : train_run_command += ' --is_few_shot'
command = train_run_command + ' --num_train_epochs 1'
print(command, flush=True)
if not os.path.exists(os.path.join(eval_output_dir,"pytorch_model.bin")):
os.system(command)
# initial Eval on whole dataset
# For eval:
run_command = "CUDA_VISIBLE_DEVICES=" + str(CUDA_VISIBLE_DEVICES[0])
run_command += ' python '
run_command += ' ' + run_file + ' ' + ' --model_type ' + model_type + \
' --max_seq_length ' + str(max_seq_length) + ' --per_gpu_eval_batch_size=' + str(
per_gpu_eval_batch_size) + \
' --per_gpu_train_batch_size=' + str(
per_gpu_train_batch_size) + ' --learning_rate ' + str(learning_rate) \
+ ' --overwrite_output_dir '
if do_lower_case: run_command += '--do_lower_case '
if fp16: run_command += ' --fp16 '
if overwrite_cache:
run_command += ' --overwrite_cache '
if evaluate_during_training: run_command += ' --evaluate_during_training '
eval_run_command = run_command + ' --do_eval --task_name ' + eval_task_name + \
' --data_dir ' + eval_data_dir + ' --output_dir ' + eval_output_dir + \
' --model_name_or_path ' + eval_output_dir
command = eval_run_command
print(command, flush=True)
os.system(command)
try:
output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
with open(output_eval_file, "r") as reader:
for line in reader:
line = line.strip().split()
key = line[0]
value = line[-1]
if key in ['acc']:
acc = float(value)
except:
acc = 0
print('-'*100, flush=True)
print("Task: ", train_task_name, flush=True)
print("learning_rate: ", learning_rate, flush=True)
print("per_gpu_train_batch_size: ", per_gpu_train_batch_size, flush=True)
print("Acc: ", acc, flush=True)
print("Shapely: ", str(is_Shapley), flush=True)
print('-'*100, flush=True)
if is_Shapley==True:
all_acc_shapley[eval_task_name].append(acc)
elif is_Shapley==False:
all_acc_baseline[eval_task_name].append(acc)
else:
all_acc_baseline_s[eval_task_name].append(acc)
if acc>best_acc:
best_per_gpu_train_batch_size = per_gpu_train_batch_size
best_learning_rate = learning_rate
best_acc=acc
print('-'*100, flush=True)
print('-Task: ', eval_task_name, flush=True)
print('-is_Shapley: ', is_Shapley, flush=True)
print('-best lr: ', best_learning_rate, '\n-bz sz: ', best_per_gpu_train_batch_size, \
'\n-best acc: ', best_acc, '\n-all_acc_shapley: ', all_acc_shapley, \
'\n-all_acc_shapley_baseline_s: ',all_acc_baseline_s,'\n- all_acc_baseline: ', all_acc_baseline, flush=True)
print('-'*100, flush=True)
# For Test:
train_task_name = eval_task_name
if is_Shapley == 'LOO':
train_output_dir = 'temp/' + train_task_name + '_output_LOO_' + str(
best_per_gpu_train_batch_size) + '_' + str(
best_learning_rate) # +str(seed)+'/'
elif is_Shapley == True:
train_output_dir = 'temp/' + train_task_name + '_output_Shapley_' + str(
best_per_gpu_train_batch_size) + '_' + str(best_learning_rate) # +str(seed)+'/'
elif is_Shapley == BASELINES_S:
train_output_dir = 'temp/' + train_task_name + '_output_baseline-s_' + str(
best_per_gpu_train_batch_size) + '_' + str(best_learning_rate) # +str(seed)+'/'
else:
train_output_dir = 'temp/' + train_task_name + '_output_baseline_' + str(
best_per_gpu_train_batch_size) + '_' + str(best_learning_rate) # +str(seed)+'/'
eval_output_dir = train_output_dir + '/best/'
run_command = "CUDA_VISIBLE_DEVICES=" + str(CUDA_VISIBLE_DEVICES[0])
for i in CUDA_VISIBLE_DEVICES[1:]:
run_command += ',' + str(i)
run_command += ' python '
if len(CUDA_VISIBLE_DEVICES) > 1: run_command += '-m torch.distributed.launch --nproc_per_node ' \
+ str(len(CUDA_VISIBLE_DEVICES))
run_command += ' ' + run_file + ' ' + ' --model_type ' + model_type + \
' --max_seq_length ' + str(max_seq_length) + ' --per_gpu_eval_batch_size=' + str(
per_gpu_eval_batch_size) + \
' --per_gpu_train_batch_size=' + str(
best_per_gpu_train_batch_size) + ' --learning_rate ' + str(
best_learning_rate) \
+ ' --overwrite_output_dir '
if do_lower_case: run_command += '--do_lower_case '
if fp16: run_command += ' --fp16 '
if overwrite_cache:
run_command += ' --overwrite_cache '
if evaluate_during_training: run_command += ' --evaluate_during_training '
train_run_command = run_command + ' --do_train --task_name ' + train_task_name + \
' --data_dir ' + train_data_dir + ' --output_dir ' + \
train_output_dir + ' --model_name_or_path ' + train_model_name_or_path
# For eval:
run_command = "CUDA_VISIBLE_DEVICES=" + str(CUDA_VISIBLE_DEVICES[0])
run_command += ' python '
run_command += ' ' + run_file + ' ' + ' --model_type ' + model_type + \
' --max_seq_length ' + str(max_seq_length) + ' --per_gpu_eval_batch_size=' + str(
per_gpu_eval_batch_size) + \
' --per_gpu_train_batch_size=' + str(
best_per_gpu_train_batch_size) + ' --learning_rate ' + str(
best_learning_rate) \
+ ' --overwrite_output_dir '
if do_lower_case: run_command += '--do_lower_case '
if fp16: run_command += ' --fp16 '
if overwrite_cache:
run_command += ' --overwrite_cache '
eval_run_command = run_command + ' --do_predict --task_name ' + eval_task_name + \
' --data_dir ' + eval_data_dir + ' --output_dir ' + eval_output_dir + \
' --model_name_or_path ' + eval_output_dir
indices_to_delete_file_path = eval_output_dir + '/indices_to_delete_file_path' + '.json'
if is_Shapley: train_run_command += ' --indices_to_delete_file_path ' + indices_to_delete_file_path
command = train_run_command + ' --num_train_epochs ' + str(num_train_epochs)
print(command, flush=True)
os.system(command)
# initial Eval on whole dataset
command = eval_run_command
print(command, flush=True)
os.system(command)
output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
with open(output_eval_file, "r") as reader:
for line in reader:
line = line.strip().split()
key = line[0]
value = line[-1]
if key in ['acc']:
acc = float(value)
print('-' * 100, flush=True)
print("Task: ", train_task_name, flush=True)
print("best_learning_rate: ", best_learning_rate, flush=True)
print("best_per_gpu_train_batch_size: ", best_per_gpu_train_batch_size, flush=True)
print("BEST TEST Acc: ", acc, flush=True)
print("Shapely: ", str(is_Shapley), flush=True)
print('-' * 100, flush=True)
if is_Shapley==True:
best_shapley_learning_rate = best_learning_rate
best_shapley_per_gpu_train_batch_size = best_per_gpu_train_batch_size
BEST_SHAPLEY_ACC = acc
elif is_Shapley==BASELINES_S:
best_baseline_s_learning_rate = best_learning_rate
best_baseline_s_per_gpu_train_batch_size = best_per_gpu_train_batch_size
else:
best_baseline_learning_rate = best_learning_rate
best_baseline_per_gpu_train_batch_size = best_per_gpu_train_batch_size
BEST_BASELINE_ACC = acc
best_shapley_dir = 'temp/'+eval_task_name+'_output_Shapley_'+str(best_shapley_per_gpu_train_batch_size)+'_'+\
str(best_shapley_learning_rate)+'/best/'
gold = best_shapley_dir+'test_gold.txt'
shapley = best_shapley_dir+'test_predictions.txt'
baseline = 'temp/'+eval_task_name+'_output_baseline_'+str(best_baseline_per_gpu_train_batch_size)+'_'+\
str(best_baseline_learning_rate)+'/best/'+'test_predictions.txt'
baseline_s = 'temp/'+eval_task_name+'_output_baseline-s_'+str(best_baseline_s_per_gpu_train_batch_size)+'_'+\
str(best_baseline_s_learning_rate)+'/best/'+'test_predictions.txt'
print('-'*100, flush=True)
print('Boostrap paired test of Shapley woth baseline!', flush=True)
command = "python script_t_test.py "+ gold + ' '+ shapley + ' ' + baseline
print(command, flush=True)
print('-' * 50, flush=True)
os.system(command)
print('-' * 50, flush=True)
print('-' * 50, flush=True)
print('Boostrap paired test of Shapley woth baseline-s!', flush=True)
command = "python script_t_test.py " + gold + ' ' + shapley + ' ' + baseline_s
print(command, flush=True)
print('-' * 50, flush=True)
os.system(command)
print('-' * 100, flush=True)
| [
"os.path.exists",
"os.makedirs",
"matplotlib.use",
"os.path.join",
"numpy.random.seed",
"os.system"
] | [((191, 212), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (205, 212), False, 'import matplotlib\n'), ((3438, 3458), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3452, 3458), True, 'import numpy as np\n'), ((22098, 22116), 'os.system', 'os.system', (['command'], {}), '(command)\n', (22107, 22116), False, 'import os, pdb\n'), ((22461, 22479), 'os.system', 'os.system', (['command'], {}), '(command)\n', (22470, 22479), False, 'import os, pdb\n'), ((19369, 19387), 'os.system', 'os.system', (['command'], {}), '(command)\n', (19378, 19387), False, 'import os, pdb\n'), ((19540, 19558), 'os.system', 'os.system', (['command'], {}), '(command)\n', (19549, 19558), False, 'import os, pdb\n'), ((19595, 19644), 'os.path.join', 'os.path.join', (['eval_output_dir', '"""eval_results.txt"""'], {}), "(eval_output_dir, 'eval_results.txt')\n", (19607, 19644), False, 'import os, pdb\n'), ((12973, 12991), 'os.system', 'os.system', (['command'], {}), '(command)\n', (12982, 12991), False, 'import os, pdb\n'), ((7397, 7429), 'os.path.exists', 'os.path.exists', (['train_output_dir'], {}), '(train_output_dir)\n', (7411, 7429), False, 'import os, pdb\n'), ((7460, 7482), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (7471, 7482), False, 'import os, pdb\n'), ((7589, 7614), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (7603, 7614), False, 'import os, pdb\n'), ((7645, 7667), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (7656, 7667), False, 'import os, pdb\n'), ((11422, 11440), 'os.system', 'os.system', (['command'], {}), '(command)\n', (11431, 11440), False, 'import os, pdb\n'), ((13069, 13118), 'os.path.join', 'os.path.join', (['eval_output_dir', '"""eval_results.txt"""'], {}), "(eval_output_dir, 'eval_results.txt')\n", (13081, 13118), False, 'import os, pdb\n'), ((7523, 7555), 'os.path.join', 'os.path.join', (['directory', '"""plots"""'], {}), "(directory, 'plots')\n", 
(7535, 7555), False, 'import os, pdb\n'), ((7708, 7740), 'os.path.join', 'os.path.join', (['directory', '"""plots"""'], {}), "(directory, 'plots')\n", (7720, 7740), False, 'import os, pdb\n'), ((11342, 11392), 'os.path.join', 'os.path.join', (['eval_output_dir', '"""pytorch_model.bin"""'], {}), "(eval_output_dir, 'pytorch_model.bin')\n", (11354, 11392), False, 'import os, pdb\n')] |
# -*- coding: utf-8 -*-
from azureml.core import Environment, Experiment, ScriptRunConfig, Workspace
from azureml.core.conda_dependencies import CondaDependencies
def main():
# Create a Python environment for the experiment
# env = Environment("experiment_test_env")
env = Environment("experiment-test-MLFlow-env")
# Ensure the required packages are installed
# (here pip and Azure ML defaults)
packages = CondaDependencies.create(conda_packages=['pip'],
pip_packages=['mlflow', 'azureml-mlflow'])
# pip_packages=['azureml-defaults'])
env.python.conda_dependencies = packages
# Create a script config
experiment_folder = './src/azure'
script_config = ScriptRunConfig(source_directory=experiment_folder,
script='azure_test_experiment_script_MLFlow.py',
# script='azure_test_experiment_script.py',
environment=env)
# Load the workspace from the saved config file
ws = Workspace.from_config()
print('Ready to use Azure ML to work with {}'.format(ws.name))
# Create and submit the experiment
experiment = Experiment(workspace=ws, name='test-experiment-MLFlow') # name='test-experiment')
run = experiment.submit(config=script_config)
run.wait_for_completion()
if __name__ == '__main__':
main()
| [
"azureml.core.Workspace.from_config",
"azureml.core.Experiment",
"azureml.core.conda_dependencies.CondaDependencies.create",
"azureml.core.Environment",
"azureml.core.ScriptRunConfig"
] | [((287, 328), 'azureml.core.Environment', 'Environment', (['"""experiment-test-MLFlow-env"""'], {}), "('experiment-test-MLFlow-env')\n", (298, 328), False, 'from azureml.core import Environment, Experiment, ScriptRunConfig, Workspace\n'), ((434, 529), 'azureml.core.conda_dependencies.CondaDependencies.create', 'CondaDependencies.create', ([], {'conda_packages': "['pip']", 'pip_packages': "['mlflow', 'azureml-mlflow']"}), "(conda_packages=['pip'], pip_packages=['mlflow',\n 'azureml-mlflow'])\n", (458, 529), False, 'from azureml.core.conda_dependencies import CondaDependencies\n'), ((776, 898), 'azureml.core.ScriptRunConfig', 'ScriptRunConfig', ([], {'source_directory': 'experiment_folder', 'script': '"""azure_test_experiment_script_MLFlow.py"""', 'environment': 'env'}), "(source_directory=experiment_folder, script=\n 'azure_test_experiment_script_MLFlow.py', environment=env)\n", (791, 898), False, 'from azureml.core import Environment, Experiment, ScriptRunConfig, Workspace\n'), ((1112, 1135), 'azureml.core.Workspace.from_config', 'Workspace.from_config', ([], {}), '()\n', (1133, 1135), False, 'from azureml.core import Environment, Experiment, ScriptRunConfig, Workspace\n'), ((1260, 1315), 'azureml.core.Experiment', 'Experiment', ([], {'workspace': 'ws', 'name': '"""test-experiment-MLFlow"""'}), "(workspace=ws, name='test-experiment-MLFlow')\n", (1270, 1315), False, 'from azureml.core import Environment, Experiment, ScriptRunConfig, Workspace\n')] |
import pandas as pd
import time
import sys
class AverageMeter(object):
"""Sum values to compute the mean."""
def __init__(self):
self.reset()
def reset(self):
self.count = 0
self.sum = 0
def update(self, val):
self.count += 1
self.sum += val
def average(self):
return self.sum / self.count
class MetricsLogger(object):
"""Track the values of different metrics, display their average values and save them
in a log file."""
def __init__(self):
self.log_df = pd.DataFrame()
self.n = 0
self.reset()
def reset(self):
"""Reset the metrics average meters."""
# Save last epoch average value of metrics (unless it's the first epoch)
if self.n > 0:
self.log_df = self.log_df.append(self._average(), ignore_index=True)
self.n += 1
self.avgmeters = {}
self.last_update = time.time()
def update(self, values, show=True):
"""Update metrics values and display their current average if show is True."""
for key in values:
if key not in self.avgmeters:
self.avgmeters[key] = AverageMeter()
self.avgmeters[key].update(values[key])
if show:
self._show()
def save_log(self, path):
"""Save log dataframe to path."""
self.log_df.to_csv(path, index=False)
def _average(self):
return {key: self.avgmeters[key].average() for key in self.avgmeters}
def __str__(self):
avg = self._average()
str_list = [key + ": %.4f" % round(avg[key], 4) for key in avg]
return (" - ").join(str_list)
def _show(self, t_thresh=0.1):
t_now = time.time()
if t_now - self.last_update >= t_thresh:
sys.stdout.write("\r" + str(self))
sys.stdout.flush()
self.last_update = t_now
| [
"pandas.DataFrame",
"sys.stdout.flush",
"time.time"
] | [((577, 591), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (589, 591), True, 'import pandas as pd\n'), ((966, 977), 'time.time', 'time.time', ([], {}), '()\n', (975, 977), False, 'import time\n'), ((1802, 1813), 'time.time', 'time.time', ([], {}), '()\n', (1811, 1813), False, 'import time\n'), ((1922, 1940), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1938, 1940), False, 'import sys\n')] |
import unittest
from sys import argv
import numpy as np
import torch
from objective.logistic import Logistic_Gradient
from .utils import Container, assert_all_close, assert_all_close_dict
class TestObj_Logistic_Gradient(unittest.TestCase):
def setUp(self):
np.random.seed(1234)
torch.manual_seed(1234)
n_features = 3
n_samples = 5
n_classes = 7
mu = 0.02
self.hparams = Container(n_classes=n_classes,
n_features=n_features,
n_samples=n_samples,
mu=mu)
self.w = torch.randn(n_features, n_classes, requires_grad=True)
self.x = torch.randn(n_samples, n_features)
self.y = torch.randn(n_samples).long()
self.obj = Logistic_Gradient(self.hparams)
def test_error(self):
error_test = self.obj.task_error(self.w, self.x, self.y)
error_ref = torch.tensor(2.9248)
assert_all_close(error_test, error_ref, "task_error returned value")
def test_oracle(self):
oracle_info_test = self.obj.oracle(self.w, self.x, self.y)
oracle_info_ref = {
'dw': torch.tensor([[ 0.2578, -0.1417, 0.0046, -0.1236, -0.0180, 0.0249, -0.0273],
[-0.3585, 0.1889, -0.0937, 0.0522, 0.0100, 0.1239, 0.0620],
[-0.2921, 0.2251, -0.1870, 0.1791, 0.0171, 0.0109, -0.0156]]),
'obj': torch.tensor(3.1189)}
assert_all_close_dict(oracle_info_ref, oracle_info_test, "oracle returned info")
if __name__ == '__main__':
unittest.main(argv=argv)
| [
"torch.manual_seed",
"objective.logistic.Logistic_Gradient",
"torch.tensor",
"numpy.random.seed",
"unittest.main",
"torch.randn"
] | [((1628, 1652), 'unittest.main', 'unittest.main', ([], {'argv': 'argv'}), '(argv=argv)\n', (1641, 1652), False, 'import unittest\n'), ((273, 293), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (287, 293), True, 'import numpy as np\n'), ((302, 325), 'torch.manual_seed', 'torch.manual_seed', (['(1234)'], {}), '(1234)\n', (319, 325), False, 'import torch\n'), ((634, 688), 'torch.randn', 'torch.randn', (['n_features', 'n_classes'], {'requires_grad': '(True)'}), '(n_features, n_classes, requires_grad=True)\n', (645, 688), False, 'import torch\n'), ((706, 740), 'torch.randn', 'torch.randn', (['n_samples', 'n_features'], {}), '(n_samples, n_features)\n', (717, 740), False, 'import torch\n'), ((807, 838), 'objective.logistic.Logistic_Gradient', 'Logistic_Gradient', (['self.hparams'], {}), '(self.hparams)\n', (824, 838), False, 'from objective.logistic import Logistic_Gradient\n'), ((951, 971), 'torch.tensor', 'torch.tensor', (['(2.9248)'], {}), '(2.9248)\n', (963, 971), False, 'import torch\n'), ((1190, 1391), 'torch.tensor', 'torch.tensor', (['[[0.2578, -0.1417, 0.0046, -0.1236, -0.018, 0.0249, -0.0273], [-0.3585, \n 0.1889, -0.0937, 0.0522, 0.01, 0.1239, 0.062], [-0.2921, 0.2251, -0.187,\n 0.1791, 0.0171, 0.0109, -0.0156]]'], {}), '([[0.2578, -0.1417, 0.0046, -0.1236, -0.018, 0.0249, -0.0273],\n [-0.3585, 0.1889, -0.0937, 0.0522, 0.01, 0.1239, 0.062], [-0.2921, \n 0.2251, -0.187, 0.1791, 0.0171, 0.0109, -0.0156]])\n', (1202, 1391), False, 'import torch\n'), ((1484, 1504), 'torch.tensor', 'torch.tensor', (['(3.1189)'], {}), '(3.1189)\n', (1496, 1504), False, 'import torch\n'), ((758, 780), 'torch.randn', 'torch.randn', (['n_samples'], {}), '(n_samples)\n', (769, 780), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-31 22:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lemlit', '0012_remove_suratizinpenelitianmahasiswa_dosen'),
]
operations = [
migrations.AddField(
model_name='suratizinpenelitianmahasiswa',
name='nama_instansi',
field=models.CharField(default='Nama Kantor', max_length=80),
preserve_default=False,
),
migrations.AddField(
model_name='suratizinpenelitianmahasiswa',
name='nomor_surat',
field=models.CharField(default='xxx/LEMLIT-UNISAN/GTO/VIII/2017', max_length=50, unique=True),
preserve_default=False,
),
migrations.AddField(
model_name='suratizinpenelitianmahasiswa',
name='tujuan_surat',
field=models.CharField(default='Gorontalo', max_length=20),
preserve_default=False,
),
]
| [
"django.db.models.CharField"
] | [((450, 504), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""Nama Kantor"""', 'max_length': '(80)'}), "(default='Nama Kantor', max_length=80)\n", (466, 504), False, 'from django.db import migrations, models\n'), ((687, 778), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""xxx/LEMLIT-UNISAN/GTO/VIII/2017"""', 'max_length': '(50)', 'unique': '(True)'}), "(default='xxx/LEMLIT-UNISAN/GTO/VIII/2017', max_length=50,\n unique=True)\n", (703, 778), False, 'from django.db import migrations, models\n'), ((958, 1010), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""Gorontalo"""', 'max_length': '(20)'}), "(default='Gorontalo', max_length=20)\n", (974, 1010), False, 'from django.db import migrations, models\n')] |
from datasets.dataset_processors import ExtendedDataset
from models.model import IdentificationModel, ResNet50
from models.siamese import SiameseNet, MssNet
from base import BaseExecutor
from utils.utilities import type_error_msg, value_error_msg, timer, load_model
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from configparser import ConfigParser
from os import makedirs
from os.path import join, exists, dirname
from scipy.io import savemat
import numpy as np
class Tester(BaseExecutor):
"""A general tester for all.
Args:
config (ConfigParser): The ConfigParser which reads setting files.
name (str): A name defined in base.py.
dataset (str): A dataset defined in base.py.
model (str): A model defined in base.py.
epoch (int): The epoch of the saved trained model for testing.
scene (str): A scene defined in base.py.
Attributes:
test_path (str): Path to save features/labels/cams.
"""
DEFAULT_BATCH_SIZE = 64
DEFAULT_NUM_WORKER = 8
def __init__(self, config, name, dataset, model, epoch: int, scene):
if not isinstance(name, Tester.Name):
if isinstance(name, str):
if not name.islower():
name = name.lower()
if name not in Tester.NAME_LIST:
raise ValueError(value_error_msg('name', name, Tester.NAME_LIST))
name = Tester.Name(name)
else:
raise TypeError(type_error_msg('name', name, [Tester.Name, str]))
if not isinstance(dataset, Tester.Dataset):
if isinstance(dataset, str):
if not dataset.islower():
dataset = dataset.lower()
if dataset not in Tester.DATASET_LIST:
raise ValueError(value_error_msg('dataset', dataset, Tester.DATASET_LIST))
dataset = Tester.Dataset(dataset)
else:
raise TypeError(type_error_msg('dataset', dataset, [Tester.Dataset, str]))
if not isinstance(model, Tester.Model):
if isinstance(model, str):
if not model.islower():
model = model.lower()
if model not in Tester.MODEL_LIST:
raise ValueError(value_error_msg('model', model, Tester.MODEL_LIST))
model = Tester.Model(model)
else:
raise TypeError(type_error_msg('model', model, [Tester.MODEL_LIST, str]))
if not isinstance(scene, Tester.Scene):
if isinstance(scene, str):
if not scene.islower():
scene = scene.lower()
if scene not in Tester.SCENE_LIST:
raise ValueError(value_error_msg('scene', scene, Tester.SCENE_LIST))
scene = Tester.Scene(scene)
else:
raise TypeError(type_error_msg('scene', scene, [Tester.SCENE_LIST, str]))
if not isinstance(epoch, int):
raise TypeError(type_error_msg('epoch', epoch, [int]))
if not epoch >= 0:
raise ValueError(value_error_msg('epoch', epoch, 'epoch >= 0'))
self.name = name
self.dataset = dataset
self.model = model
self.scene = scene
self.config = config
self.train_class = config.getint(self.name.value, 'train_class')
# initialize model
model_name = self.model.value
if self.model == Tester.Model.MSSNET:
self.model = MssNet(self.config)
elif self.model == Tester.Model.RESNET50:
self.model = ResNet50(self.config, self.train_class, False)
# else:
# raise ValueError(value_error_msg('model', model, Tester.MODEL_LIST))
transform_list = []
if self.name == Tester.Name.MARKET1501:
transform_list = [
# transforms.Resize((160, 64)),
# transforms.Pad(10),
# transforms.RandomCrop((160, 64)),
# transforms.RandomHorizontalFlip(),
# transforms.ToTensor()
transforms.Resize((256, 128), interpolation=3),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]
self.dataset_type = ['gallery', 'query']
if self.scene == Tester.Scene.MULTI_SHOT:
self.dataset_type.append('multi_query')
# prepare datasets
if self.dataset == Tester.Dataset.EXTENDED:
self.dataset = {}
for item in self.dataset_type:
self.dataset[item] = ExtendedDataset(self.name.value,
join(self.config[self.name.value]['dataset_dir'], item),
transforms.Compose(transform_list))
else:
raise ValueError(value_error_msg('dataset', dataset, Tester.Dataset.EXTENDED))
# load weights
load_model(self.model, self.config[self.name.value]['model_format'] % (model_name, epoch))
if isinstance(self.model, IdentificationModel):
self.model.set_to_test()
self.test_path = self.config[self.name.value]['test_path'] % self.scene.value
@timer
def run(self):
"""
Reads: A pth file of model's state dict.
Processes: Computes the features of gallery and query imgs.
Writes: A mat file of saved gallery and query info.
"""
Tester.run_info(self.__class__.__name__, self.scene.value)
self.model.eval() # for batch norm
test_dict = {}
dataloader = {}
with torch.no_grad():
if self.scene == Tester.Scene.SINGLE_SHOT:
for item in self.dataset_type:
dataloader[item] = DataLoader(self.dataset[item],
num_workers=Tester.DEFAULT_NUM_WORKER,
batch_size=Tester.DEFAULT_BATCH_SIZE)
test_dict[item + '_feature'] = Tester.normalize(self.extract_feature(dataloader[item]))
test_dict[item + '_label'] = self.dataset[item].ids
test_dict[item + '_cam'] = self.dataset[item].cams
elif self.scene == Tester.Scene.MULTI_SHOT:
item = 'gallery'
dataloader[item] = DataLoader(self.dataset[item], num_workers=Tester.DEFAULT_NUM_WORKER,
batch_size=Tester.DEFAULT_BATCH_SIZE)
test_dict[item + '_feature'] = Tester.normalize(self.extract_feature(dataloader[item]))
test_dict[item + '_label'] = self.dataset[item].ids
test_dict[item + '_cam'] = self.dataset[item].cams
item = 'multi_query' # no need to save multi_query features into dict
dataloader[item] = DataLoader(self.dataset[item], num_workers=Tester.DEFAULT_NUM_WORKER,
batch_size=Tester.DEFAULT_BATCH_SIZE)
multi_query_feature = self.extract_feature(dataloader[item]) # no normalization
multi_query_label = self.dataset[item].ids
multi_query_cam = self.dataset[item].cams
item = 'query'
test_dict[item + '_label'] = self.dataset[item].ids
test_dict[item + '_cam'] = self.dataset[item].cams
test_dict[item + '_feature'] = Tester.normalize_numpy(Tester.mean_feature(multi_query_feature,
np.asarray(multi_query_label),
np.asarray(multi_query_cam),
np.asarray(test_dict['query_label']),
np.asarray(test_dict['query_cam']),
test_dict))
test_dir = dirname(self.test_path)
if not exists(test_dir):
makedirs(test_dir)
# WARNING: save test.mat will trigger overwrite if test.mat is already exists
savemat(self.test_path, test_dict)
@staticmethod
def mean_feature(mquery_feature, mquery_label, mquery_cam, query_label, query_cam, dictionary):
"""Averages multi query feature to get (mean) query feature.
Args:
mquery_feature (np.ndarray): The feature of multi query imgs, shape(#multi_query, embedding_dim).
mquery_label (np.ndarray): The people labels of multi query imgs, an 1d int array, shape(#multi_query).
mquery_cam (np.ndarray): The camera labels of multi query imgs, an 1d int array, shape(#multi_query).
query_label (np.ndarray): The people labels of query imgs, an 1d int array, shape(#query).
query_cam (np.ndarray): The camera labels of query imgs, an 1d int array, shape(#query).
dictionary (dict): A mutable dictionary for adding
{'multi_index': [index_array1, index_array2, ...]} (Implicit returns).
Returns:
query_feature (ndarray): The mean feature of mquery_feature.
"""
query_feature = []
multi_index = []
for i in range(len(query_label)):
label_mask = mquery_label == query_label[i]
cam_mask = mquery_cam == query_cam[i]
index = np.flatnonzero(label_mask & cam_mask)
multi_index.append(index)
query_feature.append(np.mean(mquery_feature[index, :], axis=0))
dictionary['multi_index'] = multi_index
return np.asarray(query_feature)
@staticmethod
def flip_lr(img: torch.Tensor):
"""Flips image tensor horizontally.
Args:
img (torch.Tensor): The original image tensor.
Returns:
img_flip (torch.Tensor): The flipped image tensor.
"""
inv_idx = torch.arange(img.size(3) - 1, -1, -1) # N x C x H x W
img_flip = img.index_select(3, inv_idx)
return img_flip
@staticmethod
def normalize(x: torch.Tensor):
"""Normalizes the 2d torch tensor.
Args:
x (torch.Tensor): in 2d.
Returns:
normalized_x (torch.Tensor): in 2d.
"""
xnorm = torch.norm(x, p=2, dim=1, keepdim=True)
return x.div(xnorm.expand_as(x))
@staticmethod
def normalize_numpy(x: np.ndarray): # 25% faster than normalize with torch above
"""Normalizes the 2d numpy array.
Args:
x (np.ndarray): in 2d.
Returns:
normalized_x (np.ndarray): in 2d.
"""
xnorm = np.linalg.norm(x, axis=1, keepdims=True)
return x / np.repeat(xnorm, x.shape[1]).reshape(x.shape)
def extract_feature(self, dataloader):
"""Extracts feature in batches.
Args:
dataloader (torch.utils.data.DataLoader): Initialized dataloader.
Returns:
feature (np.ndarray): shape(#gallery/query/multi_query, embedding_dim).
"""
feature = []
if isinstance(self.model, SiameseNet):
for i, data in enumerate(dataloader):
batch_feature = self.model.forward_once(data)
feature.append(batch_feature)
elif isinstance(self.model, IdentificationModel):
for i, data in enumerate(dataloader):
print(i * Tester.DEFAULT_BATCH_SIZE)
batch_feature = self.model.forward(data)
data = Tester.flip_lr(data)
batch_feature += self.model.forward(data)
feature.append(batch_feature)
return torch.cat(feature, 0).numpy()
| [
"scipy.io.savemat",
"models.siamese.MssNet",
"numpy.linalg.norm",
"utils.utilities.load_model",
"os.path.exists",
"numpy.mean",
"numpy.repeat",
"numpy.flatnonzero",
"numpy.asarray",
"torchvision.transforms.ToTensor",
"os.path.dirname",
"torch.norm",
"models.model.ResNet50",
"torchvision.tr... | [((5047, 5142), 'utils.utilities.load_model', 'load_model', (['self.model', "(self.config[self.name.value]['model_format'] % (model_name, epoch))"], {}), "(self.model, self.config[self.name.value]['model_format'] % (\n model_name, epoch))\n", (5057, 5142), False, 'from utils.utilities import type_error_msg, value_error_msg, timer, load_model\n'), ((8145, 8168), 'os.path.dirname', 'dirname', (['self.test_path'], {}), '(self.test_path)\n', (8152, 8168), False, 'from os.path import join, exists, dirname\n'), ((8327, 8361), 'scipy.io.savemat', 'savemat', (['self.test_path', 'test_dict'], {}), '(self.test_path, test_dict)\n', (8334, 8361), False, 'from scipy.io import savemat\n'), ((9813, 9838), 'numpy.asarray', 'np.asarray', (['query_feature'], {}), '(query_feature)\n', (9823, 9838), True, 'import numpy as np\n'), ((10496, 10535), 'torch.norm', 'torch.norm', (['x'], {'p': '(2)', 'dim': '(1)', 'keepdim': '(True)'}), '(x, p=2, dim=1, keepdim=True)\n', (10506, 10535), False, 'import torch\n'), ((10867, 10907), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {'axis': '(1)', 'keepdims': '(True)'}), '(x, axis=1, keepdims=True)\n', (10881, 10907), True, 'import numpy as np\n'), ((3554, 3573), 'models.siamese.MssNet', 'MssNet', (['self.config'], {}), '(self.config)\n', (3560, 3573), False, 'from models.siamese import SiameseNet, MssNet\n'), ((5727, 5742), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5740, 5742), False, 'import torch\n'), ((8184, 8200), 'os.path.exists', 'exists', (['test_dir'], {}), '(test_dir)\n', (8190, 8200), False, 'from os.path import join, exists, dirname\n'), ((8214, 8232), 'os.makedirs', 'makedirs', (['test_dir'], {}), '(test_dir)\n', (8222, 8232), False, 'from os import makedirs\n'), ((9598, 9635), 'numpy.flatnonzero', 'np.flatnonzero', (['(label_mask & cam_mask)'], {}), '(label_mask & cam_mask)\n', (9612, 9635), True, 'import numpy as np\n'), ((3061, 3098), 'utils.utilities.type_error_msg', 'type_error_msg', 
(['"""epoch"""', 'epoch', '[int]'], {}), "('epoch', epoch, [int])\n", (3075, 3098), False, 'from utils.utilities import type_error_msg, value_error_msg, timer, load_model\n'), ((3156, 3201), 'utils.utilities.value_error_msg', 'value_error_msg', (['"""epoch"""', 'epoch', '"""epoch >= 0"""'], {}), "('epoch', epoch, 'epoch >= 0')\n", (3171, 3201), False, 'from utils.utilities import type_error_msg, value_error_msg, timer, load_model\n'), ((3649, 3695), 'models.model.ResNet50', 'ResNet50', (['self.config', 'self.train_class', '(False)'], {}), '(self.config, self.train_class, False)\n', (3657, 3695), False, 'from models.model import IdentificationModel, ResNet50\n'), ((4152, 4198), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256, 128)'], {'interpolation': '(3)'}), '((256, 128), interpolation=3)\n', (4169, 4198), False, 'from torchvision import transforms\n'), ((4216, 4237), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4235, 4237), False, 'from torchvision import transforms\n'), ((4255, 4321), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (4275, 4321), False, 'from torchvision import transforms\n'), ((4953, 5013), 'utils.utilities.value_error_msg', 'value_error_msg', (['"""dataset"""', 'dataset', 'Tester.Dataset.EXTENDED'], {}), "('dataset', dataset, Tester.Dataset.EXTENDED)\n", (4968, 5013), False, 'from utils.utilities import type_error_msg, value_error_msg, timer, load_model\n'), ((9707, 9748), 'numpy.mean', 'np.mean', (['mquery_feature[index, :]'], {'axis': '(0)'}), '(mquery_feature[index, :], axis=0)\n', (9714, 9748), True, 'import numpy as np\n'), ((11872, 11893), 'torch.cat', 'torch.cat', (['feature', '(0)'], {}), '(feature, 0)\n', (11881, 11893), False, 'import torch\n'), ((1528, 1576), 'utils.utilities.type_error_msg', 'type_error_msg', (['"""name"""', 'name', '[Tester.Name, str]'], {}), 
"('name', name, [Tester.Name, str])\n", (1542, 1576), False, 'from utils.utilities import type_error_msg, value_error_msg, timer, load_model\n'), ((2010, 2067), 'utils.utilities.type_error_msg', 'type_error_msg', (['"""dataset"""', 'dataset', '[Tester.Dataset, str]'], {}), "('dataset', dataset, [Tester.Dataset, str])\n", (2024, 2067), False, 'from utils.utilities import type_error_msg, value_error_msg, timer, load_model\n'), ((2473, 2529), 'utils.utilities.type_error_msg', 'type_error_msg', (['"""model"""', 'model', '[Tester.MODEL_LIST, str]'], {}), "('model', model, [Tester.MODEL_LIST, str])\n", (2487, 2529), False, 'from utils.utilities import type_error_msg, value_error_msg, timer, load_model\n'), ((2935, 2991), 'utils.utilities.type_error_msg', 'type_error_msg', (['"""scene"""', 'scene', '[Tester.SCENE_LIST, str]'], {}), "('scene', scene, [Tester.SCENE_LIST, str])\n", (2949, 2991), False, 'from utils.utilities import type_error_msg, value_error_msg, timer, load_model\n'), ((4764, 4819), 'os.path.join', 'join', (["self.config[self.name.value]['dataset_dir']", 'item'], {}), "(self.config[self.name.value]['dataset_dir'], item)\n", (4768, 4819), False, 'from os.path import join, exists, dirname\n'), ((4874, 4908), 'torchvision.transforms.Compose', 'transforms.Compose', (['transform_list'], {}), '(transform_list)\n', (4892, 4908), False, 'from torchvision import transforms\n'), ((5885, 5996), 'torch.utils.data.DataLoader', 'DataLoader', (['self.dataset[item]'], {'num_workers': 'Tester.DEFAULT_NUM_WORKER', 'batch_size': 'Tester.DEFAULT_BATCH_SIZE'}), '(self.dataset[item], num_workers=Tester.DEFAULT_NUM_WORKER,\n batch_size=Tester.DEFAULT_BATCH_SIZE)\n', (5895, 5996), False, 'from torch.utils.data import DataLoader\n'), ((6468, 6579), 'torch.utils.data.DataLoader', 'DataLoader', (['self.dataset[item]'], {'num_workers': 'Tester.DEFAULT_NUM_WORKER', 'batch_size': 'Tester.DEFAULT_BATCH_SIZE'}), '(self.dataset[item], num_workers=Tester.DEFAULT_NUM_WORKER,\n 
batch_size=Tester.DEFAULT_BATCH_SIZE)\n', (6478, 6579), False, 'from torch.utils.data import DataLoader\n'), ((6984, 7095), 'torch.utils.data.DataLoader', 'DataLoader', (['self.dataset[item]'], {'num_workers': 'Tester.DEFAULT_NUM_WORKER', 'batch_size': 'Tester.DEFAULT_BATCH_SIZE'}), '(self.dataset[item], num_workers=Tester.DEFAULT_NUM_WORKER,\n batch_size=Tester.DEFAULT_BATCH_SIZE)\n', (6994, 7095), False, 'from torch.utils.data import DataLoader\n'), ((10927, 10955), 'numpy.repeat', 'np.repeat', (['xnorm', 'x.shape[1]'], {}), '(xnorm, x.shape[1])\n', (10936, 10955), True, 'import numpy as np\n'), ((1388, 1435), 'utils.utilities.value_error_msg', 'value_error_msg', (['"""name"""', 'name', 'Tester.NAME_LIST'], {}), "('name', name, Tester.NAME_LIST)\n", (1403, 1435), False, 'from utils.utilities import type_error_msg, value_error_msg, timer, load_model\n'), ((1852, 1908), 'utils.utilities.value_error_msg', 'value_error_msg', (['"""dataset"""', 'dataset', 'Tester.DATASET_LIST'], {}), "('dataset', dataset, Tester.DATASET_LIST)\n", (1867, 1908), False, 'from utils.utilities import type_error_msg, value_error_msg, timer, load_model\n'), ((2327, 2377), 'utils.utilities.value_error_msg', 'value_error_msg', (['"""model"""', 'model', 'Tester.MODEL_LIST'], {}), "('model', model, Tester.MODEL_LIST)\n", (2342, 2377), False, 'from utils.utilities import type_error_msg, value_error_msg, timer, load_model\n'), ((2789, 2839), 'utils.utilities.value_error_msg', 'value_error_msg', (['"""scene"""', 'scene', 'Tester.SCENE_LIST'], {}), "('scene', scene, Tester.SCENE_LIST)\n", (2804, 2839), False, 'from utils.utilities import type_error_msg, value_error_msg, timer, load_model\n'), ((7700, 7729), 'numpy.asarray', 'np.asarray', (['multi_query_label'], {}), '(multi_query_label)\n', (7710, 7729), True, 'import numpy as np\n'), ((7801, 7828), 'numpy.asarray', 'np.asarray', (['multi_query_cam'], {}), '(multi_query_cam)\n', (7811, 7828), True, 'import numpy as np\n'), ((7900, 7936), 
'numpy.asarray', 'np.asarray', (["test_dict['query_label']"], {}), "(test_dict['query_label'])\n", (7910, 7936), True, 'import numpy as np\n'), ((8008, 8042), 'numpy.asarray', 'np.asarray', (["test_dict['query_cam']"], {}), "(test_dict['query_cam'])\n", (8018, 8042), True, 'import numpy as np\n')] |
from datetime import timedelta
import app_config
import dateutil.parser
from googleapiclient.discovery import build
from injector import inject
from models import AllDayCalendarEntry, CalendarEntry
from google_api import GoogleAuthenication
class GoogleCalendar:
@inject
def __init__(self, auth: GoogleAuthenication):
self.auth = auth
def query_calendar(self, start, end):
results = []
creds = self.auth.creds
service = build('calendar', 'v3', credentials=creds,
cache_discovery=False)
time_min = start + "T00:00:00Z"
time_max = end + "T23:59:59Z"
events_result = service.events().list(calendarId='primary', timeMin=time_min, timeMax=time_max,
singleEvents=True, showDeleted=False, timeZone=app_config.CALENDAR_TIMEZONE,
orderBy='startTime').execute()
events = events_result.get('items', [])
for event in events:
if 'dateTime' in event['start'] and 'dateTime' in event['end']:
results.append(CalendarEntry(description=event['summary'], date=event['start']
['dateTime'][0:10], time=event['start']['dateTime'][11:16], is_primary=False))
elif 'date' in event['start'] and 'date' in event['end']:
current = dateutil.parser.parse(
event['start']['date'])
range_end = dateutil.parser.parse(
event['end']['date'])
while current < range_end:
results.append(AllDayCalendarEntry(description=event['summary'], date=current.strftime(
'%Y-%m-%d'), is_primary=False))
current = current + timedelta(days=1)
return results
| [
"googleapiclient.discovery.build",
"models.CalendarEntry",
"datetime.timedelta"
] | [((473, 538), 'googleapiclient.discovery.build', 'build', (['"""calendar"""', '"""v3"""'], {'credentials': 'creds', 'cache_discovery': '(False)'}), "('calendar', 'v3', credentials=creds, cache_discovery=False)\n", (478, 538), False, 'from googleapiclient.discovery import build\n'), ((1134, 1279), 'models.CalendarEntry', 'CalendarEntry', ([], {'description': "event['summary']", 'date': "event['start']['dateTime'][0:10]", 'time': "event['start']['dateTime'][11:16]", 'is_primary': '(False)'}), "(description=event['summary'], date=event['start']['dateTime']\n [0:10], time=event['start']['dateTime'][11:16], is_primary=False)\n", (1147, 1279), False, 'from models import AllDayCalendarEntry, CalendarEntry\n'), ((1825, 1842), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1834, 1842), False, 'from datetime import timedelta\n')] |
from django.db import models
# Create your models here.
class Profile(models.Model):
pic=models.ImageField(upload_to='images/')
pub_date=models.DateTimeField(auto_now=True)
obj=models.TextField(blank=True)
# ctime() method is for converting datetime string into a string
def __str__(self):
return self.pub_date.ctime()
# class Objective(models.Model):
# obj=models.TextField(blank=True)
# list_obj=[]
# def __str__(self):
# self.list_obj=self.obj.split()
# return " ".join(self.list_obj[:5])+'...'
class Language(models.Model):
image = models.ImageField(upload_to='images/', blank=True)
lang_name=models.CharField(max_length=20)
lang_summary=models.CharField(max_length=200)
# For admin views instead of object
def __str__(self):
return self.lang_name
class Framework(models.Model):
framework_name=models.CharField(max_length=20)
language=models.ForeignKey(Language,on_delete=models.CASCADE)
def __str__(self):
return self.framework_name
class Project(models.Model):
proj_title=models.CharField(max_length=100)
proj_desc=models.TextField()
proj_link=models.URLField()
frameworks=models.ManyToManyField(Framework)
def __str__(self):
return self.proj_title
class Academic(models.Model):
exam_name=models.CharField(max_length=50)
exam_school=models.CharField(max_length=50)
exam_gpa=models.FloatField()
exam_year=models.SmallIntegerField()
new_list=[]
adm_list=[]
def __str__(self):
self.new_list=self.exam_name.split()
self.adm_list=list(map(lambda x:x[:1], self.new_list))
return ".".join(self.adm_list)
| [
"django.db.models.FloatField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.ManyToManyField",
"django.db.models.DateTimeField",
"django.db.models.SmallIntegerField",
"django.db.models.ImageField",
"django.db.models.URLField",
"django.db.models.CharField"
] | [((94, 132), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""images/"""'}), "(upload_to='images/')\n", (111, 132), False, 'from django.db import models\n'), ((146, 181), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (166, 181), False, 'from django.db import models\n'), ((190, 218), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (206, 218), False, 'from django.db import models\n'), ((599, 649), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""images/"""', 'blank': '(True)'}), "(upload_to='images/', blank=True)\n", (616, 649), False, 'from django.db import models\n'), ((664, 695), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (680, 695), False, 'from django.db import models\n'), ((713, 745), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (729, 745), False, 'from django.db import models\n'), ((896, 927), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (912, 927), False, 'from django.db import models\n'), ((941, 994), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Language'], {'on_delete': 'models.CASCADE'}), '(Language, on_delete=models.CASCADE)\n', (958, 994), False, 'from django.db import models\n'), ((1098, 1130), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1114, 1130), False, 'from django.db import models\n'), ((1145, 1163), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1161, 1163), False, 'from django.db import models\n'), ((1178, 1195), 'django.db.models.URLField', 'models.URLField', ([], {}), '()\n', (1193, 1195), False, 'from django.db import models\n'), ((1211, 1244), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Framework'], 
{}), '(Framework)\n', (1233, 1244), False, 'from django.db import models\n'), ((1345, 1376), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1361, 1376), False, 'from django.db import models\n'), ((1393, 1424), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1409, 1424), False, 'from django.db import models\n'), ((1438, 1457), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (1455, 1457), False, 'from django.db import models\n'), ((1472, 1498), 'django.db.models.SmallIntegerField', 'models.SmallIntegerField', ([], {}), '()\n', (1496, 1498), False, 'from django.db import models\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 29 20:53:21 2020
@author: asherhensley
"""
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
import pandas as pd
import yulesimon as ys
from plotly.subplots import make_subplots
import plotly.graph_objects as go
from dash.dependencies import Input, Output
from dash.exceptions import PreventUpdate
import numpy as np
import dash_table
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
colors = {
'background': '#000000',
'text': '#4ae2ed'
}
fig1 = make_subplots()
# fig1.update_layout(
# autosize=False,
# height=400,
# width=600,
# showlegend=False,
# #margin=dict(l=0,r=0,b=50,t=50),
# )
fig2 = make_subplots()
# fig2.update_layout(
# autosize=False,
# height=400,
# width=600,
# showlegend=False,
# #margin=dict(l=0,r=0,b=50,t=50),
# )
fig3 = make_subplots()
colors = {
'background': '#000000',
'text': '#7FDBFF'
}
df = pd.DataFrame(data={
"Key Statistics":[6],
"Values":[4]})
app.layout = html.Div(children=[
html.H1(children='CIRCLON-8', style={'textAlign':'left'}),
html.Div(children=[
'Ticker: ',
dcc.Input(id='Ticker',value='MSFT',type='text', size='50'),
html.Button('Search',id='Search',n_clicks=0)]
),
html.Br(),
html.H6(id='Status',children='Ready', style={'textAlign':'left'}),
# dash_table.DataTable(
# id='table',
# columns=[{"name": "Key Statistics", "id": "Key Statistics"},
# {"name": "Values", "id": "Values"}],
# data=df.to_dict('records')
# ),
dcc.Tabs(id="tabs", value='tab-1', children=[
dcc.Tab(label='Prices/Returns',
children=[dcc.Graph(id='Figure1',figure=fig1)]),
dcc.Tab(label='Volatility Profile',
children=[dcc.Graph(id='Figure2',figure=fig2)]),
dcc.Tab(label='Modeling Analysis',
children=[dcc.Graph(id='Figure3',figure=fig2)]),
]),
html.Div(id='tabs-content')
])
@app.callback(
Output(component_id='Status', component_property='children'),
Input(component_id='Search', component_property='n_clicks')
)
def set_status(n_clicks):
status = 'Searching...'
if n_clicks==0:
status = 'Initializing...'
return status
@app.callback(
Output(component_id='Figure1', component_property='figure'),
Output(component_id='Figure2', component_property='figure'),
Output(component_id='Figure3', component_property='figure'),
Output(component_id='Status', component_property='children'),
Input(component_id='Ticker', component_property='value'),
Input(component_id='Search', component_property='n_clicks')
)
def update_figure(ticker_in, n_clicks):
ctx = dash.callback_context
if not ctx.triggered:
ticker = 'MSFT'
else:
callback_id = ctx.triggered[0]['prop_id'].split('.')[0]
if callback_id=='Search':
ticker = ticker_in
else:
ticker = None
if ticker==None:
raise PreventUpdate
else:
# Run Model
closing_prices, log_returns, dates = ys.GetYahooFeed(ticker,5)
Chain = ys.TimeSeries(log_returns)
nsteps = 200
burnin = nsteps/2.0
downsample = 2
history = Chain.step(nsteps)
sigma, sample_size = ys.ExpectedValue(history.std_deviation, burnin, downsample)
mu, sample_size = ys.ExpectedValue(history.mean, burnin, downsample)
z = np.arange(-0.2,0.2,0.001)
yulesimon_PDF = ys.MixtureModel(z,mu/100,sigma/100)
H,b = np.histogram(log_returns,200)
delta = b[1]-b[0]
bctr = b[1:]-delta/2.0
empirical_PDF = H/(sum(H)*delta)
gaussian_PDF = ys.Gaussian(z,np.mean(log_returns),1/np.var(log_returns))
# Update Prices/Returns
fig1 = make_subplots(rows=2,cols=1,shared_xaxes=True,vertical_spacing=0.05)
fig1.add_trace(go.Scatter(x=dates[1:],y=closing_prices[1:],
fill='tozeroy',
line_color='#0000ff',
fillcolor='#7474f7'), row=1,col=1)
fig1.add_trace(go.Scatter(x=dates[1:],y=mu/100+2*sigma/100,
fill='tozeroy',
fillcolor='#ffb0b0',
mode='none'), row=2,col=1)
fig1.add_trace(go.Scatter(x=dates[1:],y=mu/100-2*sigma/100,
fill='tozeroy',
fillcolor='#ffb0b0',
mode='none'), row=2,col=1)
fig1.add_trace(go.Scatter(x=dates[1:],y=log_returns,
line_color='#ff0000'), row=2,col=1)
fig1.add_trace(go.Scatter(x=dates[1:],y=mu,
line_color='#000000'), row=2,col=1)
#fig1.add_trace(go.Scatter(x=dates[1:],y=mu*0,line=dict(dash='dash'),
# line_color='#000000'), row=2,col=1)
fig1.update_layout(
showlegend=False,
height=700
)
fig1.update_yaxes(title_text='Daily Close',row=1,col=1)
fig1.update_yaxes(title_text='Daily Log-Return',row=2,col=1)
# Update Volatility Profile
fig2 = make_subplots(rows=1,cols=2,
shared_xaxes=True,
subplot_titles=("Linear Scale","Log Scale"))
fig2.add_trace(go.Scatter(x=bctr,y=empirical_PDF,mode='markers',marker_color='#ff0000'),row=1,col=1)
#fig2.add_trace(go.Scatter(x=z,y=gaussian_PDF,line_color='#edc24a',),row=1,col=1)
fig2.add_trace(go.Scatter(x=z,y=yulesimon_PDF,line_color='#0000ff',),row=1,col=1)
fig2.add_trace(go.Scatter(x=bctr,y=empirical_PDF,mode='markers',marker_color='#ff0000'),row=1,col=2)
#fig2.add_trace(go.Scatter(x=z,y=gaussian_PDF,line_color='#edc24a',),row=1,col=2)
fig2.add_trace(go.Scatter(x=z,y=yulesimon_PDF,line_color='#0000ff',),row=1,col=2)
fig2.update_xaxes(title_text='Log Returns',row=1,col=1)
fig2.update_yaxes(title_text='Probability Density',row=1,col=1)
fig2.update_xaxes(title_text='Log Returns',row=1,col=2)
fig2.update_yaxes(title_text='Probability Density',type="log",row=1,col=2)
fig2.update_layout(showlegend=False)
# Update Modeling Analysis Tab
fig3 = make_subplots(rows=1,cols=2)
fig3.add_trace(go.Scatter(y=history.log_likelihood,line_color='#0000ff',),row=1,col=1)
fig3.add_trace(go.Scatter(y=history.pvalue,line_color='#ff0000',),row=1,col=2)
fig3.update_xaxes(title_text='Iteration',row=1,col=1)
fig3.update_yaxes(title_text='Log-Likelihood',row=1,col=1)
fig3.update_xaxes(title_text='Iteration',row=1,col=2)
fig3.update_yaxes(title_text='p-Value',type="log",row=1,col=2)
fig3.update_layout(showlegend=False)
return fig1, fig2, fig3, 'Ready'
if __name__ == '__main__':
app.run_server(debug=True)
| [
"dash_html_components.Button",
"dash.dependencies.Input",
"numpy.arange",
"dash_html_components.Div",
"yulesimon.TimeSeries",
"numpy.mean",
"numpy.histogram",
"dash.Dash",
"dash.dependencies.Output",
"dash_html_components.Br",
"plotly.graph_objects.Scatter",
"yulesimon.GetYahooFeed",
"dash_h... | [((545, 607), 'dash.Dash', 'dash.Dash', (['__name__'], {'external_stylesheets': 'external_stylesheets'}), '(__name__, external_stylesheets=external_stylesheets)\n', (554, 607), False, 'import dash\n'), ((681, 696), 'plotly.subplots.make_subplots', 'make_subplots', ([], {}), '()\n', (694, 696), False, 'from plotly.subplots import make_subplots\n'), ((855, 870), 'plotly.subplots.make_subplots', 'make_subplots', ([], {}), '()\n', (868, 870), False, 'from plotly.subplots import make_subplots\n'), ((1029, 1044), 'plotly.subplots.make_subplots', 'make_subplots', ([], {}), '()\n', (1042, 1044), False, 'from plotly.subplots import make_subplots\n'), ((1116, 1173), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'Key Statistics': [6], 'Values': [4]}"}), "(data={'Key Statistics': [6], 'Values': [4]})\n", (1128, 1173), True, 'import pandas as pd\n'), ((2248, 2308), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""Status"""', 'component_property': '"""children"""'}), "(component_id='Status', component_property='children')\n", (2254, 2308), False, 'from dash.dependencies import Input, Output\n'), ((2314, 2373), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""Search"""', 'component_property': '"""n_clicks"""'}), "(component_id='Search', component_property='n_clicks')\n", (2319, 2373), False, 'from dash.dependencies import Input, Output\n'), ((2527, 2586), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""Figure1"""', 'component_property': '"""figure"""'}), "(component_id='Figure1', component_property='figure')\n", (2533, 2586), False, 'from dash.dependencies import Input, Output\n'), ((2592, 2651), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""Figure2"""', 'component_property': '"""figure"""'}), "(component_id='Figure2', component_property='figure')\n", (2598, 2651), False, 'from dash.dependencies import Input, Output\n'), ((2657, 2716), 'dash.dependencies.Output', 'Output', ([], {'component_id': 
'"""Figure3"""', 'component_property': '"""figure"""'}), "(component_id='Figure3', component_property='figure')\n", (2663, 2716), False, 'from dash.dependencies import Input, Output\n'), ((2722, 2782), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""Status"""', 'component_property': '"""children"""'}), "(component_id='Status', component_property='children')\n", (2728, 2782), False, 'from dash.dependencies import Input, Output\n'), ((2788, 2844), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""Ticker"""', 'component_property': '"""value"""'}), "(component_id='Ticker', component_property='value')\n", (2793, 2844), False, 'from dash.dependencies import Input, Output\n'), ((2850, 2909), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""Search"""', 'component_property': '"""n_clicks"""'}), "(component_id='Search', component_property='n_clicks')\n", (2855, 2909), False, 'from dash.dependencies import Input, Output\n'), ((3373, 3399), 'yulesimon.GetYahooFeed', 'ys.GetYahooFeed', (['ticker', '(5)'], {}), '(ticker, 5)\n', (3388, 3399), True, 'import yulesimon as ys\n'), ((3415, 3441), 'yulesimon.TimeSeries', 'ys.TimeSeries', (['log_returns'], {}), '(log_returns)\n', (3428, 3441), True, 'import yulesimon as ys\n'), ((3580, 3639), 'yulesimon.ExpectedValue', 'ys.ExpectedValue', (['history.std_deviation', 'burnin', 'downsample'], {}), '(history.std_deviation, burnin, downsample)\n', (3596, 3639), True, 'import yulesimon as ys\n'), ((3666, 3716), 'yulesimon.ExpectedValue', 'ys.ExpectedValue', (['history.mean', 'burnin', 'downsample'], {}), '(history.mean, burnin, downsample)\n', (3682, 3716), True, 'import yulesimon as ys\n'), ((3747, 3774), 'numpy.arange', 'np.arange', (['(-0.2)', '(0.2)', '(0.001)'], {}), '(-0.2, 0.2, 0.001)\n', (3756, 3774), True, 'import numpy as np\n'), ((3797, 3838), 'yulesimon.MixtureModel', 'ys.MixtureModel', (['z', '(mu / 100)', '(sigma / 100)'], {}), '(z, mu / 100, sigma / 100)\n', (3812, 3838), True, 'import 
yulesimon as ys\n'), ((3847, 3877), 'numpy.histogram', 'np.histogram', (['log_returns', '(200)'], {}), '(log_returns, 200)\n', (3859, 3877), True, 'import numpy as np\n'), ((4112, 4183), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(2)', 'cols': '(1)', 'shared_xaxes': '(True)', 'vertical_spacing': '(0.05)'}), '(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.05)\n', (4125, 4183), False, 'from plotly.subplots import make_subplots\n'), ((5600, 5699), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(1)', 'cols': '(2)', 'shared_xaxes': '(True)', 'subplot_titles': "('Linear Scale', 'Log Scale')"}), "(rows=1, cols=2, shared_xaxes=True, subplot_titles=(\n 'Linear Scale', 'Log Scale'))\n", (5613, 5699), False, 'from plotly.subplots import make_subplots\n'), ((6739, 6768), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(1)', 'cols': '(2)'}), '(rows=1, cols=2)\n', (6752, 6768), False, 'from plotly.subplots import make_subplots\n'), ((1232, 1290), 'dash_html_components.H1', 'html.H1', ([], {'children': '"""CIRCLON-8"""', 'style': "{'textAlign': 'left'}"}), "(children='CIRCLON-8', style={'textAlign': 'left'})\n", (1239, 1290), True, 'import dash_html_components as html\n'), ((1482, 1491), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (1489, 1491), True, 'import dash_html_components as html\n'), ((1502, 1569), 'dash_html_components.H6', 'html.H6', ([], {'id': '"""Status"""', 'children': '"""Ready"""', 'style': "{'textAlign': 'left'}"}), "(id='Status', children='Ready', style={'textAlign': 'left'})\n", (1509, 1569), True, 'import dash_html_components as html\n'), ((2192, 2219), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""tabs-content"""'}), "(id='tabs-content')\n", (2200, 2219), True, 'import dash_html_components as html\n'), ((4012, 4032), 'numpy.mean', 'np.mean', (['log_returns'], {}), '(log_returns)\n', (4019, 4032), True, 'import numpy as np\n'), ((4204, 4313), 'plotly.graph_objects.Scatter', 
'go.Scatter', ([], {'x': 'dates[1:]', 'y': 'closing_prices[1:]', 'fill': '"""tozeroy"""', 'line_color': '"""#0000ff"""', 'fillcolor': '"""#7474f7"""'}), "(x=dates[1:], y=closing_prices[1:], fill='tozeroy', line_color=\n '#0000ff', fillcolor='#7474f7')\n", (4214, 4313), True, 'import plotly.graph_objects as go\n'), ((4447, 4554), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'dates[1:]', 'y': '(mu / 100 + 2 * sigma / 100)', 'fill': '"""tozeroy"""', 'fillcolor': '"""#ffb0b0"""', 'mode': '"""none"""'}), "(x=dates[1:], y=mu / 100 + 2 * sigma / 100, fill='tozeroy',\n fillcolor='#ffb0b0', mode='none')\n", (4457, 4554), True, 'import plotly.graph_objects as go\n'), ((4681, 4788), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'dates[1:]', 'y': '(mu / 100 - 2 * sigma / 100)', 'fill': '"""tozeroy"""', 'fillcolor': '"""#ffb0b0"""', 'mode': '"""none"""'}), "(x=dates[1:], y=mu / 100 - 2 * sigma / 100, fill='tozeroy',\n fillcolor='#ffb0b0', mode='none')\n", (4691, 4788), True, 'import plotly.graph_objects as go\n'), ((4915, 4975), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'dates[1:]', 'y': 'log_returns', 'line_color': '"""#ff0000"""'}), "(x=dates[1:], y=log_returns, line_color='#ff0000')\n", (4925, 4975), True, 'import plotly.graph_objects as go\n'), ((5046, 5097), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'dates[1:]', 'y': 'mu', 'line_color': '"""#000000"""'}), "(x=dates[1:], y=mu, line_color='#000000')\n", (5056, 5097), True, 'import plotly.graph_objects as go\n'), ((5783, 5858), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'bctr', 'y': 'empirical_PDF', 'mode': '"""markers"""', 'marker_color': '"""#ff0000"""'}), "(x=bctr, y=empirical_PDF, mode='markers', marker_color='#ff0000')\n", (5793, 5858), True, 'import plotly.graph_objects as go\n'), ((5982, 6036), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'z', 'y': 'yulesimon_PDF', 'line_color': '"""#0000ff"""'}), "(x=z, y=yulesimon_PDF, 
line_color='#0000ff')\n", (5992, 6036), True, 'import plotly.graph_objects as go\n'), ((6072, 6147), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'bctr', 'y': 'empirical_PDF', 'mode': '"""markers"""', 'marker_color': '"""#ff0000"""'}), "(x=bctr, y=empirical_PDF, mode='markers', marker_color='#ff0000')\n", (6082, 6147), True, 'import plotly.graph_objects as go\n'), ((6271, 6325), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'z', 'y': 'yulesimon_PDF', 'line_color': '"""#0000ff"""'}), "(x=z, y=yulesimon_PDF, line_color='#0000ff')\n", (6281, 6325), True, 'import plotly.graph_objects as go\n'), ((6791, 6849), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'y': 'history.log_likelihood', 'line_color': '"""#0000ff"""'}), "(y=history.log_likelihood, line_color='#0000ff')\n", (6801, 6849), True, 'import plotly.graph_objects as go\n'), ((6886, 6936), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'y': 'history.pvalue', 'line_color': '"""#ff0000"""'}), "(y=history.pvalue, line_color='#ff0000')\n", (6896, 6936), True, 'import plotly.graph_objects as go\n'), ((4035, 4054), 'numpy.var', 'np.var', (['log_returns'], {}), '(log_returns)\n', (4041, 4054), True, 'import numpy as np\n'), ((1348, 1408), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""Ticker"""', 'value': '"""MSFT"""', 'type': '"""text"""', 'size': '"""50"""'}), "(id='Ticker', value='MSFT', type='text', size='50')\n", (1357, 1408), True, 'import dash_core_components as dcc\n'), ((1416, 1462), 'dash_html_components.Button', 'html.Button', (['"""Search"""'], {'id': '"""Search"""', 'n_clicks': '(0)'}), "('Search', id='Search', n_clicks=0)\n", (1427, 1462), True, 'import dash_html_components as html\n'), ((1922, 1958), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""Figure1"""', 'figure': 'fig1'}), "(id='Figure1', figure=fig1)\n", (1931, 1958), True, 'import dash_core_components as dcc\n'), ((2032, 2068), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': 
'"""Figure2"""', 'figure': 'fig2'}), "(id='Figure2', figure=fig2)\n", (2041, 2068), True, 'import dash_core_components as dcc\n'), ((2141, 2177), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""Figure3"""', 'figure': 'fig2'}), "(id='Figure3', figure=fig2)\n", (2150, 2177), True, 'import dash_core_components as dcc\n')] |
# coding:utf-8
from gensim.models import word2vec
class LoadModelFlag(object):
def __init__(self, fname, folder_word):
self.fname = fname
self.folder_word = folder_word
def load_model_similar_flag(self):
# 分かち書きしてmodelファイルを生成する。
load = word2vec.Word2Vec.load(self.fname)
isFeature = False
try:
results = load.most_similar(positive=[self.folder_word], topn=30)
except TypeError:
return isFeature
except KeyError:
return isFeature
else:
for x in results:
if x[1] <= 0.9:
continue
else:
isFeature = True
return isFeature
| [
"gensim.models.word2vec.Word2Vec.load"
] | [((278, 312), 'gensim.models.word2vec.Word2Vec.load', 'word2vec.Word2Vec.load', (['self.fname'], {}), '(self.fname)\n', (300, 312), False, 'from gensim.models import word2vec\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .common import Base, session_scope
from sqlalchemy import and_, or_
from sqlalchemy.sql.expression import func
class Team(Base):
__tablename__ = 'teams'
__autoload__ = True
HUMAN_READABLE = 'team'
def __init__(self, team_data):
self.team_id = team_data.get('id')
self.franchise_id = team_data.get('franchise')['franchiseId']
self.name = team_data.get('name')
self.short_name = team_data.get('shortName')
self.team_name = team_data.get('teamName')
self.abbr = team_data.get('abbreviation')
self.first_year_of_play = team_data.get('firstYearOfPlay')
@classmethod
def find(cls, abbr):
with session_scope() as session:
t = session.query(Team).filter(
or_(
func.lower(Team.abbr) == abbr.lower(),
func.lower(Team.orig_abbr) == abbr.lower())).first()
return t
@classmethod
def find_by_name(cls, name):
if name.lower() in [
"canadiens montreal",
"montreal canadiens",
"canadien de montreal"]:
name = "<NAME>"
if name.lower() == "<NAME>":
name = "<NAME>"
with session_scope() as session:
try:
t = session.query(Team).filter(
func.lower(Team.name) == name.lower()
).one()
except Exception as e:
t = None
return t
@classmethod
def find_by_id(cls, id):
with session_scope() as session:
try:
t = session.query(Team).filter(
Team.team_id == id
).one()
except Exception as e:
t = None
return t
@classmethod
def find_by_abbr(cls, abbr):
with session_scope() as session:
try:
t = session.query(Team).filter(
or_(
func.lower(Team.abbr) == abbr.lower(),
func.lower(Team.orig_abbr) == abbr.lower()
)
).one()
except Exception as e:
t = None
return t
@classmethod
def find_teams_for_season(cls, season=None):
with session_scope() as session:
if season is None:
teams = session.query(Team).filter(
Team.last_year_of_play.is_(None)
).all()
else:
teams = session.query(Team).filter(
and_(
Team.first_year_of_play <= season,
or_(
Team.last_year_of_play > season,
Team.last_year_of_play.is_(None)
)
)
).all()
return teams
@classmethod
def find_teams_with_abbrs(cls, abbrs):
with session_scope() as session:
teams = session.query(Team).filter(
or_(
Team.abbr.in_(abbrs),
Team.orig_abbr.in_(abbrs)
)
).all()
return teams
def __str__(self):
return self.name
def __hash__(self):
return self.team_id
def __repr__(self):
return self.name
def __eq__(self, other):
return self.name == other.name
def __ne__(self, other):
return not self == other
def __gt__(self, other):
return self.name > other.name
def __lt__(self, other):
return self.name < other.name
| [
"sqlalchemy.sql.expression.func.lower"
] | [((846, 867), 'sqlalchemy.sql.expression.func.lower', 'func.lower', (['Team.abbr'], {}), '(Team.abbr)\n', (856, 867), False, 'from sqlalchemy.sql.expression import func\n'), ((905, 931), 'sqlalchemy.sql.expression.func.lower', 'func.lower', (['Team.orig_abbr'], {}), '(Team.orig_abbr)\n', (915, 931), False, 'from sqlalchemy.sql.expression import func\n'), ((1410, 1431), 'sqlalchemy.sql.expression.func.lower', 'func.lower', (['Team.name'], {}), '(Team.name)\n', (1420, 1431), False, 'from sqlalchemy.sql.expression import func\n'), ((2056, 2077), 'sqlalchemy.sql.expression.func.lower', 'func.lower', (['Team.abbr'], {}), '(Team.abbr)\n', (2066, 2077), False, 'from sqlalchemy.sql.expression import func\n'), ((2119, 2145), 'sqlalchemy.sql.expression.func.lower', 'func.lower', (['Team.orig_abbr'], {}), '(Team.orig_abbr)\n', (2129, 2145), False, 'from sqlalchemy.sql.expression import func\n')] |
"""Main module with image processing pipeline with several stages:
1. Processing: simplyfy image to get better results.
Here we do color clustering and similar image transformations.
2. Select shapes: find similar shapes on image
3. Classify shapes
4. Connect shapes: find lines on image and make connections between shapes
5. use networkx to store graph data
6. draw
"""
import sys
import os
import cv2
from processors import GMMProcessor, LineExtractor, LineClusterizer, NodeConnector
from detectors import SimpleTemplateDetector
from renderers import ImageRenderer, PlotlyNodeRenderer, NetworkxRenderer
from graph_tools import *
class GraphPipeline:
def __init__(self, tempdir="images"):
if not os.path.exists(tempdir):
os.makedirs(tempdir)
self.tempdir = tempdir
self.stages = [
SimpleTemplateDetector(),
LineExtractor(),
LineClusterizer(),
ImageRenderer(), # for debugging purposes
PlotlyNodeRenderer(), # also for debugging
NodeBuilder(),
NodeConnector(),
EdgeBuilder(),
#CannyProcessor()
#ImageRenderer(),
#PlotlyNodeRenderer(),
NetworkxRenderer()
]
def process(self, image_path, i=0):
path = os.path.join(self.tempdir, str(i))
if not os.path.exists(path):
os.makedirs(path)
image = cv2.imread(image_path)
data = {
"raw_image": image
}
for s in self.stages:
s(data, image_path=image_path)
# print({k: type(v) for k, v in data.items()})
if __name__=="__main__":
if len(sys.argv) < 2:
print("Please provide path to image as a parameter")
exit(1)
image_path = sys.argv[1]
tempdir = sys.argv[2] if len(sys.argv) > 2 else "saves"
pipeline = GraphPipeline(tempdir)
pipeline.process(image_path) | [
"processors.LineExtractor",
"os.path.exists",
"processors.LineClusterizer",
"renderers.NetworkxRenderer",
"os.makedirs",
"detectors.SimpleTemplateDetector",
"renderers.PlotlyNodeRenderer",
"processors.NodeConnector",
"cv2.imread",
"renderers.ImageRenderer"
] | [((1426, 1448), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (1436, 1448), False, 'import cv2\n'), ((713, 736), 'os.path.exists', 'os.path.exists', (['tempdir'], {}), '(tempdir)\n', (727, 736), False, 'import os\n'), ((750, 770), 'os.makedirs', 'os.makedirs', (['tempdir'], {}), '(tempdir)\n', (761, 770), False, 'import os\n'), ((838, 862), 'detectors.SimpleTemplateDetector', 'SimpleTemplateDetector', ([], {}), '()\n', (860, 862), False, 'from detectors import SimpleTemplateDetector\n'), ((876, 891), 'processors.LineExtractor', 'LineExtractor', ([], {}), '()\n', (889, 891), False, 'from processors import GMMProcessor, LineExtractor, LineClusterizer, NodeConnector\n'), ((905, 922), 'processors.LineClusterizer', 'LineClusterizer', ([], {}), '()\n', (920, 922), False, 'from processors import GMMProcessor, LineExtractor, LineClusterizer, NodeConnector\n'), ((936, 951), 'renderers.ImageRenderer', 'ImageRenderer', ([], {}), '()\n', (949, 951), False, 'from renderers import ImageRenderer, PlotlyNodeRenderer, NetworkxRenderer\n'), ((990, 1010), 'renderers.PlotlyNodeRenderer', 'PlotlyNodeRenderer', ([], {}), '()\n', (1008, 1010), False, 'from renderers import ImageRenderer, PlotlyNodeRenderer, NetworkxRenderer\n'), ((1072, 1087), 'processors.NodeConnector', 'NodeConnector', ([], {}), '()\n', (1085, 1087), False, 'from processors import GMMProcessor, LineExtractor, LineClusterizer, NodeConnector\n'), ((1223, 1241), 'renderers.NetworkxRenderer', 'NetworkxRenderer', ([], {}), '()\n', (1239, 1241), False, 'from renderers import ImageRenderer, PlotlyNodeRenderer, NetworkxRenderer\n'), ((1358, 1378), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1372, 1378), False, 'import os\n'), ((1392, 1409), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (1403, 1409), False, 'import os\n')] |
# ======================================================================================================================
# File: GUI/MainWindow.py
# Project: AlphaBrew
# Description: Extensions and functionality for the main GUI window.
# Author: <NAME> <<EMAIL>>
# Copyright: (c) 2020 <NAME>
# ----------------------------------------------------------------------------------------------------------------------
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ----------------------------------------------------------------------------------------------------------------------
# ======================================================================================================================
# Import Statements
# ----------------------------------------------------------------------------------------------------------------------
import os
import importlib_metadata
import openpyxl
from PySide2 import QtWidgets
from GUI.Base.MainWindow import Ui_MainWindow
from GUI.TabRecipe import TabRecipe
from GUI.TabFermentables import TabFermentables
from GUI.TabMiscellaneous import TabMiscellaneous
from GUI.TabWaters import TabWaters
from GUI.TabChemistry import TabChemistry
from GUI.TabMash import TabMash
from GUI.TabHops import TabHops
from GUI.TabCultures import TabCultures
from GUI.TabFermentation import TabFermentation
from Brewhouse import Brewhouse
from Model.Recipe import Recipe
# ======================================================================================================================
# Main Window GUI Class
# ----------------------------------------------------------------------------------------------------------------------
class MainWindow(QtWidgets.QMainWindow):
"""Extends the GUI MainWindow class to provide the base functionality of the application."""
def __init__(self, parent=None):
super().__init__(parent)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self._touched = None
self.version = importlib_metadata.version('alphabrew')
self.filename = None
# Setup the range displays for OG, FG, ABV, and IBU.
self.ui.og.setRange(1, 1.12)
self.ui.og.setPrecision(3)
self.ui.og.setTickMarks(0.01, 2)
self.ui.fg.setRange(1, 1.03)
self.ui.fg.setPrecision(3)
self.ui.fg.setTickMarks(0.01, 2)
self.ui.abv.setRange(0, 15)
self.ui.abv.setPrecision(2)
self.ui.abv.setTickMarks(1, 2)
self.ui.ibu.setRange(0, 120)
self.ui.ibu.setPrecision(1)
self.ui.ibu.setTickMarks(10, 2)
# Connect events to the controls.
self.ui.actionNew.triggered.connect(self.on_file_new)
self.ui.actionOpen.triggered.connect(self.on_file_open)
self.ui.actionSave.triggered.connect(self.on_file_save)
self.ui.actionSaveAs.triggered.connect(self.on_file_save_as)
self.ui.actionAbout.triggered.connect(self.on_help_about)
self.ui.actionContents.triggered.connect(self.on_help_contents)
self.brewhouse = Brewhouse()
self.recipe = Recipe(self.brewhouse.calibrations)
self.recipe.loaded.connect(self.update)
self.recipe.changed.connect(self.update)
# Load database items into memory.
# Load read-only just because we never need to change the database from within this tool.
workbook = openpyxl.load_workbook('Database.xlsx', read_only=True, data_only=True)
# Add the tab instances into the tab container.
self.ui.tab_recipe = TabRecipe(self, self.recipe, self.brewhouse, workbook)
self.ui.tabs.addTab(self.ui.tab_recipe, "Recipe")
self.ui.tab_waters = TabWaters(self, self.recipe, workbook)
self.ui.tabs.addTab(self.ui.tab_waters, "Waters")
self.ui.tab_fermentables = TabFermentables(self, self.recipe, workbook)
self.ui.tabs.addTab(self.ui.tab_fermentables, "Fermentables")
self.ui.tab_mash = TabMash(self, self.recipe)
self.ui.tabs.addTab(self.ui.tab_mash, "Mash")
self.ui.tab_chemistry = TabChemistry(self, self.recipe)
self.ui.tabs.addTab(self.ui.tab_chemistry, "Chemistry")
self.ui.tab_hops = TabHops(self, self.recipe, workbook)
self.ui.tabs.addTab(self.ui.tab_hops, "Hops")
self.ui.tab_miscellaneous = TabMiscellaneous(self, self.recipe)
self.ui.tabs.addTab(self.ui.tab_miscellaneous, "Miscellaneous")
self.ui.tab_cultures = TabCultures(self, self.recipe, workbook)
self.ui.tabs.addTab(self.ui.tab_cultures, "Cultures")
self.ui.tab_fermentation = TabFermentation(self, self.recipe)
self.ui.tabs.addTab(self.ui.tab_fermentation, "Fermentation")
self.ui.tabs.currentChanged.connect(self.on_tab_change)
workbook.close()
self.update()
# Once loaded it's safe to assume that nothing has been touched yet.
self.touched = False
# ======================================================================================================================
# Shutdown Hook
# ----------------------------------------------------------------------------------------------------------------------
def closeEvent(self, event):
"""Ensure that the user has saved all changes and allow them to cancel if they didn't mean to close."""
self._warn_unsaved(event)
# ======================================================================================================================
# Properties
# ----------------------------------------------------------------------------------------------------------------------
@property
def isDirty(self):
"""Returns True when the currently open file has never been saved or has changed since the last save."""
# If the recipe has not been changed then it's safe to assume that it's not dirty.
if not self.touched:
return False
# Touched, but possibly no filename yet - no file, nothing to compare - definitely dirty.
if self.filename is None:
return True
# Otherwise, compare the existing contents of the file to the current state.
current = self.recipe.to_beerjson()
with open(self.filename) as handle:
existing = handle.read()
# Return True, indicating "dirty" when they aren't identical.
# There's probably a more robust way to compare JSON.
return current != existing
# ----------------------------------------------------------------------------------------------------------------------
@property
def touched(self):
"""Fetches the "dirty" flag that comes from recipe changes. This is used to better capture when a recipe has
been modified before it has been saved."""
return self._touched
# ----------------------------------------------------------------------------------------------------------------------
@touched.setter
def touched(self, state):
"""Sets the "touched" flag and updates the GUI window to show an asterisk when it is dirty."""
self._touched = state
title = f'AlphaBrew {self.version} - '
if self.filename is not None:
title += os.path.basename(self.filename)
else:
title += 'untitled'
if self.isDirty:
title += '*'
self.setWindowTitle(title)
# ======================================================================================================================
# Event Handlers
# ----------------------------------------------------------------------------------------------------------------------
def on_file_new(self):
"""Fires when the user requests to start a new recipe."""
if self._warn_unsaved():
self.filename = None
self.recipe.clear()
self.touched = False
# ----------------------------------------------------------------------------------------------------------------------
def on_file_open(self):
"""Fires when the user requests to open a recipe from local file."""
if self._warn_unsaved():
self.recipe.clear()
filename, filters = QtWidgets.QFileDialog.getOpenFileName(self, "Open Recipe", filter="BeerJSON (*.json)")
if filename:
self.filename = filename
self.recipe.changed.disconnect(self.update)
with open(filename) as handle:
self.recipe.from_beerxml(handle.read())
# Newly loaded recipe was not touched. Clear that up despite all of the "change" events reported during
# the loading.
self.touched = False
self.recipe.changed.connect(self.update)
# ----------------------------------------------------------------------------------------------------------------------
def on_file_save(self):
"""Fires when the user requests to save the current recipe to local file."""
if self.filename:
# TODO: Handle merging BeerJSON with existing files. The goal would be to update applicable values when
# saving but not nuke the existing values that may already be in the file.
with open(self.filename, 'w') as handle:
handle.write(self.recipe.to_beerjson())
self.touched = False
else:
self.on_file_save_as()
# ----------------------------------------------------------------------------------------------------------------------
def on_file_save_as(self):
"""Fires when the user specifically requests to save as a different file."""
filename, _ = QtWidgets.QFileDialog.getSaveFileName(self, "Save Recipe As...", filter="BeerJSON (*.json)")
if filename:
self.filename = filename
self.on_file_save()
# ----------------------------------------------------------------------------------------------------------------------
def on_help_about(self):
"""Fires when the user selects about from the help menu."""
description = 'A highly opinionated beer recipe editor. It\'s not fancy or elaborate. It strives to do one '
description += 'thing, one thing only, and one thing well - edit beer recipes.'
description += '\n\n'
description += 'It is not a calculator or an adjuster or a compensator, etc.'
description += ' - download something else for that.'
description += '\n\n'
description += f'Version: {self.version}\n'
description += 'Author: <NAME> <<EMAIL>>'
QtWidgets.QMessageBox.about(self, 'AlphaBrew', description)
# ----------------------------------------------------------------------------------------------------------------------
def on_help_contents(self):
"""Fires when the user asks for help."""
# TODO: Implement a nice help guide of some kind to walk users through the features and settings.
# ----------------------------------------------------------------------------------------------------------------------
def update(self):
"""Handles copying of recipe information over to the GUI when the recipe gets changed."""
# Update the bubble indicating the ideal range based upon the selected beer style.
if self.recipe.style:
if self.recipe.style.og is not None:
self.ui.og.setPreferredRange(
self.recipe.style.og.minimum.sg,
self.recipe.style.og.maximum.sg
)
if self.recipe.style.fg is not None:
self.ui.fg.setPreferredRange(
self.recipe.style.fg.minimum.sg,
self.recipe.style.fg.maximum.sg
)
if self.recipe.style.abv is not None:
self.ui.abv.setPreferredRange(
self.recipe.style.abv.minimum.percent,
self.recipe.style.abv.maximum.percent
)
if self.recipe.style.bitterness is not None:
self.ui.ibu.setPreferredRange(
self.recipe.style.bitterness.minimum.IBUs,
self.recipe.style.bitterness.maximum.IBUs
)
if self.recipe.style.color is not None:
self.ui.srm.setPreferredRange(
self.recipe.style.color.minimum.SRM,
self.recipe.style.color.maximum.SRM
)
# Set the bar for the actual, calclulated value from the recipe.
self.ui.og.setValue(self.recipe.originalGravity)
self.ui.fg.setValue(self.recipe.finalGravity)
self.ui.abv.setValue(self.recipe.abv)
self.ui.ibu.setValue(self.recipe.bitterness)
self.ui.srm.setValue(self.recipe.color)
self.ui.ibu_gu.setValue(self.recipe.ibuGu)
# Output numbers for the calculated values.
self.ui.calcBoilSize.setText(f'{self.recipe.boilVolume.gal:.1f} gal')
self.ui.calcBoilSg.setText(f'{self.recipe.boilGravity:.3f}')
self.ui.calcCalories.setText(f'{self.recipe.calories:.0f} / 16oz')
self.touched = True
# ----------------------------------------------------------------------------------------------------------------------
def on_tab_change(self, index):
"""Fires when the active tab changes and calls tries to call the activated method of newly activated tab widget.
"""
tab = self.ui.tabs.widget(index)
try:
tab.activated()
except AttributeError:
# Swallow errors when the refresh method does not exist. It's implementation is optional.
pass
# ======================================================================================================================
# Private Methods
# ----------------------------------------------------------------------------------------------------------------------
def _warn_unsaved(self, event=None):
"""Check to see if the current file is dirty and prompt the user to save changes."""
if self.isDirty:
text = 'You have unsaved changes, would you like to save them before exiting?'
buttons = QtWidgets.QMessageBox.Save | QtWidgets.QMessageBox.Discard | QtWidgets.QMessageBox.Cancel
stdButtons = QtWidgets.QMessageBox.StandardButtons(buttons)
default = QtWidgets.QMessageBox.Save
result = QtWidgets.QMessageBox.warning(self, "Unsaved changes", text, stdButtons, defaultButton=default)
if result == QtWidgets.QMessageBox.Cancel:
if event is not None:
event.ignore()
return False
if result == QtWidgets.QMessageBox.Save:
self.on_file_save()
return True
# End of File
| [
"GUI.TabMash.TabMash",
"GUI.Base.MainWindow.Ui_MainWindow",
"GUI.TabMiscellaneous.TabMiscellaneous",
"importlib_metadata.version",
"GUI.TabFermentables.TabFermentables",
"GUI.TabHops.TabHops",
"PySide2.QtWidgets.QFileDialog.getOpenFileName",
"GUI.TabWaters.TabWaters",
"PySide2.QtWidgets.QMessageBox.... | [((2977, 2992), 'GUI.Base.MainWindow.Ui_MainWindow', 'Ui_MainWindow', ([], {}), '()\n', (2990, 2992), False, 'from GUI.Base.MainWindow import Ui_MainWindow\n'), ((3082, 3121), 'importlib_metadata.version', 'importlib_metadata.version', (['"""alphabrew"""'], {}), "('alphabrew')\n", (3108, 3121), False, 'import importlib_metadata\n'), ((4160, 4171), 'Brewhouse.Brewhouse', 'Brewhouse', ([], {}), '()\n', (4169, 4171), False, 'from Brewhouse import Brewhouse\n'), ((4195, 4230), 'Model.Recipe.Recipe', 'Recipe', (['self.brewhouse.calibrations'], {}), '(self.brewhouse.calibrations)\n', (4201, 4230), False, 'from Model.Recipe import Recipe\n'), ((4495, 4566), 'openpyxl.load_workbook', 'openpyxl.load_workbook', (['"""Database.xlsx"""'], {'read_only': '(True)', 'data_only': '(True)'}), "('Database.xlsx', read_only=True, data_only=True)\n", (4517, 4566), False, 'import openpyxl\n'), ((4656, 4710), 'GUI.TabRecipe.TabRecipe', 'TabRecipe', (['self', 'self.recipe', 'self.brewhouse', 'workbook'], {}), '(self, self.recipe, self.brewhouse, workbook)\n', (4665, 4710), False, 'from GUI.TabRecipe import TabRecipe\n'), ((4802, 4840), 'GUI.TabWaters.TabWaters', 'TabWaters', (['self', 'self.recipe', 'workbook'], {}), '(self, self.recipe, workbook)\n', (4811, 4840), False, 'from GUI.TabWaters import TabWaters\n'), ((4938, 4982), 'GUI.TabFermentables.TabFermentables', 'TabFermentables', (['self', 'self.recipe', 'workbook'], {}), '(self, self.recipe, workbook)\n', (4953, 4982), False, 'from GUI.TabFermentables import TabFermentables\n'), ((5084, 5110), 'GUI.TabMash.TabMash', 'TabMash', (['self', 'self.recipe'], {}), '(self, self.recipe)\n', (5091, 5110), False, 'from GUI.TabMash import TabMash\n'), ((5201, 5232), 'GUI.TabChemistry.TabChemistry', 'TabChemistry', (['self', 'self.recipe'], {}), '(self, self.recipe)\n', (5213, 5232), False, 'from GUI.TabChemistry import TabChemistry\n'), ((5328, 5364), 'GUI.TabHops.TabHops', 'TabHops', (['self', 'self.recipe', 
'workbook'], {}), '(self, self.recipe, workbook)\n', (5335, 5364), False, 'from GUI.TabHops import TabHops\n'), ((5459, 5494), 'GUI.TabMiscellaneous.TabMiscellaneous', 'TabMiscellaneous', (['self', 'self.recipe'], {}), '(self, self.recipe)\n', (5475, 5494), False, 'from GUI.TabMiscellaneous import TabMiscellaneous\n'), ((5602, 5642), 'GUI.TabCultures.TabCultures', 'TabCultures', (['self', 'self.recipe', 'workbook'], {}), '(self, self.recipe, workbook)\n', (5613, 5642), False, 'from GUI.TabCultures import TabCultures\n'), ((5744, 5778), 'GUI.TabFermentation.TabFermentation', 'TabFermentation', (['self', 'self.recipe'], {}), '(self, self.recipe)\n', (5759, 5778), False, 'from GUI.TabFermentation import TabFermentation\n'), ((10942, 11039), 'PySide2.QtWidgets.QFileDialog.getSaveFileName', 'QtWidgets.QFileDialog.getSaveFileName', (['self', '"""Save Recipe As..."""'], {'filter': '"""BeerJSON (*.json)"""'}), "(self, 'Save Recipe As...', filter=\n 'BeerJSON (*.json)')\n", (10979, 11039), False, 'from PySide2 import QtWidgets\n'), ((11887, 11946), 'PySide2.QtWidgets.QMessageBox.about', 'QtWidgets.QMessageBox.about', (['self', '"""AlphaBrew"""', 'description'], {}), "(self, 'AlphaBrew', description)\n", (11914, 11946), False, 'from PySide2 import QtWidgets\n'), ((8431, 8462), 'os.path.basename', 'os.path.basename', (['self.filename'], {}), '(self.filename)\n', (8447, 8462), False, 'import os\n'), ((9430, 9521), 'PySide2.QtWidgets.QFileDialog.getOpenFileName', 'QtWidgets.QFileDialog.getOpenFileName', (['self', '"""Open Recipe"""'], {'filter': '"""BeerJSON (*.json)"""'}), "(self, 'Open Recipe', filter=\n 'BeerJSON (*.json)')\n", (9467, 9521), False, 'from PySide2 import QtWidgets\n'), ((15711, 15757), 'PySide2.QtWidgets.QMessageBox.StandardButtons', 'QtWidgets.QMessageBox.StandardButtons', (['buttons'], {}), '(buttons)\n', (15748, 15757), False, 'from PySide2 import QtWidgets\n'), ((15834, 15933), 'PySide2.QtWidgets.QMessageBox.warning', 'QtWidgets.QMessageBox.warning', 
(['self', '"""Unsaved changes"""', 'text', 'stdButtons'], {'defaultButton': 'default'}), "(self, 'Unsaved changes', text, stdButtons,\n defaultButton=default)\n", (15863, 15933), False, 'from PySide2 import QtWidgets\n')] |
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from django.shortcuts import render, HttpResponseRedirect
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import views as auth_views
def crear_cuenta(request):
if request.user.is_authenticated():
return HttpResponseRedirect('/chat/')
if request.method == "POST":
form = UserCreationForm(request.POST)
if form.is_valid():
user = User.objects.create_user(username=form.cleaned_data['username'],
password=form.cleaned_data['<PASSWORD>'])
user_autenticado = authenticate(username=user.username, password=form.cleaned_data['<PASSWORD>'])
login(request, user_autenticado)
return HttpResponseRedirect('/chat/')
else:
form = UserCreationForm()
return render(request, 'registration/signup.html', {'form': form})
def login_check(request):
# Todos los usuarios autenticados tienen permiso de chatear
# Por lo que no hace falta que se autentique con otra cuenta
if request.user.is_authenticated():
return HttpResponseRedirect('/BotTelegram/')
return auth_views.login(request)
| [
"django.shortcuts.render",
"django.contrib.auth.authenticate",
"django.contrib.auth.views.login",
"django.contrib.auth.login",
"django.shortcuts.HttpResponseRedirect",
"django.contrib.auth.forms.UserCreationForm",
"django.contrib.auth.models.User.objects.create_user"
] | [((918, 977), 'django.shortcuts.render', 'render', (['request', '"""registration/signup.html"""', "{'form': form}"], {}), "(request, 'registration/signup.html', {'form': form})\n", (924, 977), False, 'from django.shortcuts import render, HttpResponseRedirect\n'), ((1240, 1265), 'django.contrib.auth.views.login', 'auth_views.login', (['request'], {}), '(request)\n', (1256, 1265), True, 'from django.contrib.auth import views as auth_views\n'), ((345, 375), 'django.shortcuts.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/chat/"""'], {}), "('/chat/')\n", (365, 375), False, 'from django.shortcuts import render, HttpResponseRedirect\n'), ((425, 455), 'django.contrib.auth.forms.UserCreationForm', 'UserCreationForm', (['request.POST'], {}), '(request.POST)\n', (441, 455), False, 'from django.contrib.auth.forms import UserCreationForm\n'), ((887, 905), 'django.contrib.auth.forms.UserCreationForm', 'UserCreationForm', ([], {}), '()\n', (903, 905), False, 'from django.contrib.auth.forms import UserCreationForm\n'), ((1190, 1227), 'django.shortcuts.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/BotTelegram/"""'], {}), "('/BotTelegram/')\n", (1210, 1227), False, 'from django.shortcuts import render, HttpResponseRedirect\n'), ((504, 615), 'django.contrib.auth.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': "form.cleaned_data['username']", 'password': "form.cleaned_data['<PASSWORD>']"}), "(username=form.cleaned_data['username'], password=\n form.cleaned_data['<PASSWORD>'])\n", (528, 615), False, 'from django.contrib.auth.models import User\n'), ((687, 765), 'django.contrib.auth.authenticate', 'authenticate', ([], {'username': 'user.username', 'password': "form.cleaned_data['<PASSWORD>']"}), "(username=user.username, password=form.cleaned_data['<PASSWORD>'])\n", (699, 765), False, 'from django.contrib.auth import authenticate, login\n'), ((778, 810), 'django.contrib.auth.login', 'login', (['request', 'user_autenticado'], {}), 
'(request, user_autenticado)\n', (783, 810), False, 'from django.contrib.auth import authenticate, login\n'), ((830, 860), 'django.shortcuts.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/chat/"""'], {}), "('/chat/')\n", (850, 860), False, 'from django.shortcuts import render, HttpResponseRedirect\n')] |
import argparse
import os
import sys
import time
import warnings
from ast import literal_eval
warnings.filterwarnings("ignore")
import IPython
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import context
from context import utils
import utils.plotting as plot
import utils.db as db
import utils.filesystem as fs
from utils.misc import get_equal_dicts, length_of_longest
from data_analysis import print_group_info, get_best, get_max_chkpt_int, invert_signs, get_checkpoint_directories
def load_data(checkpoint_directories, old_mtimes=None, old_states=None, old_stats=None, best=False):
# TODO This is a mess
# Parse inputs
if best:
filename = 'state-dict-best-algorithm.pkl'
else:
filename = 'state-dict-algorithm.pkl'
n_tot_files = len(checkpoint_directories)
if old_mtimes is not None:
assert old_states is not None, "If given modification times, must also get old data to overwrite"
assert old_stats is not None, "If given modification times, must also get old stats to overwrite"
# Find files that have been modified
mtimes = fs.get_modified_times(checkpoint_directories, filename)
if len(mtimes)-len(old_mtimes) > 0:
old_mtimes = np.pad(old_mtimes, (0, len(mtimes)-len(old_mtimes)), mode='constant', constant_values=0)
elif len(old_mtimes)-len(mtimes) > 0:
mtimes = np.pad(mtimes, (0, len(old_mtimes)-len(mtimes)), mode='constant', constant_values=0)
is_changed = ~np.equal(old_mtimes, mtimes)
checkpoint_directories = [d for i, d in enumerate(checkpoint_directories) if is_changed[i]]
n_files = len(checkpoint_directories)
idxs = np.where(is_changed)[0]
algorithm_states_list = old_states
stats_list = old_stats
print("Loading " + str(n_files) + " modified files of " + str(n_tot_files) + " total files...")
else:
n_files = len(checkpoint_directories)
print("Loading " + str(n_files) + " files...")
algorithm_states_list = [None]*len(checkpoint_directories)
stats_list = [None]*len(checkpoint_directories)
idxs = range(0, len(checkpoint_directories))
# Strings and constants
n_chars = len(str(n_files))
f = ' {:' + str(n_chars) + 'd}/{:' + str(n_chars) + 'd} files loaded'
text = ""
# Loop over files to load (all or only changed ones)
i_file = -1
for i, chkpt_dir in zip(idxs, checkpoint_directories):
try:
algorithm_states_list[i] = torch.load(os.path.join(chkpt_dir, filename))
stats_list[i] = pd.read_csv(os.path.join(chkpt_dir, 'stats.csv'))
i_file += 1
if i_file + 1 != n_files:
print(f.format(i_file + 1, n_files), end='\r')
except Exception:
text += " Required files not (yet) present in: " + chkpt_dir + "\n"
# Remove any None
algorithm_states_list = [s for s in algorithm_states_list if s is not None]
stats_list = [s for s in stats_list if s is not None]
# Evaluate any strings as literal types
for s in stats_list:
for k in s.keys()[s.dtypes == object]:
s[k] = s[k].apply(literal_eval)
print(f.format(i_file + 1, n_files), end='\n')
if text:
print(text[:-2])
return algorithm_states_list, stats_list
def sub_into_lists(stats_list, keys_to_monitor):
for s in stats_list:
for k in keys_to_monitor:
if k in s and type(s[k][0]) is list:
s[k] = [vals_group[0] for vals_group in s[k]]
if 'lr' in k and 'lr' not in s.keys():
s['lr'] = s[k][0]
def create_plots(args, stats_list, keys_to_monitor, groups):
    """Create and save all monitoring plots for every monitored key.

    For each key, four plot types (series, distribution, median, final
    distribution) are saved over all runs, plus a grouped mean/sd plot of
    the longest series. When more than one group exists, the same four
    plot types are additionally saved per group.

    Args:
        args: Namespace with `monitor_dir`, the output directory for PDFs.
        stats_list: List of per-run statistics containers (DataFrame-like;
            columns expose `.tolist()`).
        keys_to_monitor: Iterable of column names to plot.
        groups: Array of group ids, one per entry of stats_list (must be
            indexable with a list of indices, e.g. a numpy array).
    """
    unique_groups = set(groups)
    n_keys = len(keys_to_monitor)
    # Progress string padded to the number of digits in the key count
    n_chars = len(str(n_keys))
    f = ' {:' + str(n_chars) + 'd}/{:' + str(n_chars) + 'd} monitored keys plotted'
    for i_key, k in enumerate(keys_to_monitor):
        # One series per run that actually recorded this key
        list_of_series = [s[k].tolist() for s in stats_list if k in s]
        list_of_genera = [range(len(s)) for s in stats_list if k in s]
        plot.timeseries(list_of_genera, list_of_series, xlabel='generations', ylabel=k)
        plt.savefig(os.path.join(args.monitor_dir, k + '-all-series.pdf'), bbox_inches='tight')
        plt.close()
        plot.timeseries_distribution(list_of_genera, list_of_series, xlabel='generations', ylabel=k)
        plt.savefig(os.path.join(args.monitor_dir, k + '-all-distribution.pdf'), bbox_inches='tight')
        plt.close()
        plot.timeseries_median(list_of_genera, list_of_series, xlabel='generations', ylabel=k)
        plt.savefig(os.path.join(args.monitor_dir, k + '-all-median.pdf'), bbox_inches='tight')
        plt.close()
        plot.timeseries_final_distribution(list_of_series, label=k, ybins=len(list_of_series)*10)
        plt.savefig(os.path.join(args.monitor_dir, k + '-all-final-distribution.pdf'), bbox_inches='tight')
        plt.close()
        # Subset only those series that are done (or the one that is the longest)
        l = length_of_longest(list_of_series)
        indices = [i for i, series in enumerate(list_of_series) if len(series) == l]
        list_of_longest_series = [list_of_series[i] for i in indices]
        list_of_longest_genera = [list_of_genera[i] for i in indices]
        groups_longest_series = groups[indices]
        plot.timeseries_mean_grouped(list_of_longest_genera, list_of_longest_series, groups_longest_series, xlabel='generations', ylabel=k)
        plt.savefig(os.path.join(args.monitor_dir, k + '-all-series-mean-sd' + '.pdf'), bbox_inches='tight')
        plt.close()
        if len(unique_groups) > 1:
            # Repeat the per-key plots for each group separately
            for g in unique_groups:
                gstr = '{0:02d}'.format(g)
                g_indices = np.where(groups == g)[0]
                group_stats = [stats_list[i] for i in g_indices]
                list_of_series = [s[k].tolist() for s in group_stats if k in s]
                list_of_genera = [range(len(s)) for s in group_stats if k in s]
                if list_of_genera and list_of_series:
                    plot.timeseries(list_of_genera, list_of_series, xlabel='generations', ylabel=k)
                    plt.savefig(os.path.join(args.monitor_dir, k + '-group-' + gstr + '-series.pdf'), bbox_inches='tight')
                    plt.close()
                    plot.timeseries_distribution(list_of_genera, list_of_series, xlabel='generations', ylabel=k)
                    plt.savefig(os.path.join(args.monitor_dir, k + '-group-' + gstr + '-distribution.pdf'), bbox_inches='tight')
                    plt.close()
                    plot.timeseries_median(list_of_genera, list_of_series, xlabel='generations', ylabel=k)
                    plt.savefig(os.path.join(args.monitor_dir, k + '-group-' + gstr + '-median.pdf'), bbox_inches='tight')
                    plt.close()
                    plot.timeseries_final_distribution(list_of_series, label=k, ybins=len(list_of_series)*10)
                    plt.savefig(os.path.join(args.monitor_dir, k + '-group-' + gstr + '-final-distribution.pdf'), bbox_inches='tight')
                    plt.close()
        # Progress print; final line ends with newline instead of carriage return
        if i_key + 1 == n_keys:
            print(f.format(i_key+1, n_keys), end='\n')
        else:
            print(f.format(i_key+1, n_keys), end='\r')
def wait_for_updates(args, last_refresh, max_chkpt_int, mtimes_last):
    """Wait for updates to the checkpoint directories.

    Sleeps until twice the maximum checkpoint interval has passed since
    `last_refresh`, then rescans the checkpoint directories.

    Returns:
        True if no files were modified (i.e. monitoring should stop),
        False if updates were seen and monitoring should continue.
    """
    print("Waiting 'max checkpoint interval' x 2 = " + str(int(max_chkpt_int * 2)) + " seconds before checking for updates...")
    count_down(count_down_started_at=last_refresh, wait=max_chkpt_int * 2)
    checkpoint_directories = get_checkpoint_directories(args.d)
    mtimes = fs.get_modified_times(checkpoint_directories, 'state-dict-algorithm.pkl')
    if mtimes == mtimes_last:
        # Nothing changed during the whole wait window: give up monitoring
        print("Monitoring stopped since loaded data did not change for " + str(int(max_chkpt_int*2)) + " seconds.")
        return True
    return False
def get_keys_to_monitor(stats_list):
    """Collect the set of statistic names to plot.

    Starts from a fixed base set and adds every column whose name
    contains one of the currently monitored keys as a substring.

    Args:
        stats_list: Iterable of DataFrame-like containers exposing `.columns`.

    Returns:
        Set of key/column names to monitor.
    """
    keys_to_monitor = {'return_unp', 'accuracy_unp', 'sigma'}
    for stats in stats_list:
        for column in stats.columns:
            # Adopt any column that matches a monitored key as substring
            if any(key in column for key in keys_to_monitor):
                keys_to_monitor.add(column)
    return keys_to_monitor
def get_data(old_mtimes=None, old_states=None, old_stats=None, timeout=30*60, checkevery=30):
    """Load checkpoint data, polling until some is found or the timeout expires.

    NOTE(review): relies on the module-level `args` (set in __main__) for
    the directory to scan; it is not passed in explicitly.

    Args:
        old_mtimes: Previously seen modification times (enables incremental reload).
        old_states: Previously loaded algorithm states (reused when unchanged).
        old_stats: Previously loaded statistics (reused when unchanged).
        timeout: Total polling budget, iterated in seconds here but printed
            as minutes and fed from args.t — units look inconsistent, TODO confirm.
        checkevery: Seconds between polls.

    Returns:
        Tuple (algorithm_states, stats_list); both may be empty when no
        data was found before the timeout.
    """
    checkpoint_directories = get_checkpoint_directories(args.d)
    algorithm_states, stats_list = load_data(checkpoint_directories, old_mtimes=old_mtimes, old_states=old_states, old_stats=old_stats)
    # Check if any data found; if not, keep polling until the timeout
    if not algorithm_states:
        print("No data found.")
        print("Rechecking directory for files every " + str(checkevery) + " seconds for " + str(int(timeout/60)) + " minutes.")
        for i in range(0, timeout, checkevery):
            count_down(wait=checkevery, info_interval=1)
            checkpoint_directories = get_checkpoint_directories(args.d)
            algorithm_states, stats_list = load_data(checkpoint_directories, old_mtimes=old_mtimes, old_states=old_states, old_stats=old_stats)
            if algorithm_states:
                return algorithm_states, stats_list
            print("{:2.2f} minutes remaining".format((timeout - i-checkevery)/60))
        print("No data found to monitor after checking for " + str(int(timeout/60)) + " minutes.")
    return algorithm_states, stats_list
def count_down(wait=60, count_down_started_at=None, info_interval=5):
    """Print a countdown, sleeping until the wait period has elapsed.

    Args:
        wait: Total seconds to wait.
        count_down_started_at: Optional time.time() stamp; when given, the
            time already elapsed since it is subtracted from `wait`.
        info_interval: Seconds between countdown prints (and sleeps).

    Returns:
        The current time.time() when the countdown finishes.
    """
    if count_down_started_at is None:
        seconds_remaining = wait
    else:
        seconds_remaining = int(wait - (time.time() - count_down_started_at))
    for elapsed in range(0, seconds_remaining, info_interval):
        # \r keeps the countdown on a single console line
        print("Updating in {:s} seconds".format(str(seconds_remaining - elapsed)), end="\r")
        time.sleep(info_interval)
    print("Updating... ", end='\n')
    return time.time()
def monitor(args):
    """Main monitoring loop: load checkpoint data, plot it, optionally
    upload to Dropbox, and repeat until no checkpoint changes are seen.

    Args:
        args: Parsed CLI namespace with at least d (directory),
            i (identifier), t (timeout), c (copy-to-Dropbox flag).
            Mutated in place: args.d, args.monitor_dir and args.dbx_dir
            are set here.
    """
    this_file_dir_local = os.path.dirname(os.path.abspath(__file__))
    # Get the root of the package locally and where monitored (may be the same)
    package_root_this_file = fs.get_parent(this_file_dir_local, 'es-rl')
    # Resolve the directory to monitor from -d (absolute or relative) or -i
    if not args.d:
        args.d = os.path.join(package_root_this_file, 'experiments', 'checkpoints', args.i)
    elif not os.path.isabs(args.d):
        args.d = os.path.join(package_root_this_file, 'experiments', 'checkpoints', args.d)
        # NOTE(review): the directory is only created in this relative-path
        # branch; an absolute or identifier-derived path that does not
        # exist is never created — confirm this is intentional.
        if not os.path.exists(args.d):
            os.mkdir(args.d)
    package_root_monitored_directory = fs.get_parent(args.d, 'es-rl')
    print("Monitoring: " + args.d)
    # Load data (polls until data appears or args.t runs out)
    last_refresh = time.time()
    checkpoint_directories = get_checkpoint_directories(args.d)
    mtimes = fs.get_modified_times(checkpoint_directories, 'state-dict-algorithm.pkl')
    algorithm_states, stats_list = get_data(timeout=args.t)
    if not algorithm_states:
        print("Monitoring stopped. No data available after " + str(args.t) + " minutes.")
        return
    # Create directory for monitoring plots
    monitor_dir = os.path.join(args.d, 'monitoring')
    if not os.path.exists(monitor_dir):
        os.mkdir(monitor_dir)
    args.monitor_dir = monitor_dir
    # Setup Dropbox client (only when -c is given)
    if args.c:
        package_parent_folder_monitored_directory = os.path.join(os.sep,*package_root_monitored_directory.split(os.sep)[:-1])
        # args.dbx_dir = os.sep + os.path.relpath(args.monitor_dir, package_parent_folder_monitored_directory)
        args.dbx_dir = os.sep + os.path.relpath(args.d, package_parent_folder_monitored_directory)
        token_file = os.path.join(this_file_dir_local, 'dropboxtoken.tok')
        assert os.path.exists(token_file)
        dbx = db.get_dropbox_client(token_file)
    # Keys ignored when grouping algorithm states into equivalent runs
    ignored_keys = ['chkpt_dir', 'sensitivities', 'sens_inputs', '_weight_update_scale']
    ignored_keys.extend([k for k in algorithm_states[0].keys() if k[0] == '_'])
    ignored_keys = set(ignored_keys)
    for s in algorithm_states:
        # When sigma is optimized it varies per run, so ignore it for grouping
        if s['optimize_sigma']:
            ignored_keys.add('sigma')
            break
    # Monitoring loop
    while True:
        # Prepare data for plotting
        print("Preparing data...")
        keys_to_monitor = get_keys_to_monitor(stats_list)
        invert_signs(stats_list, keys_to_monitor)
        sub_into_lists(stats_list, keys_to_monitor)
        # Find groups of algorithms with equal (non-ignored) settings
        groups = get_equal_dicts(algorithm_states, ignored_keys=ignored_keys)
        print_group_info(algorithm_states, groups, directory=args.monitor_dir)
        # Plot
        print("Creating and saving plots...")
        # try:
        create_plots(args, stats_list, keys_to_monitor, groups)
        # except:
        #     pass
        # Upload results to dropbox
        if args.c:
            # db.upload_directory(dbx, args.monitor_dir, args.dbx_dir)
            db.upload_directory(dbx, args.d, args.dbx_dir, upload_older_files=False)
        # Break condition: stop when nothing changed within 2x the checkpoint interval
        if wait_for_updates(args, last_refresh, get_max_chkpt_int(algorithm_states), mtimes):
            return
        # Reload only the data that changed since the previous pass
        print()
        last_refresh = time.time()
        algorithm_states, stats_list = get_data(timeout=args.t, old_mtimes=mtimes, old_states=algorithm_states, old_stats=stats_list)
        checkpoint_directories = get_checkpoint_directories(args.d)
        mtimes = fs.get_modified_times(checkpoint_directories, 'state-dict-algorithm.pkl')
if __name__ == '__main__':
    # Parse command-line inputs
    parser = argparse.ArgumentParser(description='Monitorer')
    parser.add_argument('-d', type=str, metavar='--directory', help='The directory of checkpoints to monitor.')
    parser.add_argument('-i', type=str, metavar='--identifier', help='The identifier of the checkpoints to monitor.')
    parser.add_argument('-t', type=int, metavar='--timeout', default=4000, help='If no files are modified during a period of timeout minutes, monitoring is stopped.')
    parser.add_argument('-c', action='store_true', help='Copying of monitor directory to dropbox.')
    parser.add_argument('-s', action='store_true', help='Silent mode.')
    args = parser.parse_args()
    if args.s:
        # Silent mode: discard all stdout output for the rest of the run
        sys.stdout = open(os.devnull, 'w')
    assert args.d or args.i, "Must specify directory or identifier of checkpoints to monitor"
    # Colormap (alternatives kept for reference)
    # plt.rcParams['image.cmap'] = 'magma'
    # plt.rcParams['image.cmap'] = 'inferno'
    # plt.rcParams['image.cmap'] = 'plasma'
    plt.rcParams['image.cmap'] = 'viridis'
    try:
        monitor(args)
    except KeyboardInterrupt:
        # Allow clean manual shutdown with Ctrl-C
        print("\nMonitoring halted by user KeyboardInterrupt")
"""
SSHFS
sshfs s<EMAIL>@login.<EMAIL>:/zhome/c2/b/86488 ~/mnt
LINUX
python monitor.py -d ~/mnt/Documents/es-rl/experiments/checkpoints/E001-SM/ -c
MAC
python monitor.py -d /Users/Jakob/mnt/Documents/es-rl/experiments/checkpoints/E001-SM/ -c
python monitor.py -d sftp://s132315@login.hpc.dtu.dk/zhome/c2/b/86488/Documents/es-rl/experiments/checkpoints/E001-SM
""" | [
"utils.db.upload_directory",
"time.sleep",
"numpy.equal",
"utils.plotting.timeseries_median",
"utils.misc.get_equal_dicts",
"os.path.exists",
"utils.filesystem.get_parent",
"argparse.ArgumentParser",
"utils.plotting.timeseries_mean_grouped",
"numpy.where",
"data_analysis.invert_signs",
"matplo... | [((94, 127), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (117, 127), False, 'import warnings\n'), ((169, 183), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (176, 183), True, 'import matplotlib as mpl\n'), ((7860, 7894), 'data_analysis.get_checkpoint_directories', 'get_checkpoint_directories', (['args.d'], {}), '(args.d)\n', (7886, 7894), False, 'from data_analysis import print_group_info, get_best, get_max_chkpt_int, invert_signs, get_checkpoint_directories\n'), ((7908, 7981), 'utils.filesystem.get_modified_times', 'fs.get_modified_times', (['checkpoint_directories', '"""state-dict-algorithm.pkl"""'], {}), "(checkpoint_directories, 'state-dict-algorithm.pkl')\n", (7929, 7981), True, 'import utils.filesystem as fs\n'), ((8653, 8687), 'data_analysis.get_checkpoint_directories', 'get_checkpoint_directories', (['args.d'], {}), '(args.d)\n', (8679, 8687), False, 'from data_analysis import print_group_info, get_best, get_max_chkpt_int, invert_signs, get_checkpoint_directories\n'), ((10143, 10154), 'time.time', 'time.time', ([], {}), '()\n', (10152, 10154), False, 'import time\n'), ((10354, 10397), 'utils.filesystem.get_parent', 'fs.get_parent', (['this_file_dir_local', '"""es-rl"""'], {}), "(this_file_dir_local, 'es-rl')\n", (10367, 10397), True, 'import utils.filesystem as fs\n'), ((10768, 10798), 'utils.filesystem.get_parent', 'fs.get_parent', (['args.d', '"""es-rl"""'], {}), "(args.d, 'es-rl')\n", (10781, 10798), True, 'import utils.filesystem as fs\n'), ((10870, 10881), 'time.time', 'time.time', ([], {}), '()\n', (10879, 10881), False, 'import time\n'), ((10911, 10945), 'data_analysis.get_checkpoint_directories', 'get_checkpoint_directories', (['args.d'], {}), '(args.d)\n', (10937, 10945), False, 'from data_analysis import print_group_info, get_best, get_max_chkpt_int, invert_signs, get_checkpoint_directories\n'), ((10959, 11032), 'utils.filesystem.get_modified_times', 
'fs.get_modified_times', (['checkpoint_directories', '"""state-dict-algorithm.pkl"""'], {}), "(checkpoint_directories, 'state-dict-algorithm.pkl')\n", (10980, 11032), True, 'import utils.filesystem as fs\n'), ((11290, 11324), 'os.path.join', 'os.path.join', (['args.d', '"""monitoring"""'], {}), "(args.d, 'monitoring')\n", (11302, 11324), False, 'import os\n'), ((13698, 13746), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Monitorer"""'}), "(description='Monitorer')\n", (13721, 13746), False, 'import argparse\n'), ((1183, 1238), 'utils.filesystem.get_modified_times', 'fs.get_modified_times', (['checkpoint_directories', 'filename'], {}), '(checkpoint_directories, filename)\n', (1204, 1238), True, 'import utils.filesystem as fs\n'), ((4158, 4237), 'utils.plotting.timeseries', 'plot.timeseries', (['list_of_genera', 'list_of_series'], {'xlabel': '"""generations"""', 'ylabel': 'k'}), "(list_of_genera, list_of_series, xlabel='generations', ylabel=k)\n", (4173, 4237), True, 'import utils.plotting as plot\n'), ((4342, 4353), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4351, 4353), True, 'import matplotlib.pyplot as plt\n'), ((4363, 4460), 'utils.plotting.timeseries_distribution', 'plot.timeseries_distribution', (['list_of_genera', 'list_of_series'], {'xlabel': '"""generations"""', 'ylabel': 'k'}), "(list_of_genera, list_of_series, xlabel=\n 'generations', ylabel=k)\n", (4391, 4460), True, 'import utils.plotting as plot\n'), ((4566, 4577), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4575, 4577), True, 'import matplotlib.pyplot as plt\n'), ((4587, 4677), 'utils.plotting.timeseries_median', 'plot.timeseries_median', (['list_of_genera', 'list_of_series'], {'xlabel': '"""generations"""', 'ylabel': 'k'}), "(list_of_genera, list_of_series, xlabel='generations',\n ylabel=k)\n", (4609, 4677), True, 'import utils.plotting as plot\n'), ((4778, 4789), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4787, 4789), 
True, 'import matplotlib.pyplot as plt\n'), ((5005, 5016), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5014, 5016), True, 'import matplotlib.pyplot as plt\n'), ((5112, 5145), 'utils.misc.length_of_longest', 'length_of_longest', (['list_of_series'], {}), '(list_of_series)\n', (5129, 5145), False, 'from utils.misc import get_equal_dicts, length_of_longest\n'), ((5427, 5562), 'utils.plotting.timeseries_mean_grouped', 'plot.timeseries_mean_grouped', (['list_of_longest_genera', 'list_of_longest_series', 'groups_longest_series'], {'xlabel': '"""generations"""', 'ylabel': 'k'}), "(list_of_longest_genera, list_of_longest_series,\n groups_longest_series, xlabel='generations', ylabel=k)\n", (5455, 5562), True, 'import utils.plotting as plot\n'), ((5676, 5687), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5685, 5687), True, 'import matplotlib.pyplot as plt\n'), ((10057, 10082), 'time.sleep', 'time.sleep', (['info_interval'], {}), '(info_interval)\n', (10067, 10082), False, 'import time\n'), ((10218, 10243), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (10233, 10243), False, 'import os\n'), ((10466, 10540), 'os.path.join', 'os.path.join', (['package_root_this_file', '"""experiments"""', '"""checkpoints"""', 'args.i'], {}), "(package_root_this_file, 'experiments', 'checkpoints', args.i)\n", (10478, 10540), False, 'import os\n'), ((10680, 10702), 'os.path.exists', 'os.path.exists', (['args.d'], {}), '(args.d)\n', (10694, 10702), False, 'import os\n'), ((10712, 10728), 'os.mkdir', 'os.mkdir', (['args.d'], {}), '(args.d)\n', (10720, 10728), False, 'import os\n'), ((11336, 11363), 'os.path.exists', 'os.path.exists', (['monitor_dir'], {}), '(monitor_dir)\n', (11350, 11363), False, 'import os\n'), ((11373, 11394), 'os.mkdir', 'os.mkdir', (['monitor_dir'], {}), '(monitor_dir)\n', (11381, 11394), False, 'import os\n'), ((11823, 11876), 'os.path.join', 'os.path.join', (['this_file_dir_local', '"""dropboxtoken.tok"""'], {}), 
"(this_file_dir_local, 'dropboxtoken.tok')\n", (11835, 11876), False, 'import os\n'), ((11892, 11918), 'os.path.exists', 'os.path.exists', (['token_file'], {}), '(token_file)\n', (11906, 11918), False, 'import os\n'), ((11933, 11966), 'utils.db.get_dropbox_client', 'db.get_dropbox_client', (['token_file'], {}), '(token_file)\n', (11954, 11966), True, 'import utils.db as db\n'), ((12456, 12497), 'data_analysis.invert_signs', 'invert_signs', (['stats_list', 'keys_to_monitor'], {}), '(stats_list, keys_to_monitor)\n', (12468, 12497), False, 'from data_analysis import print_group_info, get_best, get_max_chkpt_int, invert_signs, get_checkpoint_directories\n'), ((12603, 12663), 'utils.misc.get_equal_dicts', 'get_equal_dicts', (['algorithm_states'], {'ignored_keys': 'ignored_keys'}), '(algorithm_states, ignored_keys=ignored_keys)\n', (12618, 12663), False, 'from utils.misc import get_equal_dicts, length_of_longest\n'), ((12672, 12742), 'data_analysis.print_group_info', 'print_group_info', (['algorithm_states', 'groups'], {'directory': 'args.monitor_dir'}), '(algorithm_states, groups, directory=args.monitor_dir)\n', (12688, 12742), False, 'from data_analysis import print_group_info, get_best, get_max_chkpt_int, invert_signs, get_checkpoint_directories\n'), ((13332, 13343), 'time.time', 'time.time', ([], {}), '()\n', (13341, 13343), False, 'import time\n'), ((13511, 13545), 'data_analysis.get_checkpoint_directories', 'get_checkpoint_directories', (['args.d'], {}), '(args.d)\n', (13537, 13545), False, 'from data_analysis import print_group_info, get_best, get_max_chkpt_int, invert_signs, get_checkpoint_directories\n'), ((13563, 13636), 'utils.filesystem.get_modified_times', 'fs.get_modified_times', (['checkpoint_directories', '"""state-dict-algorithm.pkl"""'], {}), "(checkpoint_directories, 'state-dict-algorithm.pkl')\n", (13584, 13636), True, 'import utils.filesystem as fs\n'), ((1571, 1599), 'numpy.equal', 'np.equal', (['old_mtimes', 'mtimes'], {}), '(old_mtimes, 
mtimes)\n', (1579, 1599), True, 'import numpy as np\n'), ((1761, 1781), 'numpy.where', 'np.where', (['is_changed'], {}), '(is_changed)\n', (1769, 1781), True, 'import numpy as np\n'), ((4258, 4311), 'os.path.join', 'os.path.join', (['args.monitor_dir', "(k + '-all-series.pdf')"], {}), "(args.monitor_dir, k + '-all-series.pdf')\n", (4270, 4311), False, 'import os\n'), ((4476, 4535), 'os.path.join', 'os.path.join', (['args.monitor_dir', "(k + '-all-distribution.pdf')"], {}), "(args.monitor_dir, k + '-all-distribution.pdf')\n", (4488, 4535), False, 'import os\n'), ((4694, 4747), 'os.path.join', 'os.path.join', (['args.monitor_dir', "(k + '-all-median.pdf')"], {}), "(args.monitor_dir, k + '-all-median.pdf')\n", (4706, 4747), False, 'import os\n'), ((4909, 4974), 'os.path.join', 'os.path.join', (['args.monitor_dir', "(k + '-all-final-distribution.pdf')"], {}), "(args.monitor_dir, k + '-all-final-distribution.pdf')\n", (4921, 4974), False, 'import os\n'), ((5579, 5645), 'os.path.join', 'os.path.join', (['args.monitor_dir', "(k + '-all-series-mean-sd' + '.pdf')"], {}), "(args.monitor_dir, k + '-all-series-mean-sd' + '.pdf')\n", (5591, 5645), False, 'import os\n'), ((9185, 9219), 'data_analysis.get_checkpoint_directories', 'get_checkpoint_directories', (['args.d'], {}), '(args.d)\n', (9211, 9219), False, 'from data_analysis import print_group_info, get_best, get_max_chkpt_int, invert_signs, get_checkpoint_directories\n'), ((10554, 10575), 'os.path.isabs', 'os.path.isabs', (['args.d'], {}), '(args.d)\n', (10567, 10575), False, 'import os\n'), ((10594, 10668), 'os.path.join', 'os.path.join', (['package_root_this_file', '"""experiments"""', '"""checkpoints"""', 'args.d'], {}), "(package_root_this_file, 'experiments', 'checkpoints', args.d)\n", (10606, 10668), False, 'import os\n'), ((11735, 11801), 'os.path.relpath', 'os.path.relpath', (['args.d', 'package_parent_folder_monitored_directory'], {}), '(args.d, package_parent_folder_monitored_directory)\n', (11750, 11801), False, 
'import os\n'), ((13060, 13132), 'utils.db.upload_directory', 'db.upload_directory', (['dbx', 'args.d', 'args.dbx_dir'], {'upload_older_files': '(False)'}), '(dbx, args.d, args.dbx_dir, upload_older_files=False)\n', (13079, 13132), True, 'import utils.db as db\n'), ((13208, 13243), 'data_analysis.get_max_chkpt_int', 'get_max_chkpt_int', (['algorithm_states'], {}), '(algorithm_states)\n', (13225, 13243), False, 'from data_analysis import print_group_info, get_best, get_max_chkpt_int, invert_signs, get_checkpoint_directories\n'), ((2596, 2629), 'os.path.join', 'os.path.join', (['chkpt_dir', 'filename'], {}), '(chkpt_dir, filename)\n', (2608, 2629), False, 'import os\n'), ((2671, 2707), 'os.path.join', 'os.path.join', (['chkpt_dir', '"""stats.csv"""'], {}), "(chkpt_dir, 'stats.csv')\n", (2683, 2707), False, 'import os\n'), ((5831, 5852), 'numpy.where', 'np.where', (['(groups == g)'], {}), '(groups == g)\n', (5839, 5852), True, 'import numpy as np\n'), ((6156, 6235), 'utils.plotting.timeseries', 'plot.timeseries', (['list_of_genera', 'list_of_series'], {'xlabel': '"""generations"""', 'ylabel': 'k'}), "(list_of_genera, list_of_series, xlabel='generations', ylabel=k)\n", (6171, 6235), True, 'import utils.plotting as plot\n'), ((6379, 6390), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6388, 6390), True, 'import matplotlib.pyplot as plt\n'), ((6412, 6509), 'utils.plotting.timeseries_distribution', 'plot.timeseries_distribution', (['list_of_genera', 'list_of_series'], {'xlabel': '"""generations"""', 'ylabel': 'k'}), "(list_of_genera, list_of_series, xlabel=\n 'generations', ylabel=k)\n", (6440, 6509), True, 'import utils.plotting as plot\n'), ((6654, 6665), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6663, 6665), True, 'import matplotlib.pyplot as plt\n'), ((6687, 6777), 'utils.plotting.timeseries_median', 'plot.timeseries_median', (['list_of_genera', 'list_of_series'], {'xlabel': '"""generations"""', 'ylabel': 'k'}), "(list_of_genera, 
list_of_series, xlabel='generations',\n ylabel=k)\n", (6709, 6777), True, 'import utils.plotting as plot\n'), ((6917, 6928), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6926, 6928), True, 'import matplotlib.pyplot as plt\n'), ((7195, 7206), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7204, 7206), True, 'import matplotlib.pyplot as plt\n'), ((9826, 9837), 'time.time', 'time.time', ([], {}), '()\n', (9835, 9837), False, 'import time\n'), ((6268, 6336), 'os.path.join', 'os.path.join', (['args.monitor_dir', "(k + '-group-' + gstr + '-series.pdf')"], {}), "(args.monitor_dir, k + '-group-' + gstr + '-series.pdf')\n", (6280, 6336), False, 'import os\n'), ((6537, 6611), 'os.path.join', 'os.path.join', (['args.monitor_dir', "(k + '-group-' + gstr + '-distribution.pdf')"], {}), "(args.monitor_dir, k + '-group-' + gstr + '-distribution.pdf')\n", (6549, 6611), False, 'import os\n'), ((6806, 6874), 'os.path.join', 'os.path.join', (['args.monitor_dir', "(k + '-group-' + gstr + '-median.pdf')"], {}), "(args.monitor_dir, k + '-group-' + gstr + '-median.pdf')\n", (6818, 6874), False, 'import os\n'), ((7072, 7157), 'os.path.join', 'os.path.join', (['args.monitor_dir', "(k + '-group-' + gstr + '-final-distribution.pdf')"], {}), "(args.monitor_dir, k + '-group-' + gstr + '-final-distribution.pdf'\n )\n", (7084, 7157), False, 'import os\n')] |
import tensorrt as trt
import pycuda.driver as cuda
import cv2
import numpy as np
class TrtPacknet(object):
    """TrtPacknet class encapsulates things needed to run TRT Packnet (depth inference).

    Loads a serialized TensorRT engine from 'trt_<model>.trt', creates an
    execution context and allocates host/device buffers for all bindings.
    """

    def _load_engine(self):
        """Deserialize and return the TensorRT engine from 'trt_<model>.trt'."""
        TRTbin = 'trt_%s.trt' % self.model
        with open(TRTbin, 'rb') as f, trt.Runtime(self.trt_logger) as runtime:
            return runtime.deserialize_cuda_engine(f.read())

    def _allocate_buffers(self):
        """Allocate page-locked host buffers and device buffers for every engine binding.

        Returns:
            Tuple (host_inputs, host_outputs, cuda_inputs, cuda_outputs, bindings),
            where bindings holds the raw device pointers in engine binding order.
        """
        host_inputs, host_outputs, cuda_inputs, cuda_outputs, bindings = \
            [], [], [], [], []
        for binding in self.engine:
            size = trt.volume(self.engine.get_binding_shape(binding)) * \
                self.engine.max_batch_size
            host_mem = cuda.pagelocked_empty(size, np.float32)
            cuda_mem = cuda.mem_alloc(host_mem.nbytes)
            bindings.append(int(cuda_mem))
            if self.engine.binding_is_input(binding):
                host_inputs.append(host_mem)
                cuda_inputs.append(cuda_mem)
            else:
                host_outputs.append(host_mem)
                cuda_outputs.append(cuda_mem)
        return host_inputs, host_outputs, cuda_inputs, cuda_outputs, bindings

    def __init__(self, model, input_shape=(288, 384), cuda_ctx=None):
        """Initialize TensorRT plugins, engine and context.

        Args:
            model: Model name; the engine file 'trt_<model>.trt' must exist.
            input_shape: Expected network input size — presumably
                (height, width); TODO confirm against the engine.
            cuda_ctx: Optional CUDA context to push while allocating.

        Raises:
            RuntimeError: if CUDA resources cannot be allocated.
        """
        self.model = model
        self.input_shape = input_shape
        self.cuda_ctx = cuda_ctx
        if self.cuda_ctx:
            self.cuda_ctx.push()
        self.trt_logger = trt.Logger(trt.Logger.INFO)
        self.engine = self._load_engine()
        try:
            self.context = self.engine.create_execution_context()
            self.stream = cuda.Stream()
            self.host_inputs, self.host_outputs, self.cuda_inputs, self.cuda_outputs, self.bindings = self._allocate_buffers()
        except Exception as e:
            raise RuntimeError('fail to allocate CUDA resources') from e
        finally:
            if self.cuda_ctx:
                self.cuda_ctx.pop()

    def __del__(self):
        """Free CUDA memories.

        Guarded with hasattr: when __init__ raised before allocation
        completed, __del__ still runs during garbage collection and the
        previous unconditional `del self.stream` etc. raised AttributeError.
        """
        for attr in ('cuda_outputs', 'cuda_inputs', 'stream'):
            if hasattr(self, attr):
                delattr(self, attr)
if __name__ == "__main__":
import pycuda.autoinit # This is needed for initializing CUDA driver
trt_packnet = TrtPacknet("packnet") | [
"pycuda.driver.mem_alloc",
"pycuda.driver.pagelocked_empty",
"pycuda.driver.Stream",
"tensorrt.Logger",
"tensorrt.Runtime"
] | [((1527, 1554), 'tensorrt.Logger', 'trt.Logger', (['trt.Logger.INFO'], {}), '(trt.Logger.INFO)\n', (1537, 1554), True, 'import tensorrt as trt\n'), ((312, 340), 'tensorrt.Runtime', 'trt.Runtime', (['self.trt_logger'], {}), '(self.trt_logger)\n', (323, 340), True, 'import tensorrt as trt\n'), ((734, 773), 'pycuda.driver.pagelocked_empty', 'cuda.pagelocked_empty', (['size', 'np.float32'], {}), '(size, np.float32)\n', (755, 773), True, 'import pycuda.driver as cuda\n'), ((797, 828), 'pycuda.driver.mem_alloc', 'cuda.mem_alloc', (['host_mem.nbytes'], {}), '(host_mem.nbytes)\n', (811, 828), True, 'import pycuda.driver as cuda\n'), ((1703, 1716), 'pycuda.driver.Stream', 'cuda.Stream', ([], {}), '()\n', (1714, 1716), True, 'import pycuda.driver as cuda\n')] |
# Analyzes each point forecast and selects the best, day by day, saving
# forecasts and making the final forecast submission.
import os
import sys
import datetime
import logging
import logging.handlers as handlers
import json
import itertools as it
import pandas as pd
import numpy as np
# Open local settings (paths and stage flags used throughout this script)
with open('./settings.json') as local_json_file:
    local_submodule_settings = json.loads(local_json_file.read())
    local_json_file.close()  # NOTE(review): redundant — the with-block already closes the file
# Log setup: rotating file log named after this script, in the configured log path
current_script_name = os.path.basename(__file__).split('.')[0]
log_path_filename = ''.join([local_submodule_settings['log_path'], current_script_name, '.log'])
logging.basicConfig(filename=log_path_filename, level=logging.DEBUG,
                    format='%(asctime)s %(levelname)s %(name)s %(message)s')
logger = logging.getLogger(__name__)
logHandler = handlers.RotatingFileHandler(log_path_filename, maxBytes=10485760, backupCount=5)
logger.addHandler(logHandler)
# Load custom libraries from the path configured in settings
sys.path.insert(1, local_submodule_settings['custom_library_path'])
from save_forecast_and_make_submission import save_forecast_and_submission
from stochastic_model_obtain_results import stochastic_simulation_results_analysis
class explore_day_by_day_results_and_generate_submission:
    """Select, for every time series and every forecast day, the point forecast
    with the lowest absolute error among ten candidate models, then store the
    combined forecast, generate the submission and evaluate the result.
    """

    def run(self, submission_name, local_ergs_settings):
        """Build the best-lower-error day-by-day forecast and submit it.

        :param submission_name: name used for the generated submission file.
        :param local_ergs_settings: settings dictionary (paths, stage, horizon).
        :return: True on success, False when any handled error occurs.
        """
        try:
            print('\nstarting the granular day_by_day ts_by_ts point forecast selection approach')
            # first check the stage, if evaluation stage, this means that no MSE are available, warning about this
            if local_ergs_settings['competition_stage'] != 'submitting_after_June_1th_using_1913days':
                print('settings indicate that the final stage is now in progress')
                print('so there not available real MSE for comparison')
                print('the last saved data will be used and allow to continue..')
                print(''.join(['\x1b[0;2;41m',
                               'but be careful with this submission and consider other way to make the final submit',
                               '\x1b[0m']))
            # loading the forecasts
            first_model_forecast = np.load(''.join([local_ergs_settings['train_data_path'],
                                                    'first_model_forecast_data.npy']))
            second_model_forecast = np.load(''.join([local_ergs_settings['train_data_path'],
                                                     'second_model_forecast_data.npy']))
            third_model_forecast = np.load(''.join([local_ergs_settings['train_data_path'],
                                                    'third_model_forecast_data.npy']))
            fourth_model_forecast = np.load(''.join([local_ergs_settings['train_data_path'],
                                                     'fourth_model_forecast_data.npy']))
            # this forecast has the shape=30490, 28
            # NOTE(review): it is zero-padded below into a (60980, 28) array,
            # presumably to match the layout of the other models -- confirm.
            fifth_model_forecast_30490_28 = np.load(''.join([local_ergs_settings['train_data_path'],
                                                             'fifth_model_forecast_data.npy']))
            fifth_model_forecast = np.zeros(shape=(60980, 28), dtype=np.dtype('float32'))
            fifth_model_forecast[0: 30490, :] = fifth_model_forecast_30490_28
            sixth_model_forecast = np.load(''.join([local_ergs_settings['train_data_path'],
                                                    'sixth_model_forecast_data.npy']))
            seventh_model_forecast = np.load(''.join([local_ergs_settings['train_data_path'],
                                                      'seventh_model_forecast_data.npy']))
            eighth_model_forecast = np.load(''.join([local_ergs_settings['train_data_path'],
                                                     'eighth_model_nearest_neighbor_forecast_data.npy']))
            ninth_model_forecast = np.load(''.join([local_ergs_settings['train_data_path'],
                                                    'ninth_model_random_average_simulation_forecast_data.npy']))
            best_mse_model_forecast = np.load(''.join([local_ergs_settings['train_data_path'],
                                                       'mse_based_best_ts_forecast.npy']))
            # day by day comparison
            with open(''.join([local_ergs_settings['hyperparameters_path'],
                               'organic_in_block_time_serie_based_model_hyperparameters.json'])) \
                    as local_r_json_file:
                local_model_ergs_hyperparameters = json.loads(local_r_json_file.read())
                local_r_json_file.close()
            nof_ts = local_ergs_settings['number_of_time_series']
            local_forecast_horizon_days = local_ergs_settings['forecast_horizon_days']
            best_lower_error_ts_day_by_day_y_pred = np.zeros(shape=(nof_ts, local_forecast_horizon_days),
                                                             dtype=np.dtype('float32'))
            count_best_first_model, count_best_second_model, count_best_third_model, count_best_fourth_model,\
                count_best_fifth_model, count_best_sixth_model, count_best_seventh_model, count_best_eighth_model,\
                count_best_ninth_model, count_best_mse_model = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
            ts_model_mse = []
            # accessing ground_truth data and rechecking stage of competition
            local_ergs_raw_data_filename = 'sales_train_evaluation.csv'
            local_ergs_raw_unit_sales = pd.read_csv(''.join([local_ergs_settings['raw_data_path'],
                                                             local_ergs_raw_data_filename]))
            print('raw sales data accessed (day_by_day_approach_best_lower_error_model results evaluation)')
            # extract data and check dimensions
            local_ergs_raw_unit_sales = local_ergs_raw_unit_sales.iloc[:, 6:].values
            local_max_selling_time = np.shape(local_ergs_raw_unit_sales)[1]
            local_settings_max_selling_time = local_ergs_settings['max_selling_time']
            if local_settings_max_selling_time + 28 <= local_max_selling_time:
                local_ergs_raw_unit_sales_ground_truth = local_ergs_raw_unit_sales
                print('ground_truth data obtained')
                print('length raw data ground truth:', local_ergs_raw_unit_sales_ground_truth.shape[1])
                local_ergs_raw_unit_sales = local_ergs_raw_unit_sales[:, :local_settings_max_selling_time]
                print('length raw data for training:', local_ergs_raw_unit_sales.shape[1])
            elif local_max_selling_time != local_settings_max_selling_time:
                print("settings doesn't match data dimensions, it must be rechecked before continue"
                      "(_day_by_day_best_lower_error_model_module)")
                logger.info(''.join(['\n', datetime.datetime.now().strftime("%d.%b %Y %H:%M:%S"),
                                     ' data dimensions does not match settings']))
                return False
            else:
                if local_ergs_settings['competition_stage'] != 'submitting_after_June_1th_using_1941days':
                    print(''.join(['\x1b[0;2;41m', 'Warning', '\x1b[0m']))
                    print('please check: forecast horizon days will be included within training data')
                    print('It was expected that the last 28 days were not included..')
                    print('to avoid overfitting')
                elif local_ergs_settings['competition_stage'] == 'submitting_after_June_1th_using_1941days':
                    print(''.join(['\x1b[0;2;41m', 'Straight end of the competition', '\x1b[0m']))
                    print('settings indicate that this is the last stage!')
                    print('caution: take in consideration that evaluations in this point are not useful, '
                          'because will be made using the last data (the same used in training)')
                    # will only use the last data available
                    # NOTE(review): on this code path local_ergs_raw_unit_sales_ground_truth
                    # was never assigned above, so the next statement would raise
                    # NameError -- verify the intended source of the slice.
                    local_ergs_raw_unit_sales_ground_truth = \
                        local_ergs_raw_unit_sales_ground_truth[:, -local_forecast_horizon_days:]
            # very granular approach
            # iterating in each point_forecast, calculating error and selecting best lower error model forecast
            for time_serie_index, day_index in it.product(range(nof_ts), range(local_forecast_horizon_days)):
                # acquiring day_by_day data
                ground_truth_ts_day = local_ergs_raw_unit_sales_ground_truth[time_serie_index, day_index]
                first_model_ts_day = first_model_forecast[time_serie_index, day_index]
                second_model_ts_day = second_model_forecast[time_serie_index, day_index]
                third_model_ts_day = third_model_forecast[time_serie_index, day_index]
                fourth_model_ts_day = fourth_model_forecast[time_serie_index, day_index]
                fifth_model_ts_day = fifth_model_forecast[time_serie_index, day_index]
                sixth_model_ts_day = sixth_model_forecast[time_serie_index, day_index]
                seventh_model_ts_day = seventh_model_forecast[time_serie_index, day_index]
                eighth_model_ts_day = eighth_model_forecast[time_serie_index, day_index].astype(np.dtype('float32'))
                ninth_model_ts_day = ninth_model_forecast[time_serie_index, day_index]
                best_mse_model_ts_day = best_mse_model_forecast[time_serie_index, day_index]
                # calculating error
                first_model_ts_day_error = np.abs(ground_truth_ts_day - first_model_ts_day)
                second_model_ts_day_error = np.abs(ground_truth_ts_day - second_model_ts_day)
                third_model_ts_day_error = np.abs(ground_truth_ts_day - third_model_ts_day)
                fourth_model_ts_day_error = np.abs(ground_truth_ts_day - fourth_model_ts_day)
                fifth_model_ts_day_error = np.abs(ground_truth_ts_day - fifth_model_ts_day)
                sixth_model_ts_day_error = np.abs(ground_truth_ts_day - sixth_model_ts_day)
                seventh_model_ts_day_error = np.abs(ground_truth_ts_day - seventh_model_ts_day)
                eighth_model_ts_day_error = np.abs(ground_truth_ts_day - eighth_model_ts_day)
                ninth_model_ts_day_error = np.abs(ground_truth_ts_day - ninth_model_ts_day)
                best_mse_model_ts_day_error = np.abs(ground_truth_ts_day - best_mse_model_ts_day)
                # selecting best point ts_day forecast
                # NOTE(review): ties are resolved by the order of this chain
                # (earlier models win on equal error); the commented-out
                # branches deliberately exclude models 10 and 4 -- confirm.
                if first_model_ts_day_error <= second_model_ts_day_error and \
                        first_model_ts_day_error <= third_model_ts_day_error \
                        and first_model_ts_day_error <= fourth_model_ts_day_error \
                        and first_model_ts_day_error <= fifth_model_ts_day_error\
                        and first_model_ts_day_error <= sixth_model_ts_day_error \
                        and first_model_ts_day_error <= seventh_model_ts_day_error\
                        and first_model_ts_day_error <= eighth_model_ts_day_error \
                        and first_model_ts_day_error <= ninth_model_ts_day_error \
                        and first_model_ts_day_error <= best_mse_model_ts_day_error:
                    best_lower_error_ts_day_by_day_y_pred[time_serie_index, day_index] = first_model_ts_day
                    count_best_first_model += 1
                    ts_model_mse.append([time_serie_index, int(1), first_model_ts_day_error])
                # elif best_mse_model_ts_day_error <= first_model_ts_day_error \
                #         and best_mse_model_ts_day_error <= second_model_ts_day_error \
                #         and best_mse_model_ts_day_error <= third_model_ts_day_error \
                #         and best_mse_model_ts_day_error <= fourth_model_ts_day_error\
                #         and best_mse_model_ts_day_error <= fifth_model_ts_day_error \
                #         and best_mse_model_ts_day_error <= sixth_model_ts_day_error\
                #         and best_mse_model_ts_day_error <= seventh_model_ts_day_error \
                #         and best_mse_model_ts_day_error <= eighth_model_ts_day_error\
                #         and best_mse_model_ts_day_error <= ninth_model_ts_day_error:
                #     best_lower_error_ts_day_by_day_y_pred[time_serie_index, day_index] = best_mse_model_ts_day
                #     count_best_mse_model += 1
                #     ts_model_mse.append([time_serie_index, int(10), best_mse_model_ts_day_error])
                elif second_model_ts_day_error <= first_model_ts_day_error \
                        and second_model_ts_day_error <= third_model_ts_day_error \
                        and second_model_ts_day_error <= fourth_model_ts_day_error \
                        and second_model_ts_day_error <= fifth_model_ts_day_error\
                        and second_model_ts_day_error <= sixth_model_ts_day_error \
                        and second_model_ts_day_error <= seventh_model_ts_day_error\
                        and second_model_ts_day_error <= eighth_model_ts_day_error \
                        and second_model_ts_day_error <= ninth_model_ts_day_error\
                        and second_model_ts_day_error <= best_mse_model_ts_day_error:
                    best_lower_error_ts_day_by_day_y_pred[time_serie_index, day_index] = second_model_ts_day
                    count_best_second_model += 1
                    ts_model_mse.append([time_serie_index, int(2), second_model_ts_day_error])
                elif third_model_ts_day_error <= first_model_ts_day_error \
                        and third_model_ts_day_error <= second_model_ts_day_error \
                        and third_model_ts_day_error <= fourth_model_ts_day_error \
                        and third_model_ts_day_error <= fifth_model_ts_day_error\
                        and third_model_ts_day_error <= sixth_model_ts_day_error \
                        and third_model_ts_day_error <= seventh_model_ts_day_error\
                        and third_model_ts_day_error <= eighth_model_ts_day_error \
                        and third_model_ts_day_error <= ninth_model_ts_day_error\
                        and third_model_ts_day_error <= best_mse_model_ts_day_error:
                    best_lower_error_ts_day_by_day_y_pred[time_serie_index, day_index] = third_model_ts_day
                    count_best_third_model += 1
                    ts_model_mse.append([time_serie_index, int(3), third_model_ts_day_error])
                # elif fourth_model_ts_day_error <= first_model_ts_day_error \
                #         and fourth_model_ts_day_error <= second_model_ts_day_error \
                #         and fourth_model_ts_day_error <= third_model_ts_day_error \
                #         and fourth_model_ts_day_error <= fifth_model_ts_day_error\
                #         and fourth_model_ts_day_error <= sixth_model_ts_day_error \
                #         and fourth_model_ts_day_error <= seventh_model_ts_day_error\
                #         and fourth_model_ts_day_error <= eighth_model_ts_day_error \
                #         and fourth_model_ts_day_error <= ninth_model_ts_day_error\
                #         and fourth_model_ts_day_error <= best_mse_model_ts_day_error:
                #     best_lower_error_ts_day_by_day_y_pred[time_serie_index, day_index] = fourth_model_ts_day
                #     count_best_fourth_model += 1
                #     ts_model_mse.append([time_serie_index, int(4), fourth_model_ts_day_error])
                elif fifth_model_ts_day_error <= first_model_ts_day_error \
                        and fifth_model_ts_day_error <= second_model_ts_day_error \
                        and fifth_model_ts_day_error <= third_model_ts_day_error \
                        and fifth_model_ts_day_error <= fourth_model_ts_day_error\
                        and fifth_model_ts_day_error <= sixth_model_ts_day_error \
                        and fifth_model_ts_day_error <= seventh_model_ts_day_error\
                        and fifth_model_ts_day_error <= eighth_model_ts_day_error \
                        and fifth_model_ts_day_error <= ninth_model_ts_day_error\
                        and fifth_model_ts_day_error <= best_mse_model_ts_day_error:
                    best_lower_error_ts_day_by_day_y_pred[time_serie_index, day_index] = fifth_model_ts_day
                    count_best_fifth_model += 1
                    ts_model_mse.append([time_serie_index, int(5), fifth_model_ts_day_error])
                elif sixth_model_ts_day_error <= first_model_ts_day_error \
                        and sixth_model_ts_day_error <= second_model_ts_day_error \
                        and sixth_model_ts_day_error <= third_model_ts_day_error \
                        and sixth_model_ts_day_error <= fourth_model_ts_day_error\
                        and sixth_model_ts_day_error <= fifth_model_ts_day_error \
                        and sixth_model_ts_day_error <= seventh_model_ts_day_error\
                        and sixth_model_ts_day_error <= eighth_model_ts_day_error \
                        and sixth_model_ts_day_error <= ninth_model_ts_day_error\
                        and sixth_model_ts_day_error <= best_mse_model_ts_day_error:
                    best_lower_error_ts_day_by_day_y_pred[time_serie_index, day_index] = sixth_model_ts_day
                    count_best_sixth_model += 1
                    ts_model_mse.append([time_serie_index, int(6), sixth_model_ts_day_error])
                elif seventh_model_ts_day_error <= first_model_ts_day_error \
                        and seventh_model_ts_day_error <= second_model_ts_day_error \
                        and seventh_model_ts_day_error <= third_model_ts_day_error \
                        and seventh_model_ts_day_error <= fourth_model_ts_day_error\
                        and seventh_model_ts_day_error <= fifth_model_ts_day_error \
                        and seventh_model_ts_day_error <= sixth_model_ts_day_error\
                        and seventh_model_ts_day_error <= eighth_model_ts_day_error \
                        and seventh_model_ts_day_error <= ninth_model_ts_day_error\
                        and seventh_model_ts_day_error <= best_mse_model_ts_day_error:
                    best_lower_error_ts_day_by_day_y_pred[time_serie_index, day_index] = seventh_model_ts_day
                    count_best_seventh_model += 1
                    ts_model_mse.append([time_serie_index, int(7), seventh_model_ts_day_error])
                elif ninth_model_ts_day_error <= first_model_ts_day_error \
                        and ninth_model_ts_day_error <= second_model_ts_day_error \
                        and ninth_model_ts_day_error <= third_model_ts_day_error \
                        and ninth_model_ts_day_error <= fourth_model_ts_day_error\
                        and ninth_model_ts_day_error <= fifth_model_ts_day_error \
                        and ninth_model_ts_day_error <= sixth_model_ts_day_error\
                        and ninth_model_ts_day_error <= seventh_model_ts_day_error \
                        and ninth_model_ts_day_error <= eighth_model_ts_day_error\
                        and ninth_model_ts_day_error <= best_mse_model_ts_day_error:
                    best_lower_error_ts_day_by_day_y_pred[time_serie_index, day_index] = ninth_model_ts_day
                    count_best_ninth_model += 1
                    ts_model_mse.append([time_serie_index, int(9), ninth_model_ts_day_error])
                else:
                    best_lower_error_ts_day_by_day_y_pred[time_serie_index, day_index] = eighth_model_ts_day
                    count_best_eighth_model += 1
                    ts_model_mse.append([time_serie_index, int(8), eighth_model_ts_day_error])
            # finally reporting the results
            print('it was used ', count_best_first_model, ' ts day_by_day forecasts from first model')
            print('it was used ', count_best_second_model, ' ts day_by_day forecasts from second model')
            print('it was used ', count_best_third_model, ' ts day_by_day forecasts from third model')
            print('it was used ', count_best_fourth_model, ' ts day_by_day forecasts from fourth model')
            print('it was used ', count_best_fifth_model, ' ts day_by_day forecasts from fifth model')
            print('it was used ', count_best_sixth_model, ' ts day_by_day forecasts from sixth model')
            print('it was used ', count_best_seventh_model, ' ts day_by_day forecasts from seventh model')
            print('it was used ', count_best_eighth_model, ' ts day_by_day forecasts from eighth model')
            print('it was used ', count_best_ninth_model, ' ts day_by_day forecasts from ninth model')
            print('it was used ', count_best_mse_model, ' ts day_by_day forecasts from best_mse (tenth) model')
            # saving best mse_based between different models forecast and submission
            store_and_submit_best_model_forecast = save_forecast_and_submission()
            point_error_based_best_model_save_review = \
                store_and_submit_best_model_forecast.store_and_submit(submission_name, local_ergs_settings,
                                                                      best_lower_error_ts_day_by_day_y_pred)
            if point_error_based_best_model_save_review:
                print('best low point forecast error and generate_submission data and submission done')
            else:
                print('error at storing best_low_point_forecast_error data and generate_submission or submission')
            # evaluating the best_lower_error criteria granular_model forecast
            local_ergs_forecasts_name = 'day_by_day_best_low_error_criteria_model_forecast'
            zeros_as_forecast = stochastic_simulation_results_analysis()
            zeros_as_forecast_review = \
                zeros_as_forecast.evaluate_stochastic_simulation(local_ergs_settings,
                                                                 local_model_ergs_hyperparameters,
                                                                 local_ergs_raw_unit_sales,
                                                                 local_ergs_raw_unit_sales_ground_truth,
                                                                 local_ergs_forecasts_name)
            # saving errors by time_serie and storing the estimated best model
            ts_model_mse = np.array(ts_model_mse)
            np.save(''.join([local_ergs_settings['models_evaluation_path'],
                             'best_low_point_forecast_error_ts_model_mse']), ts_model_mse)
            np.savetxt(''.join([local_ergs_settings['models_evaluation_path'],
                                'best_low_point_forecast_error_ts_model_mse.csv']),
                       ts_model_mse, fmt='%10.15f', delimiter=',', newline='\n')
        except Exception as submodule_error:
            print('best low point forecast error and generate_submission submodule_error: ', submodule_error)
            logger.info('error in best low point forecast error and generate_submission submodule')
            logger.error(str(submodule_error), exc_info=True)
            return False
        return True
| [
"logging.basicConfig",
"logging.getLogger",
"numpy.shape",
"sys.path.insert",
"numpy.abs",
"numpy.dtype",
"logging.handlers.RotatingFileHandler",
"stochastic_model_obtain_results.stochastic_simulation_results_analysis",
"numpy.array",
"datetime.datetime.now",
"os.path.basename",
"save_forecast... | [((612, 742), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'log_path_filename', 'level': 'logging.DEBUG', 'format': '"""%(asctime)s %(levelname)s %(name)s %(message)s"""'}), "(filename=log_path_filename, level=logging.DEBUG, format\n ='%(asctime)s %(levelname)s %(name)s %(message)s')\n", (631, 742), False, 'import logging\n'), ((767, 794), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (784, 794), False, 'import logging\n'), ((808, 893), 'logging.handlers.RotatingFileHandler', 'handlers.RotatingFileHandler', (['log_path_filename'], {'maxBytes': '(10485760)', 'backupCount': '(5)'}), '(log_path_filename, maxBytes=10485760,\n backupCount=5)\n', (836, 893), True, 'import logging.handlers as handlers\n'), ((945, 1012), 'sys.path.insert', 'sys.path.insert', (['(1)', "local_submodule_settings['custom_library_path']"], {}), "(1, local_submodule_settings['custom_library_path'])\n", (960, 1012), False, 'import sys\n'), ((474, 500), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (490, 500), False, 'import os\n'), ((21052, 21082), 'save_forecast_and_make_submission.save_forecast_and_submission', 'save_forecast_and_submission', ([], {}), '()\n', (21080, 21082), False, 'from save_forecast_and_make_submission import save_forecast_and_submission\n'), ((21855, 21895), 'stochastic_model_obtain_results.stochastic_simulation_results_analysis', 'stochastic_simulation_results_analysis', ([], {}), '()\n', (21893, 21895), False, 'from stochastic_model_obtain_results import stochastic_simulation_results_analysis\n'), ((22518, 22540), 'numpy.array', 'np.array', (['ts_model_mse'], {}), '(ts_model_mse)\n', (22526, 22540), True, 'import numpy as np\n'), ((5904, 5939), 'numpy.shape', 'np.shape', (['local_ergs_raw_unit_sales'], {}), '(local_ergs_raw_unit_sales)\n', (5912, 5939), True, 'import numpy as np\n'), ((9531, 9579), 'numpy.abs', 'np.abs', (['(ground_truth_ts_day - first_model_ts_day)'], 
{}), '(ground_truth_ts_day - first_model_ts_day)\n', (9537, 9579), True, 'import numpy as np\n'), ((9624, 9673), 'numpy.abs', 'np.abs', (['(ground_truth_ts_day - second_model_ts_day)'], {}), '(ground_truth_ts_day - second_model_ts_day)\n', (9630, 9673), True, 'import numpy as np\n'), ((9717, 9765), 'numpy.abs', 'np.abs', (['(ground_truth_ts_day - third_model_ts_day)'], {}), '(ground_truth_ts_day - third_model_ts_day)\n', (9723, 9765), True, 'import numpy as np\n'), ((9810, 9859), 'numpy.abs', 'np.abs', (['(ground_truth_ts_day - fourth_model_ts_day)'], {}), '(ground_truth_ts_day - fourth_model_ts_day)\n', (9816, 9859), True, 'import numpy as np\n'), ((9903, 9951), 'numpy.abs', 'np.abs', (['(ground_truth_ts_day - fifth_model_ts_day)'], {}), '(ground_truth_ts_day - fifth_model_ts_day)\n', (9909, 9951), True, 'import numpy as np\n'), ((9995, 10043), 'numpy.abs', 'np.abs', (['(ground_truth_ts_day - sixth_model_ts_day)'], {}), '(ground_truth_ts_day - sixth_model_ts_day)\n', (10001, 10043), True, 'import numpy as np\n'), ((10089, 10139), 'numpy.abs', 'np.abs', (['(ground_truth_ts_day - seventh_model_ts_day)'], {}), '(ground_truth_ts_day - seventh_model_ts_day)\n', (10095, 10139), True, 'import numpy as np\n'), ((10184, 10233), 'numpy.abs', 'np.abs', (['(ground_truth_ts_day - eighth_model_ts_day)'], {}), '(ground_truth_ts_day - eighth_model_ts_day)\n', (10190, 10233), True, 'import numpy as np\n'), ((10277, 10325), 'numpy.abs', 'np.abs', (['(ground_truth_ts_day - ninth_model_ts_day)'], {}), '(ground_truth_ts_day - ninth_model_ts_day)\n', (10283, 10325), True, 'import numpy as np\n'), ((10372, 10423), 'numpy.abs', 'np.abs', (['(ground_truth_ts_day - best_mse_model_ts_day)'], {}), '(ground_truth_ts_day - best_mse_model_ts_day)\n', (10378, 10423), True, 'import numpy as np\n'), ((3138, 3157), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (3146, 3157), True, 'import numpy as np\n'), ((4911, 4930), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), 
"('float32')\n", (4919, 4930), True, 'import numpy as np\n'), ((9250, 9269), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (9258, 9269), True, 'import numpy as np\n'), ((6834, 6857), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6855, 6857), False, 'import datetime\n')] |
from random import randint
from typing import Any
from typing import Dict
from retrying import retry
import apysc as ap
from apysc._event.mouse_up_interface import MouseUpInterface
from apysc._expression import expression_data_util
from apysc._type.variable_name_interface import VariableNameInterface
class _TestMouseUp(MouseUpInterface, VariableNameInterface):

    def __init__(self) -> None:
        """Fixture combining the mouse-up and variable-name interfaces,
        exposing a fixed JavaScript variable name for the tests."""
        self.variable_name = 'test_mouse_up'
class TestMouseUpInterface:
    """Tests for registering, expressing and unbinding mouse-up handlers."""

    def on_mouse_up_1(
            self, e: ap.MouseEvent, options: Dict[str, Any]) -> None:
        """
        Test handler for mouse up event.

        Parameters
        ----------
        e : MouseEvent
            Created event instance.
        options : dict
            Optional arguments dictionary.
        """

    def on_mouse_up_2(
            self, e: ap.MouseEvent, options: Dict[str, Any]) -> None:
        """
        Test handler for mouse up event.

        Parameters
        ----------
        e : MouseEvent
            Created event instance.
        options : dict
            Optional arguments dictionary.
        """

    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test__initialize_mouse_up_handlers_if_not_initialized(self) -> None:
        interface_1: MouseUpInterface = MouseUpInterface()
        interface_1._initialize_mouse_up_handlers_if_not_initialized()
        assert interface_1._mouse_up_handlers == {}
        # The second call must be a no-op: it must not reset or re-create state.
        interface_1._initialize_mouse_up_handlers_if_not_initialized()
        assert interface_1._mouse_up_handlers == {}

    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test_mouseup(self) -> None:
        expression_data_util.empty_expression()
        interface_1: _TestMouseUp = _TestMouseUp()
        name: str = interface_1.mouseup(
            handler=self.on_mouse_up_1, options={'msg': 'Hello!'})
        assert name in interface_1._mouse_up_handlers
        expression: str = \
            expression_data_util.get_current_event_handler_scope_expression()
        expected: str = f'function {name}('
        assert expected in expression
        expression = expression_data_util.get_current_expression()
        expected = (
            f'{interface_1.variable_name}.mouseup({name});'
        )
        assert expected in expression

    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test_unbind_mouseup(self) -> None:
        expression_data_util.empty_expression()
        interface_1: _TestMouseUp = _TestMouseUp()
        name: str = interface_1.mouseup(handler=self.on_mouse_up_1)
        interface_1.unbind_mouseup(handler=self.on_mouse_up_1)
        assert interface_1._mouse_up_handlers == {}
        expression: str = expression_data_util.get_current_expression()
        expected: str = (
            f'{interface_1.variable_name}.off('
            f'"{ap.MouseEventType.MOUSEUP.value}", {name});'
        )
        assert expected in expression

    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test_unbind_mouseup_all(self) -> None:
        expression_data_util.empty_expression()
        interface_1: _TestMouseUp = _TestMouseUp()
        interface_1.mouseup(handler=self.on_mouse_up_1)
        interface_1.mouseup(handler=self.on_mouse_up_2)
        interface_1.unbind_mouseup_all()
        # Bug fix: this comparison previously lacked `assert`, so it was a no-op
        # statement and the check never ran.
        assert interface_1._mouse_up_handlers == {}
        expression: str = expression_data_util.get_current_expression()
        expected: str = (
            f'{interface_1.variable_name}.off('
            f'"{ap.MouseEventType.MOUSEUP.value}");'
        )
        assert expected in expression
| [
"apysc._expression.expression_data_util.empty_expression",
"apysc._expression.expression_data_util.get_current_expression",
"apysc._expression.expression_data_util.get_current_event_handler_scope_expression",
"apysc._event.mouse_up_interface.MouseUpInterface",
"random.randint"
] | [((1416, 1434), 'apysc._event.mouse_up_interface.MouseUpInterface', 'MouseUpInterface', ([], {}), '()\n', (1432, 1434), False, 'from apysc._event.mouse_up_interface import MouseUpInterface\n'), ((1805, 1844), 'apysc._expression.expression_data_util.empty_expression', 'expression_data_util.empty_expression', ([], {}), '()\n', (1842, 1844), False, 'from apysc._expression import expression_data_util\n'), ((2104, 2169), 'apysc._expression.expression_data_util.get_current_event_handler_scope_expression', 'expression_data_util.get_current_event_handler_scope_expression', ([], {}), '()\n', (2167, 2169), False, 'from apysc._expression import expression_data_util\n'), ((2278, 2323), 'apysc._expression.expression_data_util.get_current_expression', 'expression_data_util.get_current_expression', ([], {}), '()\n', (2321, 2323), False, 'from apysc._expression import expression_data_util\n'), ((2582, 2621), 'apysc._expression.expression_data_util.empty_expression', 'expression_data_util.empty_expression', ([], {}), '()\n', (2619, 2621), False, 'from apysc._expression import expression_data_util\n'), ((2887, 2932), 'apysc._expression.expression_data_util.get_current_expression', 'expression_data_util.get_current_expression', ([], {}), '()\n', (2930, 2932), False, 'from apysc._expression import expression_data_util\n'), ((3250, 3289), 'apysc._expression.expression_data_util.empty_expression', 'expression_data_util.empty_expression', ([], {}), '()\n', (3287, 3289), False, 'from apysc._expression import expression_data_util\n'), ((3571, 3616), 'apysc._expression.expression_data_util.get_current_expression', 'expression_data_util.get_current_expression', ([], {}), '()\n', (3614, 3616), False, 'from apysc._expression import expression_data_util\n'), ((1278, 1295), 'random.randint', 'randint', (['(10)', '(3000)'], {}), '(10, 3000)\n', (1285, 1295), False, 'from random import randint\n'), ((1740, 1757), 'random.randint', 'randint', (['(10)', '(3000)'], {}), '(10, 3000)\n', (1747, 
1757), False, 'from random import randint\n'), ((2510, 2527), 'random.randint', 'randint', (['(10)', '(3000)'], {}), '(10, 3000)\n', (2517, 2527), False, 'from random import randint\n'), ((3174, 3191), 'random.randint', 'randint', (['(10)', '(3000)'], {}), '(10, 3000)\n', (3181, 3191), False, 'from random import randint\n')] |
from ipywidgets import interact
import ipywidgets as widgets
from IPython.display import display
class SimpleWidgets():
    """Small ipywidgets demo for Jupyter notebooks.

    `run_simple_widgets` renders an int slider plus a button; clicking the
    button shows a follow-up widget whose type depends on the slider value,
    and `last_value` exposes that follow-up widget's current value.
    """
    def __init__(self):
        # All widgets are created lazily by run_simple_widgets() and
        # do_stuff_on_click(); None means "not shown yet".
        self._int_slider_widget = None
        self._clicked_next_widget = None
        self._button = None
    def do_stuff_on_click(self, b):
        """Button callback: replace any previous follow-up widget with a new one.

        `b` is the clicked Button instance passed in by ipywidgets (unused).
        """
        # Close the previously shown follow-up widget, if any, before
        # rendering the next one.
        if self._clicked_next_widget:
            self._clicked_next_widget.close()
        if self._int_slider_widget.value < 10:
            # @interact renders the widget returned by the decorated function
            # (per ipywidgets' interact behavior).
            @interact
            def get_check_box():
                x = widgets.Checkbox(value=False, description='Check me')
                self._clicked_next_widget = x
                return x
        else:
            @interact
            def get_text():
                x = widgets.Dropdown(options=['alpha', 'beta', 'gamma'], value='alpha', description='Text:')
                self._clicked_next_widget = x
                return x
    def run_simple_widgets(self):
        """Render the slider and the button and hook up the click handler."""
        @interact
        def get_int_slider():
            x = widgets.IntSlider(min=0, max=30, step=1, value=10)
            self._int_slider_widget = x
            return x
        self._button = widgets.Button(description="Click Me!")
        display(self._button)
        self._button.on_click(self.do_stuff_on_click)
    @property
    def last_value(self):
        """Current value of the follow-up widget, or None if none shown yet."""
        if self._clicked_next_widget:
            return self._clicked_next_widget.value
        else:
            return None
| [
"IPython.display.display",
"ipywidgets.IntSlider",
"ipywidgets.Dropdown",
"ipywidgets.Button",
"ipywidgets.Checkbox"
] | [((1102, 1141), 'ipywidgets.Button', 'widgets.Button', ([], {'description': '"""Click Me!"""'}), "(description='Click Me!')\n", (1116, 1141), True, 'import ipywidgets as widgets\n'), ((1150, 1171), 'IPython.display.display', 'display', (['self._button'], {}), '(self._button)\n', (1157, 1171), False, 'from IPython.display import display\n'), ((966, 1016), 'ipywidgets.IntSlider', 'widgets.IntSlider', ([], {'min': '(0)', 'max': '(30)', 'step': '(1)', 'value': '(10)'}), '(min=0, max=30, step=1, value=10)\n', (983, 1016), True, 'import ipywidgets as widgets\n'), ((497, 550), 'ipywidgets.Checkbox', 'widgets.Checkbox', ([], {'value': '(False)', 'description': '"""Check me"""'}), "(value=False, description='Check me')\n", (513, 550), True, 'import ipywidgets as widgets\n'), ((706, 798), 'ipywidgets.Dropdown', 'widgets.Dropdown', ([], {'options': "['alpha', 'beta', 'gamma']", 'value': '"""alpha"""', 'description': '"""Text:"""'}), "(options=['alpha', 'beta', 'gamma'], value='alpha',\n description='Text:')\n", (722, 798), True, 'import ipywidgets as widgets\n')] |
"""Record mono 16-bit 44.1 kHz audio from an ALSA headset mic into test.wav.

Runs until interrupted (Ctrl-C). The wave file is closed on exit so the WAV
header (frame counts) is finalized.
"""
import alsaaudio
import wave

# Enable capture on the 'Mic' control of card 0 and set the input level.
mixer = alsaaudio.Mixer(control='Mic', cardindex=0)
mixer.setrec(1)
mixer.setvolume(80, 0, alsaaudio.PCM_CAPTURE)

# Capture PCM: mono, 44.1 kHz, signed 16-bit little-endian, 1024-frame periods.
inp = alsaaudio.PCM(type=alsaaudio.PCM_CAPTURE, device='sysdefault:CARD=Headset')
inp.setchannels(1)
inp.setrate(44100)
inp.setformat(alsaaudio.PCM_FORMAT_S16_LE)
inp.setperiodsize(1024)

w = wave.open('test.wav', 'w')
w.setnchannels(1)
w.setsampwidth(2)  # 2 bytes per sample == 16-bit
w.setframerate(44100)

try:
    while True:
        # PCM.read() returns (nframes, data); nframes <= 0 signals an
        # overrun or an empty read, in which case `data` must not be written
        # (the original wrote it unconditionally, corrupting the recording).
        length, data = inp.read()
        if length > 0:
            w.writeframes(data)
except KeyboardInterrupt:
    # Normal way to stop the recording.
    pass
finally:
    # Closing rewrites the WAV header with the final data sizes; without it
    # the file is left with a zero-length header.
    w.close()
| [
"alsaaudio.Mixer",
"wave.open",
"alsaaudio.PCM"
] | [((31, 74), 'alsaaudio.Mixer', 'alsaaudio.Mixer', ([], {'control': '"""Mic"""', 'cardindex': '(0)'}), "(control='Mic', cardindex=0)\n", (46, 74), False, 'import alsaaudio, wave\n'), ((144, 219), 'alsaaudio.PCM', 'alsaaudio.PCM', ([], {'type': 'alsaaudio.PCM_CAPTURE', 'device': '"""sysdefault:CARD=Headset"""'}), "(type=alsaaudio.PCM_CAPTURE, device='sysdefault:CARD=Headset')\n", (157, 219), False, 'import alsaaudio, wave\n'), ((329, 355), 'wave.open', 'wave.open', (['"""test.wav"""', '"""w"""'], {}), "('test.wav', 'w')\n", (338, 355), False, 'import alsaaudio, wave\n')] |
"""Provides functions for working with legacy session cookies."""
from typing import Tuple
from base64 import b64encode, b64decode
import hashlib
from datetime import datetime, timedelta
from .exceptions import InvalidCookie
from . import util
def unpack(cookie: str) -> Tuple[str, str, str, datetime, datetime, str]:
    """
    Unpack the legacy session cookie.

    Parameters
    ----------
    cookie : str
        The value of session cookie.

    Returns
    -------
    str
        The session ID associated with the cookie.
    str
        The user ID of the authenticated account.
    str
        The IP address of the client when the session was created.
    datetime
        The datetime when the session was created.
    datetime
        The datetime when the session expires.
    str
        Legacy user privilege level.

    Raises
    ------
    :class:`InvalidCookie`
        Raised if the cookie is detectably malformed or tampered with.
    """
    # Bug fixes vs. the original:
    # - the return annotation listed five elements while six are returned
    #   (expires_at was missing);
    # - the unused `payload` declaration was removed;
    # - a non-numeric issued-at field now raises InvalidCookie instead of
    #   leaking a bare ValueError;
    # - the signature check no longer relies on `assert`, which is stripped
    #   under `python -O` and would have disabled forgery detection.
    parts = cookie.split(':')
    if len(parts) < 5:
        raise InvalidCookie('Malformed cookie')
    session_id = parts[0]
    user_id = parts[1]
    ip = parts[2]
    try:
        issued_at = util.from_epoch(int(parts[3]))
    except ValueError as e:
        raise InvalidCookie('Malformed cookie') from e
    expires_at = issued_at + timedelta(seconds=util.get_session_duration())
    capabilities = parts[4]
    try:
        expected = pack(session_id, user_id, ip, issued_at, capabilities)
    except TypeError as e:
        raise InvalidCookie('Invalid session cookie; forged?') from e
    if expected != cookie:
        raise InvalidCookie('Invalid session cookie; forged?')
    return session_id, user_id, ip, issued_at, expires_at, capabilities
def pack(session_id: str, user_id: str, ip: str, issued_at: datetime,
         capabilities: str) -> str:
    """
    Build the signed value for the classic session cookie.

    Parameters
    ----------
    session_id : str
        The session ID associated with the cookie.
    user_id : str
        The user ID of the authenticated account.
    ip : str
        Client IP address.
    issued_at : datetime
        The UNIX time at which the session was initiated.
    capabilities : str
        This is essentially a user privilege level.

    Returns
    -------
    str
        Signed session cookie value: the colon-joined payload followed by
        its truncated base64 SHA1 signature.
    """
    secret = util.get_session_hash()
    fields = [session_id, user_id, ip, util.epoch(issued_at), capabilities]
    payload = ':'.join(str(field) for field in fields)
    digest = hashlib.sha1(f'{payload}-{secret}'.encode('utf-8')).digest()
    signature = b64encode(digest).decode('utf-8')
    # The last base64 character (presumably the '=' padding of the 20-byte
    # SHA1 digest) is dropped to match the legacy cookie format.
    return f'{payload}:{signature[:-1]}'
| [
"hashlib.sha1"
] | [((2503, 2524), 'hashlib.sha1', 'hashlib.sha1', (['to_sign'], {}), '(to_sign)\n', (2515, 2524), False, 'import hashlib\n')] |
import argparse
import csv
import os
import signal
import logging
from datetime import datetime
from decimal import Decimal
import pandas as pd
import progressbar
import sqlite3
import sys
import time
from pathlib import Path
from dhalsim.parser.file_generator import BatchReadmeGenerator, GeneralReadmeGenerator
from dhalsim.py3_logger import get_logger
import wntr
import wntr.network.controls as controls
import yaml
class PhysicalPlant:
    """
    Class representing the plant itself, runs each iteration. This class also deals with WNTR
    and updates the database.
    """
    def __init__(self, intermediate_yaml):
        """Load the intermediate YAML config, open the DB, and build the WNTR model.

        :param intermediate_yaml: :class:`pathlib.Path` to the intermediate yaml file
        """
        # Register cleanup handlers so results/readme are written on Ctrl-C / kill.
        signal.signal(signal.SIGINT, self.interrupt)
        signal.signal(signal.SIGTERM, self.interrupt)
        self.intermediate_yaml = intermediate_yaml
        with self.intermediate_yaml.open(mode='r') as file:
            self.data = yaml.safe_load(file)
        # Silence WNTR's own logger; we log through our configured logger instead.
        logging.getLogger('wntr').setLevel(logging.WARNING)
        self.logger = get_logger(self.data['log_level'])
        self.ground_truth_path = Path(self.data["output_path"]) / "ground_truth.csv"
        self.ground_truth_path.touch(exist_ok=True)
        # connection to the database
        self.conn = sqlite3.connect(self.data["db_path"])
        self.c = self.conn.cursor()
        # Create the network
        self.wn = wntr.network.WaterNetworkModel(self.data['inp_file'])
        self.node_list = list(self.wn.node_name_list)
        self.link_list = list(self.wn.link_name_list)
        self.tank_list = self.get_node_list_by_type(self.node_list, 'Tank')
        self.junction_list = self.get_node_list_by_type(self.node_list, 'Junction')
        self.scada_junction_list = self.get_scada_junction_list(self.data['plcs'])
        self.pump_list = self.get_link_list_by_type(self.link_list, 'Pump')
        self.valve_list = self.get_link_list_by_type(self.link_list, 'Valve')
        self.values_list = list()
        # CSV header for the ground-truth output: one column per tank/junction
        # level, per pump/valve flow+status, and per configured attack flag.
        list_header = ['iteration', 'timestamp']
        list_header.extend(self.create_node_header(self.tank_list))
        list_header.extend(self.create_node_header(self.junction_list))
        list_header.extend(self.create_link_header(self.pump_list))
        list_header.extend(self.create_link_header(self.valve_list))
        list_header.extend(self.create_attack_header())
        self.results_list = []
        self.results_list.append(list_header)
        # Dummy always-true condition: WNTR controls need a condition object,
        # but actuator states are actually driven from the database each step.
        dummy_condition = controls.ValueCondition(self.wn.get_node(self.tank_list[0]), 'level',
                                                  '>=', -1)
        self.control_list = []
        for valve in self.valve_list:
            self.control_list.append(self.create_control_dict(valve, dummy_condition))
        for pump in self.pump_list:
            self.control_list.append(self.create_control_dict(pump, dummy_condition))
        for control in self.control_list:
            an_action = controls.ControlAction(control['actuator'], control['parameter'],
                                               control['value'])
            a_control = controls.Control(control['condition'], an_action, name=control['name'])
            self.wn.add_control(control['name'], a_control)
        if self.data['simulator'] == 'pdd':
            self.wn.options.hydraulic.demand_model = 'PDD'
        # Set initial physical conditions
        self.set_initial_values()
        self.sim = wntr.sim.WNTRSimulator(self.wn)
        self.logger.info("Starting simulation for " +
                         os.path.basename(str(self.data['inp_file']))[:-4] + " topology.")
        self.start_time = datetime.now()
        self.master_time = -1
        self.db_update_string = "UPDATE plant SET value = ? WHERE name = ?"
    def get_scada_junction_list(self, plcs):
        """Collect the junctions monitored by at least one PLC sensor.

        :param plcs: list of PLC config dicts from the intermediate yaml
        :return: list of junction names that appear in some PLC's 'sensors'
        """
        junction_list = []
        for PLC in plcs:
            if 'sensors' not in PLC:
                PLC['sensors'] = list()
            for sensor in PLC['sensors']:
                if sensor != "" and sensor in self.junction_list:
                    junction_list.append(sensor)
        return junction_list
    def get_node_list_by_type(self, a_list, a_type):
        """Return the names of nodes in ``a_list`` whose WNTR node_type equals ``a_type``."""
        result = []
        for node in a_list:
            if self.wn.get_node(node).node_type == a_type:
                result.append(str(node))
        return result
    def get_link_list_by_type(self, a_list, a_type):
        """Return the names of links in ``a_list`` whose WNTR link_type equals ``a_type``."""
        result = []
        for link in a_list:
            if self.wn.get_link(link).link_type == a_type:
                result.append(str(link))
        return result
    @staticmethod
    def create_node_header(a_list):
        """Build CSV header entries ("<node>_LEVEL") for a list of node names."""
        result = []
        for node in a_list:
            result.append(node + "_LEVEL")
        return result
    @staticmethod
    def create_link_header(a_list):
        """Build CSV header entries ("<link>_FLOW", "<link>_STATUS") for a list of links."""
        result = []
        for link in a_list:
            result.append(link + "_FLOW")
            result.append(link + "_STATUS")
        return result
    def create_attack_header(self):
        """
        Function that creates csv list headers for device and network attacks
        :return: list of attack names starting with device and ending with network
        """
        result = []
        # Append device attacks
        if "plcs" in self.data:
            for plc in self.data["plcs"]:
                if "attacks" in plc:
                    for attack in plc["attacks"]:
                        result.append(attack['name'])
        # Append network attacks
        if "network_attacks" in self.data:
            for network_attack in self.data["network_attacks"]:
                result.append(network_attack['name'])
        return result
    def get_controls(self, a_list):
        """Return the WNTR control objects for the given control names."""
        result = []
        for control in a_list:
            result.append(self.wn.get_control(control))
        return result
    def create_control_dict(self, actuator, dummy_condition):
        """Build the bookkeeping dict for one actuator (pump or valve) control.

        The dict stores the WNTR link object, the controlled parameter
        ('status'), its current value, the (dummy) condition and the name.
        """
        act_dict = dict.fromkeys(['actuator', 'parameter', 'value', 'condition', 'name'])
        act_dict['actuator'] = self.wn.get_link(actuator)
        act_dict['parameter'] = 'status'
        act_dict['condition'] = dummy_condition
        act_dict['name'] = actuator
        # Status may be a plain int or a LinkStatus enum depending on WNTR state.
        if type(self.wn.get_link(actuator).status) is int:
            act_dict['value'] = act_dict['actuator'].status
        else:
            act_dict['value'] = act_dict['actuator'].status.value
        return act_dict
    def register_results(self):
        """Collect the current iteration's values into ``self.values_list``."""
        # Results are divided into: nodes: reservoir and tanks, links: flows and status
        self.values_list = [self.master_time, datetime.now()]
        self.extend_tanks()
        self.extend_junctions()
        self.extend_pumps()
        self.extend_valves()
        self.extend_attacks()
    def extend_tanks(self):
        """Append the current tank levels to the values list."""
        # Get tanks levels
        for tank in self.tank_list:
            self.values_list.extend([self.wn.get_node(tank).level])
    def extend_junctions(self):
        """Append the current junction pressures (head - elevation) to the values list."""
        # Get junction levels
        for junction in self.junction_list:
            self.values_list.extend(
                [self.wn.get_node(junction).head - self.wn.get_node(junction).elevation])
    def extend_pumps(self):
        """Append each pump's flow and status to the values list."""
        # Get pumps flows and status
        for pump in self.pump_list:
            self.values_list.extend([self.wn.get_link(pump).flow])
            if type(self.wn.get_link(pump).status) is int:
                self.values_list.extend([self.wn.get_link(pump).status])
            else:
                self.values_list.extend([self.wn.get_link(pump).status.value])
    def extend_valves(self):
        """Append each valve's flow and status to the values list."""
        # Get valves flows and status
        for valve in self.valve_list:
            self.values_list.extend([self.wn.get_link(valve).flow])
            if type(self.wn.get_link(valve).status) is int:
                self.values_list.extend([self.wn.get_link(valve).status])
            else:
                self.values_list.extend([self.wn.get_link(valve).status.value])
    def extend_attacks(self):
        """Append the flag of every configured device/network attack to the values list."""
        # Get device attacks
        if "plcs" in self.data:
            for plc in self.data["plcs"]:
                if "attacks" in plc:
                    for attack in plc["attacks"]:
                        self.values_list.append(self.get_attack_flag(attack['name']))
        # get network attacks
        if "network_attacks" in self.data:
            for network_attack in self.data["network_attacks"]:
                self.values_list.append(self.get_attack_flag(network_attack['name']))
    def update_controls(self):
        """Updates all controls in WNTR."""
        for control in self.control_list:
            # Read the actuator state that the PLCs wrote into the shared DB.
            rows_1 = self.c.execute('SELECT value FROM plant WHERE name = ?',
                                    (control['name'],)).fetchone()
            self.conn.commit()
            new_status = int(rows_1[0])
            control['value'] = new_status
            # WNTR controls are immutable in-place: replace the old control
            # with a new one carrying the updated actuator value.
            new_action = controls.ControlAction(control['actuator'], control['parameter'],
                                                control['value'])
            new_control = controls.Control(control['condition'], new_action, name=control['name'])
            self.wn.remove_control(control['name'])
            self.wn.add_control(control['name'], new_control)
    def write_results(self, results):
        """Writes ground truth file."""
        with self.ground_truth_path.open(mode='w') as f:
            writer = csv.writer(f)
            writer.writerows(results)
    def get_plcs_ready(self):
        """
        Checks whether all PLCs have finished their loop.
        :return: boolean whether all PLCs have finished
        """
        self.c.execute("""SELECT count(*)
                        FROM sync
                        WHERE flag <= 0""")
        flag = int(self.c.fetchone()[0]) == 0
        return flag
    def get_attack_flag(self, name):
        """
        Get the attack flag of this attack.
        :return: False if attack not running, true otherwise
        """
        self.c.execute("SELECT flag FROM attack WHERE name IS ?", (name,))
        flag = int(self.c.fetchone()[0])
        return flag
    def main(self):
        """Runs the simulation for x iterations."""
        # We want to simulate only one hydraulic timestep each time MiniCPS processes the
        # simulation data
        self.wn.options.time.duration = self.wn.options.time.hydraulic_timestep
        iteration_limit = self.data["iterations"]
        self.logger.debug("Temporary file location: " + str(Path(self.data["db_path"]).parent))
        if 'batch_index' in self.data:
            self.logger.info("Running batch simulation {x} out of {y}."
                             .format(x=self.data['batch_index'] + 1,
                                     y=self.data['batch_simulations']))
        self.logger.info("Simulation will run for {x} iterations with hydraulic timestep {step}."
                         .format(x=str(iteration_limit),
                                 step=str(self.wn.options.time.hydraulic_timestep)))
        p_bar = None
        # Show a progress bar unless debug logging is on (would interleave badly).
        if self.data['log_level'] != 'debug':
            widgets = [' [', progressbar.Timer(), ' - ', progressbar.SimpleProgress(), '] ',
                       progressbar.Bar(), ' [', progressbar.ETA(), '] ', ]
            p_bar = progressbar.ProgressBar(max_value=iteration_limit, widgets=widgets)
            p_bar.start()
        while self.master_time < iteration_limit:
            # Publish the current iteration number, then busy-wait until every
            # PLC has cleared its sync flag before advancing the simulation.
            self.c.execute("REPLACE INTO master_time (id, time) VALUES(1, ?)", (str(self.master_time),))
            self.conn.commit()
            self.master_time = self.master_time + 1
            while not self.get_plcs_ready():
                time.sleep(0.01)
            self.update_controls()
            self.logger.debug("Iteration {x} out of {y}.".format(x=str(self.master_time),
                                                                 y=str(iteration_limit)))
            if p_bar:
                p_bar.update(self.master_time)
            # Check for simulation error, print output on exception
            try:
                self.sim.run_sim(convergence_error=True)
            except Exception as exp:
                self.logger.error(f"Error in WNTR simulation: {exp}")
                self.finish()
            self.register_results()
            self.results_list.append(self.values_list)
            self.update_tanks()
            self.update_pumps()
            self.update_valves()
            self.update_junctions()
            # Write results of this iteration if needed
            if 'saving_interval' in self.data and self.master_time != 0 and \
                    self.master_time % self.data['saving_interval'] == 0:
                self.write_results(self.results_list)
            # Set sync flags for nodes
            self.c.execute("UPDATE sync SET flag=0")
            self.conn.commit()
        self.finish()
    def update_tanks(self):
        """Update tanks in database."""
        for tank in self.tank_list:
            a_level = self.wn.get_node(tank).level
            self.c.execute(self.db_update_string, (str(a_level), tank,))
            self.conn.commit()
    def update_pumps(self):
        """"Update pumps in database."""
        for pump in self.pump_list:
            flow = Decimal(self.wn.get_link(pump).flow)
            self.c.execute(self.db_update_string, (str(flow), pump + "F",))
            self.conn.commit()
    def update_valves(self):
        """Update valve in database."""
        for valve in self.valve_list:
            flow = Decimal(self.wn.get_link(valve).flow)
            self.c.execute(self.db_update_string, (str(flow), valve + "F",))
            self.conn.commit()
    def update_junctions(self):
        """Update junction pressure in database."""
        # todo: Test this
        # for junction in self.junction_list:
        for junction in self.scada_junction_list:
            level = Decimal(self.wn.get_node(junction).head - self.wn.get_node(junction).elevation)
            self.c.execute(self.db_update_string, (str(level), junction,))
            self.conn.commit()
    def interrupt(self, sig, frame):
        """Signal handler for SIGINT/SIGTERM: flush results and exit cleanly."""
        self.finish()
        self.logger.info("Simulation ended.")
        sys.exit(0)
    def finish(self):
        """Write the ground truth and readme files, then exit the process."""
        self.write_results(self.results_list)
        end_time = datetime.now()
        if 'batch_simulations' in self.data:
            readme_path = Path(self.data['config_path']).parent / self.data['output_path']\
                          / 'configuration' / 'batch_readme.md'
            os.makedirs(str(readme_path.parent), exist_ok=True)
            BatchReadmeGenerator(self.intermediate_yaml, readme_path, self.start_time, end_time,
                                 self.wn, self.master_time).write_batch()
            # Only the last batch run writes the general readme.
            if self.data['batch_index'] == self.data['batch_simulations'] - 1:
                GeneralReadmeGenerator(self.intermediate_yaml, self.data['start_time'],
                                       end_time, True, self.master_time, self.wn).write_readme()
        else:
            GeneralReadmeGenerator(self.intermediate_yaml, self.data['start_time'],
                                   end_time, False, self.master_time, self.wn).write_readme()
        sys.exit(0)
    def set_initial_values(self):
        """Sets custom initial values for tanks and demand patterns in the WNTR simulation"""
        if "initial_tank_values" in self.data:
            # Initial tank values
            for tank in self.tank_list:
                if str(tank) in self.data["initial_tank_values"]:
                    value = self.data["initial_tank_values"][str(tank)]
                    self.logger.debug("Setting tank " + tank + " initial value to " + str(value))
                    self.wn.get_node(tank).init_level = value
                else:
                    self.logger.debug("Tank " + tank + " has no specified initial values, using default...")
        if "demand_patterns_data" in self.data:
            # Demand patterns for batch
            demands = pd.read_csv(self.data["demand_patterns_data"])
            for name, pat in self.wn.patterns():
                if name in demands:
                    self.logger.debug("Setting demands for " + name +
                                      " to demands defined at: " + self.data["demand_patterns_data"])
                    pat.multipliers = demands[name].values.tolist()
                else:
                    self.logger.debug("Consumer " + name + " has no demands defined, using default...")
def is_valid_file(test_parser, arg):
    """Argparse ``type`` helper: return ``arg`` if it is an existing path.

    When the path does not exist, the error is reported through the parser's
    ``error`` method (which normally prints a usage message and exits).
    """
    if os.path.exists(arg):
        return arg
    test_parser.error(arg + " does not exist.")
if __name__ == "__main__":
    # Entry point: parse the intermediate yaml path and run the plant simulation.
    parser = argparse.ArgumentParser(description='Run the simulation')
    # Validate at parse time that the supplied yaml file actually exists.
    parser.add_argument(dest="intermediate_yaml",
                        help="intermediate yaml file", metavar="FILE",
                        type=lambda x: is_valid_file(parser, x))
    args = parser.parse_args()
    simulation = PhysicalPlant(Path(args.intermediate_yaml))
    simulation.main()
| [
"logging.getLogger",
"dhalsim.py3_logger.get_logger",
"pandas.read_csv",
"time.sleep",
"sys.exit",
"progressbar.ProgressBar",
"os.path.exists",
"argparse.ArgumentParser",
"pathlib.Path",
"dhalsim.parser.file_generator.BatchReadmeGenerator",
"dhalsim.parser.file_generator.GeneralReadmeGenerator",... | [((16637, 16694), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run the simulation"""'}), "(description='Run the simulation')\n", (16660, 16694), False, 'import argparse\n'), ((637, 681), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'self.interrupt'], {}), '(signal.SIGINT, self.interrupt)\n', (650, 681), False, 'import signal\n'), ((690, 735), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'self.interrupt'], {}), '(signal.SIGTERM, self.interrupt)\n', (703, 735), False, 'import signal\n'), ((977, 1011), 'dhalsim.py3_logger.get_logger', 'get_logger', (["self.data['log_level']"], {}), "(self.data['log_level'])\n", (987, 1011), False, 'from dhalsim.py3_logger import get_logger\n'), ((1208, 1245), 'sqlite3.connect', 'sqlite3.connect', (["self.data['db_path']"], {}), "(self.data['db_path'])\n", (1223, 1245), False, 'import sqlite3\n'), ((1330, 1383), 'wntr.network.WaterNetworkModel', 'wntr.network.WaterNetworkModel', (["self.data['inp_file']"], {}), "(self.data['inp_file'])\n", (1360, 1383), False, 'import wntr\n'), ((3381, 3412), 'wntr.sim.WNTRSimulator', 'wntr.sim.WNTRSimulator', (['self.wn'], {}), '(self.wn)\n', (3403, 3412), False, 'import wntr\n'), ((3586, 3600), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3598, 3600), False, 'from datetime import datetime\n'), ((14125, 14136), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (14133, 14136), False, 'import sys\n'), ((14225, 14239), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (14237, 14239), False, 'from datetime import datetime\n'), ((15142, 15153), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (15150, 15153), False, 'import sys\n'), ((16493, 16512), 'os.path.exists', 'os.path.exists', (['arg'], {}), '(arg)\n', (16507, 16512), False, 'import os\n'), ((16945, 16973), 'pathlib.Path', 'Path', (['args.intermediate_yaml'], {}), '(args.intermediate_yaml)\n', (16949, 16973), False, 
'from pathlib import Path\n'), ((873, 893), 'yaml.safe_load', 'yaml.safe_load', (['file'], {}), '(file)\n', (887, 893), False, 'import yaml\n'), ((1046, 1076), 'pathlib.Path', 'Path', (["self.data['output_path']"], {}), "(self.data['output_path'])\n", (1050, 1076), False, 'from pathlib import Path\n'), ((2893, 2981), 'wntr.network.controls.ControlAction', 'controls.ControlAction', (["control['actuator']", "control['parameter']", "control['value']"], {}), "(control['actuator'], control['parameter'], control[\n 'value'])\n", (2915, 2981), True, 'import wntr.network.controls as controls\n'), ((3048, 3119), 'wntr.network.controls.Control', 'controls.Control', (["control['condition']", 'an_action'], {'name': "control['name']"}), "(control['condition'], an_action, name=control['name'])\n", (3064, 3119), True, 'import wntr.network.controls as controls\n'), ((6498, 6512), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6510, 6512), False, 'from datetime import datetime\n'), ((8796, 8884), 'wntr.network.controls.ControlAction', 'controls.ControlAction', (["control['actuator']", "control['parameter']", "control['value']"], {}), "(control['actuator'], control['parameter'], control[\n 'value'])\n", (8818, 8884), True, 'import wntr.network.controls as controls\n'), ((8954, 9026), 'wntr.network.controls.Control', 'controls.Control', (["control['condition']", 'new_action'], {'name': "control['name']"}), "(control['condition'], new_action, name=control['name'])\n", (8970, 9026), True, 'import wntr.network.controls as controls\n'), ((9299, 9312), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (9309, 9312), False, 'import csv\n'), ((11178, 11245), 'progressbar.ProgressBar', 'progressbar.ProgressBar', ([], {'max_value': 'iteration_limit', 'widgets': 'widgets'}), '(max_value=iteration_limit, widgets=widgets)\n', (11201, 11245), False, 'import progressbar\n'), ((15945, 15991), 'pandas.read_csv', 'pd.read_csv', (["self.data['demand_patterns_data']"], {}), 
"(self.data['demand_patterns_data'])\n", (15956, 15991), True, 'import pandas as pd\n'), ((903, 928), 'logging.getLogger', 'logging.getLogger', (['"""wntr"""'], {}), "('wntr')\n", (920, 928), False, 'import logging\n'), ((11019, 11038), 'progressbar.Timer', 'progressbar.Timer', ([], {}), '()\n', (11036, 11038), False, 'import progressbar\n'), ((11047, 11075), 'progressbar.SimpleProgress', 'progressbar.SimpleProgress', ([], {}), '()\n', (11073, 11075), False, 'import progressbar\n'), ((11106, 11123), 'progressbar.Bar', 'progressbar.Bar', ([], {}), '()\n', (11121, 11123), False, 'import progressbar\n'), ((11131, 11148), 'progressbar.ETA', 'progressbar.ETA', ([], {}), '()\n', (11146, 11148), False, 'import progressbar\n'), ((11574, 11590), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (11584, 11590), False, 'import time\n'), ((14519, 14634), 'dhalsim.parser.file_generator.BatchReadmeGenerator', 'BatchReadmeGenerator', (['self.intermediate_yaml', 'readme_path', 'self.start_time', 'end_time', 'self.wn', 'self.master_time'], {}), '(self.intermediate_yaml, readme_path, self.start_time,\n end_time, self.wn, self.master_time)\n', (14539, 14634), False, 'from dhalsim.parser.file_generator import BatchReadmeGenerator, GeneralReadmeGenerator\n'), ((14968, 15087), 'dhalsim.parser.file_generator.GeneralReadmeGenerator', 'GeneralReadmeGenerator', (['self.intermediate_yaml', "self.data['start_time']", 'end_time', '(False)', 'self.master_time', 'self.wn'], {}), "(self.intermediate_yaml, self.data['start_time'],\n end_time, False, self.master_time, self.wn)\n", (14990, 15087), False, 'from dhalsim.parser.file_generator import BatchReadmeGenerator, GeneralReadmeGenerator\n'), ((10392, 10418), 'pathlib.Path', 'Path', (["self.data['db_path']"], {}), "(self.data['db_path'])\n", (10396, 10418), False, 'from pathlib import Path\n'), ((14773, 14891), 'dhalsim.parser.file_generator.GeneralReadmeGenerator', 'GeneralReadmeGenerator', (['self.intermediate_yaml', 
"self.data['start_time']", 'end_time', '(True)', 'self.master_time', 'self.wn'], {}), "(self.intermediate_yaml, self.data['start_time'],\n end_time, True, self.master_time, self.wn)\n", (14795, 14891), False, 'from dhalsim.parser.file_generator import BatchReadmeGenerator, GeneralReadmeGenerator\n'), ((14312, 14342), 'pathlib.Path', 'Path', (["self.data['config_path']"], {}), "(self.data['config_path'])\n", (14316, 14342), False, 'from pathlib import Path\n')] |
"""
Utility functions for running NEB calculations
"""
import numpy as np
from aiida.orm import StructureData
from aiida.engine import calcfunction
from ase.neb import NEB
@calcfunction
def neb_interpolate(init_structure, final_strucrture, nimages):
    """
    Interpolate NEB frames between the starting and the final structures.

    Works around the PBC wrapping problem by computing minimum-image-convention
    (MIC) displacements from the initial to the final structure and applying
    them to an unwrapped copy of the initial structure before interpolating.

    NOTE(review): the parameter name ``final_strucrture`` is a typo, but it is
    part of the calcfunction's keyword interface, so renaming it would break
    existing callers/provenance.

    :param init_structure: initial ``StructureData``
    :param final_strucrture: final ``StructureData``
    :param nimages: number of intermediate images to generate (``Int``-like)
    :return: dict of ``StructureData`` outputs keyed ``image_init``,
        ``image_01`` ... ``image_final``
    """
    ainit = init_structure.get_ase()
    afinal = final_strucrture.get_ase()
    disps = []
    # Find distances
    acombined = ainit.copy()
    acombined.extend(afinal)
    # Get piece-wise MIC distances
    for i in range(len(ainit)):
        dist = acombined.get_distance(i, i + len(ainit), vector=True, mic=True)
        disps.append(dist.tolist())
    disps = np.asarray(disps)
    ainit.wrap(eps=1e-1)
    afinal = ainit.copy()
    # Displace the atoms according to MIC distances
    afinal.positions += disps
    # nimages + 1 copies of the initial frame plus the final frame; NEB's
    # linear interpolation then fills in the intermediate images in place.
    neb = NEB([ainit.copy() for i in range(int(nimages) + 1)] + [afinal.copy()])
    neb.interpolate()
    out_init = StructureData(ase=neb.images[0])
    out_init.label = init_structure.label + ' INIT'
    out_final = StructureData(ase=neb.images[-1])
    out_final.label = init_structure.label + ' FINAL'
    outputs = {'image_init': out_init}
    for i, out in enumerate(neb.images[1:-1]):
        outputs[f'image_{i+1:02d}'] = StructureData(ase=out)
        outputs[f'image_{i+1:02d}'].label = init_structure.label + f' FRAME {i+1:02d}'
    outputs['image_final'] = out_final
    return outputs
@calcfunction
def fix_atom_order(reference, to_fix):
    """
    Fix atom order by finding NN distances between two frames.

    This resolves the issue where two closely matching structures have
    different atomic orders. Note that the two frames must be close enough
    for this to work.

    :param reference: ``StructureData`` whose atom order is taken as canonical
    :param to_fix: ``StructureData`` to be reordered to match ``reference``
    :return: reordered ``StructureData`` labelled "<label> UPDATED ORDER"
    """
    aref = reference.get_ase()
    afix = to_fix.get_ase()
    # Index of the reference atom in the second structure
    new_indices = np.zeros(len(aref), dtype=int)
    # Find distances
    acombined = aref.copy()
    acombined.extend(afix)
    # Get piece-wise MIC distances
    for i in range(len(aref)):
        dists = []
        for j in range(len(aref)):
            dist = acombined.get_distance(i, j + len(aref), mic=True)
            dists.append(dist)
        min_idx = np.argmin(dists)
        min_dist = min(dists)
        if min_dist > 0.5:
            # Bug fix: the original message printed the stale inner-loop
            # variable ``j`` (always the last index) instead of the actual
            # matched atom ``min_idx``.
            print(f'Large displacement found - moving atom {min_idx} to {i} - please check if this is correct!')
        new_indices[i] = min_idx
    afixed = afix[new_indices]
    fixed_structure = StructureData(ase=afixed)
    fixed_structure.label = to_fix.label + ' UPDATED ORDER'
    return fixed_structure
| [
"numpy.argmin",
"aiida.orm.StructureData",
"numpy.asarray"
] | [((828, 845), 'numpy.asarray', 'np.asarray', (['disps'], {}), '(disps)\n', (838, 845), True, 'import numpy as np\n'), ((1098, 1130), 'aiida.orm.StructureData', 'StructureData', ([], {'ase': 'neb.images[0]'}), '(ase=neb.images[0])\n', (1111, 1130), False, 'from aiida.orm import StructureData\n'), ((1199, 1232), 'aiida.orm.StructureData', 'StructureData', ([], {'ase': 'neb.images[-1]'}), '(ase=neb.images[-1])\n', (1212, 1232), False, 'from aiida.orm import StructureData\n'), ((2633, 2658), 'aiida.orm.StructureData', 'StructureData', ([], {'ase': 'afixed'}), '(ase=afixed)\n', (2646, 2658), False, 'from aiida.orm import StructureData\n'), ((1412, 1434), 'aiida.orm.StructureData', 'StructureData', ([], {'ase': 'out'}), '(ase=out)\n', (1425, 1434), False, 'from aiida.orm import StructureData\n'), ((2365, 2381), 'numpy.argmin', 'np.argmin', (['dists'], {}), '(dists)\n', (2374, 2381), True, 'import numpy as np\n')] |
import os
import random

# Batch driver: runs 100 simulations of kuka_pydrake_sim.py with seeds
# 100..199 and a random object count per run, saving a video of each.
# Fullscreen meshlab on right monitor for this to work
for k in range(100, 200):
    # Each run places a random number of objects in the scene.
    n_objects = random.randint(5, 10)
    os.system("python kuka_pydrake_sim.py -T 60 --seed %d --hacky_save_video -N %d" % (k, n_objects))
| [
"os.system",
"random.randint"
] | [((119, 140), 'random.randint', 'random.randint', (['(5)', '(10)'], {}), '(5, 10)\n', (133, 140), False, 'import random\n'), ((142, 248), 'os.system', 'os.system', (["('python kuka_pydrake_sim.py -T 60 --seed %d --hacky_save_video -N %d' % (k,\n n_objects))"], {}), "(\n 'python kuka_pydrake_sim.py -T 60 --seed %d --hacky_save_video -N %d' %\n (k, n_objects))\n", (151, 248), False, 'import os\n')] |
# Create your views here.
from rest_framework import generics
from rest_framework.permissions import IsAuthenticated
from article.models import Product
from article.serializers import ProductSerializer
class ProductListCreateView(generics.ListCreateAPIView):
    """List all products (most recently modified first) and create new products.

    GET returns the product list; POST creates a product. Both require an
    authenticated user.
    """
    permission_classes = [IsAuthenticated]
    queryset = Product.objects.order_by('-date_modified')
    serializer_class = ProductSerializer
    def perform_create(self, serializer):
        """Save the post data when creating a new product."""
        serializer.save()
class ProductDetailsView(generics.RetrieveUpdateDestroyAPIView):
    """This class handles the http GET, PUT and DELETE requests for a product.

    Requires an authenticated user for all operations.
    """
    permission_classes = [IsAuthenticated]
    queryset = Product.objects.all()
    serializer_class = ProductSerializer
| [
"article.models.Product.objects.order_by",
"article.models.Product.objects.all"
] | [((348, 390), 'article.models.Product.objects.order_by', 'Product.objects.order_by', (['"""-date_modified"""'], {}), "('-date_modified')\n", (372, 390), False, 'from article.models import Product\n'), ((768, 789), 'article.models.Product.objects.all', 'Product.objects.all', ([], {}), '()\n', (787, 789), False, 'from article.models import Product\n')] |
# -*- coding: utf-8 -*-
# @Author: TD21forever
# @Date: 2019-05-26 12:14:07
# @Last Modified by: TD21forever
# @Last Modified time: 2019-06-17 23:11:15
import numpy as np
'''
dp[item][cap]的意思是 从前item个物品中拿东西 放到容量为cap 的背包中 能拿到的最大价值
'''
def solution(num, waste, value, capacity):
    """Solve the 0/1 knapsack problem by dynamic programming.

    ``dp[item][cap]`` is the maximum total value obtainable by choosing among
    the first ``item`` items with a knapsack of capacity ``cap``.  ``waste``
    (weights) and ``value`` are 1-indexed: index 0 is an unused placeholder.

    Returns the full DP table of shape ``(num + 5, capacity + 2)`` (the extra
    rows/columns stay zero); the answer is ``dp[num][capacity]``.
    """
    dp = np.zeros([num + 5, capacity + 2])
    for item in range(1, num + 1):
        for cap in range(1, capacity + 1):
            if waste[item] > cap:
                # Item too heavy for this capacity: inherit the best without it.
                dp[item][cap] = dp[item - 1][cap]
            else:
                skip = dp[item - 1][cap]
                take = dp[item - 1][cap - waste[item]] + value[item]
                dp[item][cap] = skip if skip > take else take
    return dp
if __name__ == '__main__':
    # Small demo: 3 items, capacity 10; prints the full DP table.
    item_count = 3
    knapsack_capacity = 10
    weights = [0, 2, 3, 4]
    values = [0, 3, 4, 5]
    table = solution(item_count, weights, values, knapsack_capacity)
    print(table)
| [
"numpy.zeros"
] | [((295, 328), 'numpy.zeros', 'np.zeros', (['[num + 5, capacity + 2]'], {}), '([num + 5, capacity + 2])\n', (303, 328), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2018 The DisentanglementLib Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to visualize the unified scores.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pathlib
import matplotlib
matplotlib.use("Agg") # Set headless-friendly backend.
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import numpy as np
import pandas as pd
import seaborn as sns
def heat_square(matrix, output_dir, name, xlabel, ylabel, max_val=None,
                factor_names=None):
  """Plot values of a matrix.

  Each entry is represented as a square of increasing size and different color.

  Args:
    matrix: Matrix of values to plot. Values should be in range [0, max_val].
    output_dir: Where to save the image.
    name: File name.
    xlabel: Name of the x axis of the matrix.
    ylabel: Name of the y axis of the matrix.
    max_val: Maximum value acceptable in the matrix. If None, the max_val will
      be set as the maximum value in the matrix.
    factor_names: Names of the factors of variation.

  Raises:
    ValueError: If the matrix contains a value larger than the given max_val.
  """
  sns.set_context("notebook", font_scale=1.5, rc={"lines.linewidth": 2})
  sns.set_style("whitegrid")
  fig, _ = plt.subplots()
  # 15-column grid: the first 14 columns hold the matrix plot, the last one
  # holds the colorbar legend.
  plot_grid = plt.GridSpec(1, 15, hspace=0.2, wspace=1.2)
  ax = plt.subplot(plot_grid[:, :-1])
  if max_val is None:
    max_val = np.max(matrix)
    # Avoid division by zero later when the matrix is all zeros.
    if max_val == 0:
      max_val = 1.
  else:
    if max_val < np.max(matrix):
      raise ValueError("The matrix has maximum value larger than max_val")
  palette = sns.color_palette("Blues", 256)
  # Estimates the area of the squares: the length of the edge is
  # roughly: length of the grid in inches * how many points per inch - space for
  # the axis names times * 14/15 as the last 1/15 part of the figure is occupied
  # by the colorbar legend.
  size_scale = ((((ax.get_position().xmax - ax.get_position().xmin) *
                  fig.get_size_inches()[0] * fig.get_dpi() - 40) * 14 / 15 * 0.8) /
                (matrix.shape[0])) ** 2
  plot_matrix_squares(matrix, max_val, palette, size_scale, ax)
  plt.xticks(range(matrix.shape[0]))
  if factor_names is not None:
    plt.yticks(range(matrix.shape[1]), factor_names)
  else:
    plt.yticks(range(matrix.shape[1]))
  plt.xlabel(xlabel)
  plt.ylabel(ylabel)
  # Add color legend on the right side of the plot.
  ax = plt.subplot(plot_grid[:, -1])
  plot_bar_palette(palette, max_val, ax)
  if not os.path.isdir(output_dir):
    pathlib.Path(output_dir).mkdir(parents=True)
  output_path = os.path.join(output_dir, "{}.png".format(name))
  with open(output_path, "wb") as path:
    fig.savefig(path, bbox_inches="tight")
def plot_matrix_squares(matrix, max_val, palette, size_scale, ax):
  """Grid of squares where the size is proportional to the matrix values.

  Args:
    matrix: Matrix of values to plot.
    max_val: Maximum value that is allowed in the matrix.
    palette: Color palette.
    size_scale: Maximum size of the squares.
    ax: Axis of the subplot.
  """
  # Melt the matrix into long form so each entry becomes one scatter point.
  tmp = pd.melt(pd.DataFrame(matrix).reset_index(), id_vars="index")
  # The columns of the dataframe are: index, variable and value.
  def to_color(val):
    # Map a value in [0, max_val] onto the 256-entry palette.
    ind = int(val / max_val * 255)
    return palette[ind]
  ax.scatter(x=tmp["index"], y=tmp["variable"],
             s=size_scale * tmp["value"] / max_val, marker="s",
             c=tmp["value"].apply(to_color))
  # Minor ticks at cell boundaries so the minor grid outlines each cell.
  ax.set_xticks([v + 0.5 for v in range(matrix.shape[0])], minor=True)
  ax.set_yticks([v + 0.5 for v in range(matrix.shape[1])], minor=True)
  ax.grid(False, "major")
  ax.grid(True, "minor")
  ax.set_xlim([-0.5, matrix.shape[0] - 0.5])
  ax.set_ylim([-0.5, matrix.shape[1] - 0.5])
  ax.tick_params(right=False, top=False, left=False, bottom=False)
  ax.set_aspect(aspect=1.)
def plot_bar_palette(palette, max_val, ax):
  """Plot the color bar legend for the heat-square plot.

  Draws one thin horizontal bar per palette entry, stacked from 0 to
  max_val, producing a continuous vertical color gradient.

  Args:
    palette: Sequence of colors (expected length 256).
    max_val: Maximum value of the scale; the bar spans [0, max_val].
    ax: Axis of the subplot to draw on.
  """
  col_x = [0] * len(palette)
  # Bug fix: the original call was np.linspace(0, max_val, 256, ax), which
  # passed the Axes object as the fourth positional argument (`endpoint`).
  # A truthy Axes coincided with the default endpoint=True, so the output
  # happened to be correct, but the call itself was wrong.
  bar_y = np.linspace(0, max_val, 256)
  bar_height = bar_y[1] - bar_y[0]
  ax.barh(bar_y, np.array([5] * len(palette)), height=bar_height, left=col_x,
          align="center", color=palette, linewidth=0)
  ax.set_xlim(1, 2)
  ax.set_ylim(0, max_val)
  ax.grid(False)
  ax.set_xticks([])
  # Only label the bottom, middle and top of the scale, on the right side.
  ax.set_yticks(np.linspace(0, max_val, 3))
  ax.yaxis.tick_right()
def plot_recovery_vs_independent(matrix, output_dir, name):
  """Plot how many factors are recovered and in how many independent groups.

  Plot how many factors of variation are independently captured in a
  representation at different thresholds. It takes as input a matrix
  relating factors of variation and latent dimensions, sort the elements and
  then plot for each threshold (1) how many factors are discovered and (2)
  how many factors are encoded independently in the representation.

  Args:
    matrix: Contains statistical relations between factors of variation and
      latent codes.
    output_dir: Output directory where to save the plot.
    name: Filename of the plot.
  """
  # Sweep thresholds over all matrix values, from largest to smallest.
  thresholds = np.sort(matrix.flatten())[::-1]
  precisions = [precision(matrix, x) for x in thresholds]
  recalls = [recall(matrix, x) for x in thresholds]
  sns.set_context("notebook", font_scale=1.5, rc={"lines.linewidth": 2})
  sns.set_style("whitegrid")
  fig, ax = plt.subplots()
  palette = sns.color_palette()
  plt.plot(range(thresholds.shape[0]), precisions, label="Independent groups",
           color=palette[0], linewidth=3)
  plt.plot(range(thresholds.shape[0]), recalls, "--", label="Discovered",
           color=palette[1], linewidth=3)
  # Label only every 10th threshold on the x axis to avoid clutter.
  thresholds_ids = range(0, thresholds.shape[0], 10)
  plt.xticks(thresholds_ids, np.around(thresholds[thresholds_ids], 2))
  ax.set_ylim([0, matrix.shape[0] * 1.1])
  ax.tick_params(right=False, top=False, left=False, bottom=False)
  ax.set_yticks(np.linspace(0, matrix.shape[0], matrix.shape[0] + 1))
  plt.legend(loc="upper center", bbox_to_anchor=(0.5, 1.25), ncol=2)
  plt.xlabel("Threshold")
  plt.ylabel("Number of Factors")
  if not os.path.isdir(output_dir):
    pathlib.Path(output_dir).mkdir(parents=True)
  output_path = os.path.join(output_dir, name + ".png")
  with open(output_path, "wb") as path:
    fig.savefig(path, bbox_inches="tight")
def precision(matrix, th):
    """Count independent connected components for a given threshold.

    Edges of the adjacency matrix lighter than ``th`` are removed, then
    the connected components spanning more than one node are counted.

    Args:
      matrix: Adjacency matrix relating rows (factors) to columns (codes).
      th: Edges with weight below this value are dropped.

    Returns:
      Number of connected components containing more than one node.
    """
    pruned = matrix.copy()
    pruned[pruned < th] = 0
    row_flags = np.zeros(pruned.shape[0])
    col_flags = np.zeros(pruned.shape[1])
    components = 0
    for start in range(len(row_flags)):
        if row_flags[start] != 0:
            # Already swept up by an earlier component.
            continue
        row_flags, col_flags, size = bfs(pruned, [(start, 0)], row_flags, col_flags, 1)
        if size > 1:
            components += 1
    return components
def recall(matrix, th):
    """Count how many factors survive a given threshold.

    All entries of ``matrix`` smaller than ``th`` are zeroed; a factor
    (row) counts as discovered when its row still has a nonzero sum.

    Args:
      matrix: Adjacency matrix for the factor/code graph.
      th: Edges with weight below this value are dropped.

    Returns:
      Number of rows keeping at least one surviving edge.
    """
    pruned = matrix.copy()
    pruned[pruned < th] = 0
    row_totals = np.sum(pruned, axis=1)
    return np.sum(row_totals != 0)
def bfs(matrix, to_visit, factors, codes, size):
    """Traverse one connected component of the factor/code graph.

    Nodes are (index, flag) pairs: flag 0 marks a factor (a row of
    ``matrix``), flag 1 marks a code (a column). Every newly reached
    neighbour is pushed onto ``to_visit`` and expanded immediately via
    recursion, so the entire component hanging off the popped node gets
    marked before this call returns.

    Args:
      matrix: Adjacency matrix for the graph.
      to_visit: Stack of (index, flag) nodes still to expand.
      factors: Per-row visited flags, updated in place.
      codes: Per-column visited flags, updated in place.
      size: Running count of nodes in the current component.

    Returns:
      Tuple (factors, codes, size) after the component is explored.
    """
    node, kind = to_visit.pop()
    if kind == 0:
        # Expanding a factor: walk its row, visiting unvisited codes.
        factors[node] = 1
        row = matrix[node, :]
        for col in range(len(row)):
            if row[col] != 0 and codes[col] == 0:
                to_visit.append((col, 1))
                size += 1
                factors, codes, size = bfs(matrix, to_visit, factors, codes, size)
    else:
        # Expanding a code: walk its column, visiting unvisited factors.
        codes[node] = 1
        column = matrix[:, node]
        for row_idx in range(len(column)):
            if column[row_idx] != 0 and factors[row_idx] == 0:
                to_visit.append((row_idx, 0))
                size += 1
                factors, codes, size = bfs(matrix, to_visit, factors, codes, size)
    return factors, codes, size
| [
"matplotlib.pyplot.ylabel",
"seaborn.set_style",
"matplotlib.pyplot.GridSpec",
"seaborn.color_palette",
"pathlib.Path",
"matplotlib.pyplot.xlabel",
"numpy.max",
"numpy.linspace",
"os.path.isdir",
"pandas.DataFrame",
"matplotlib.use",
"seaborn.set_context",
"numpy.around",
"matplotlib.pyplo... | [((838, 859), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (852, 859), False, 'import matplotlib\n'), ((1703, 1773), 'seaborn.set_context', 'sns.set_context', (['"""notebook"""'], {'font_scale': '(1.5)', 'rc': "{'lines.linewidth': 2}"}), "('notebook', font_scale=1.5, rc={'lines.linewidth': 2})\n", (1718, 1773), True, 'import seaborn as sns\n'), ((1778, 1804), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (1791, 1804), True, 'import seaborn as sns\n'), ((1818, 1832), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1830, 1832), True, 'import matplotlib.pyplot as plt\n'), ((1849, 1892), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['(1)', '(15)'], {'hspace': '(0.2)', 'wspace': '(1.2)'}), '(1, 15, hspace=0.2, wspace=1.2)\n', (1861, 1892), True, 'import matplotlib.pyplot as plt\n'), ((1902, 1932), 'matplotlib.pyplot.subplot', 'plt.subplot', (['plot_grid[:, :-1]'], {}), '(plot_grid[:, :-1])\n', (1913, 1932), True, 'import matplotlib.pyplot as plt\n'), ((2182, 2213), 'seaborn.color_palette', 'sns.color_palette', (['"""Blues"""', '(256)'], {}), "('Blues', 256)\n", (2199, 2213), True, 'import seaborn as sns\n'), ((2929, 2947), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (2939, 2947), True, 'import matplotlib.pyplot as plt\n'), ((2952, 2970), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (2962, 2970), True, 'import matplotlib.pyplot as plt\n'), ((3034, 3063), 'matplotlib.pyplot.subplot', 'plt.subplot', (['plot_grid[:, -1]'], {}), '(plot_grid[:, -1])\n', (3045, 3063), True, 'import matplotlib.pyplot as plt\n'), ((4635, 4667), 'numpy.linspace', 'np.linspace', (['(0)', 'max_val', '(256)', 'ax'], {}), '(0, max_val, 256, ax)\n', (4646, 4667), True, 'import numpy as np\n'), ((5895, 5965), 'seaborn.set_context', 'sns.set_context', (['"""notebook"""'], {'font_scale': '(1.5)', 'rc': "{'lines.linewidth': 2}"}), "('notebook', 
font_scale=1.5, rc={'lines.linewidth': 2})\n", (5910, 5965), True, 'import seaborn as sns\n'), ((5970, 5996), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (5983, 5996), True, 'import seaborn as sns\n'), ((6011, 6025), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6023, 6025), True, 'import matplotlib.pyplot as plt\n'), ((6040, 6059), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (6057, 6059), True, 'import seaborn as sns\n'), ((6622, 6688), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper center"""', 'bbox_to_anchor': '(0.5, 1.25)', 'ncol': '(2)'}), "(loc='upper center', bbox_to_anchor=(0.5, 1.25), ncol=2)\n", (6632, 6688), True, 'import matplotlib.pyplot as plt\n'), ((6693, 6716), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Threshold"""'], {}), "('Threshold')\n", (6703, 6716), True, 'import matplotlib.pyplot as plt\n'), ((6721, 6752), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of Factors"""'], {}), "('Number of Factors')\n", (6731, 6752), True, 'import matplotlib.pyplot as plt\n'), ((6862, 6901), 'os.path.join', 'os.path.join', (['output_dir', "(name + '.png')"], {}), "(output_dir, name + '.png')\n", (6874, 6901), False, 'import os\n'), ((7423, 7445), 'numpy.zeros', 'np.zeros', (['tmp.shape[0]'], {}), '(tmp.shape[0])\n', (7431, 7445), True, 'import numpy as np\n'), ((7458, 7480), 'numpy.zeros', 'np.zeros', (['tmp.shape[1]'], {}), '(tmp.shape[1])\n', (7466, 7480), True, 'import numpy as np\n'), ((1975, 1989), 'numpy.max', 'np.max', (['matrix'], {}), '(matrix)\n', (1981, 1989), True, 'import numpy as np\n'), ((3119, 3144), 'os.path.isdir', 'os.path.isdir', (['output_dir'], {}), '(output_dir)\n', (3132, 3144), False, 'import os\n'), ((4952, 4978), 'numpy.linspace', 'np.linspace', (['(0)', 'max_val', '(3)'], {}), '(0, max_val, 3)\n', (4963, 4978), True, 'import numpy as np\n'), ((6391, 6431), 'numpy.around', 'np.around', (['thresholds[thresholds_ids]', '(2)'], 
{}), '(thresholds[thresholds_ids], 2)\n', (6400, 6431), True, 'import numpy as np\n'), ((6564, 6616), 'numpy.linspace', 'np.linspace', (['(0)', 'matrix.shape[0]', '(matrix.shape[0] + 1)'], {}), '(0, matrix.shape[0], matrix.shape[0] + 1)\n', (6575, 6616), True, 'import numpy as np\n'), ((6764, 6789), 'os.path.isdir', 'os.path.isdir', (['output_dir'], {}), '(output_dir)\n', (6777, 6789), False, 'import os\n'), ((2071, 2085), 'numpy.max', 'np.max', (['matrix'], {}), '(matrix)\n', (2077, 2085), True, 'import numpy as np\n'), ((8323, 8342), 'numpy.sum', 'np.sum', (['tmp'], {'axis': '(1)'}), '(tmp, axis=1)\n', (8329, 8342), True, 'import numpy as np\n'), ((3154, 3178), 'pathlib.Path', 'pathlib.Path', (['output_dir'], {}), '(output_dir)\n', (3166, 3178), False, 'import pathlib\n'), ((3744, 3764), 'pandas.DataFrame', 'pd.DataFrame', (['matrix'], {}), '(matrix)\n', (3756, 3764), True, 'import pandas as pd\n'), ((6799, 6823), 'pathlib.Path', 'pathlib.Path', (['output_dir'], {}), '(output_dir)\n', (6811, 6823), False, 'import pathlib\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 18 09:17:23 2018
@author: Manuel
the pentaton logic go around II
"""
import random

# Seed the module-wide RNG so board layout and card draws are reproducible.
random.seed(0)
# Deterministic smoke test: prints the same 5-bit integer on every run.
print(random.getrandbits(5))
# =============================================================================
#
# Variables
# =============================================================================
# one board track with all tiles from start to finish
# NOTE(review): placeholder only; re-assigned under the __main__ guard and
# read by Player.tile_check via `global tile_dict`.
tile_dict = {}
# =============================================================================
#
# aBout the Board:
# =============================================================================
class Board:
    """Game board made of five parallel tracks of challenge tiles.

    Every tile maps two randomly chosen attribute names to a random
    difficulty value; difficulty bounds rise by 3 per tier along a track.
    The board itself also carries one sample tile whose values are
    mirrored onto the Wit/Stren/Dex/Intel attributes (None when absent).
    """

    def __init__(self, length, min_val, max_val):
        self.min_val = min_val
        self.max_val = max_val
        # One sample tile for the board itself.
        self.value = self.create_tile(min_val, max_val)
        self.length = length
        self.track1 = self.build_track()
        self.track2 = self.build_track()
        self.track3 = self.build_track()
        self.track4 = self.build_track()
        self.track5 = self.build_track()
        self.dict_ = {'Tracks': [self.track1, self.track2, self.track3, self.track4, self.track5]}
        # Mirror the sample tile onto attribute slots; missing keys -> None.
        self.Wit = self.value.get('Wit')
        self.Stren = self.value.get('Stren')
        self.Dex = self.value.get('Dex')
        self.Intel = self.value.get('Intel')

    def create_tile(self, min_val, max_val):
        """Return a tile: two random attribute names mapped to random values."""
        chosen = random.sample(['Wit', 'Stren', 'Dex', 'Intel'], 2)
        return {name: random.randrange(min_val, max_val + 1) for name in chosen}

    def build_track(self):
        """Build one track of self.length tiles with rising difficulty."""
        track = {}
        low, high = self.min_val, self.max_val
        for tier in range(self.length):
            track[tier] = self.create_tile(low, high)
            low += 3
            high += 3
        print('tile_track created.')
        return track
# --- Ad-hoc smoke tests for Board (print-heavy, executed at import time) ---
board = Board(10, 1, 4)
print(board.track1, board.track2)
# NOTE(review): each Board(...) below builds a brand-new board just to read
# one track, so track1/track2 here do not belong to `board`.
track1 = Board(10, 1, 4).track1
print(track1)
track2 = Board(10, 1, 4).track2
print(track2)
print(board.dict_['Tracks'][3])
# Board defines no __str__ (the drafts are commented out above), so this
# prints the default object repr.
print(board)
tr_dict = board.dict_
for i in tr_dict['Tracks']:
    print('\n\n', i)
# =============================================================================
#
# concerning Attributes:
# =============================================================================
class Attribute:
    """A single numeric character attribute (base class for Wit & co)."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        # Display just the raw numeric value.
        return str(self.value)

    def increase_value(self, increment):
        """Raise the attribute by ``increment`` (may be negative)."""
        self.value = self.value + increment

    def getValue(self):
        """Return the attribute value as an int."""
        return int(self.value)
class Wit(Attribute):
    """Wit attribute with its flavour ability.

    The original left a silent gap for values 17-20: the 'pretty good'
    branch stopped at 16 while 'woaah' only starts above 20. That band
    is now covered by 'pretty good'.
    """

    def ability(self):
        """Print a progress message for the current attribute value."""
        if self.value < 6:
            print('you noob')
        if self.value > 5 and self.value < 11:
            print('keep growing')
        elif self.value > 10 and self.value < 16:
            print('getting there')
        elif self.value > 15 and self.value < 21:
            # was `< 17`, which silently skipped values 17-20
            print('pretty good')
        elif self.value > 20:
            print('woaah')
class Stren(Attribute):
    """Strength attribute with its flavour ability.

    The original left a silent gap for values 17-20: the 'pretty good'
    branch stopped at 16 while 'woaah' only starts above 20. That band
    is now covered by 'pretty good'.
    """

    def ability(self):
        """Print a progress message for the current attribute value."""
        if self.value < 6:
            print('you noob')
        if self.value > 5 and self.value < 11:
            print('keep growing')
        elif self.value > 10 and self.value < 16:
            print('getting there')
        elif self.value > 15 and self.value < 21:
            # was `< 17`, which silently skipped values 17-20
            print('pretty good')
        elif self.value > 20:
            print('woaah')
class Dex(Attribute):
    """Dexterity attribute with its flavour ability.

    The original left a silent gap for values 17-20: the 'pretty good'
    branch stopped at 16 while 'woaah' only starts above 20. That band
    is now covered by 'pretty good'.
    """

    def ability(self):
        """Print a progress message for the current attribute value."""
        if self.value < 6:
            print('you noob')
        if self.value > 5 and self.value < 11:
            print('keep growing')
        elif self.value > 10 and self.value < 16:
            print('getting there')
        elif self.value > 15 and self.value < 21:
            # was `< 17`, which silently skipped values 17-20
            print('pretty good')
        elif self.value > 20:
            print('woaah')
class Intel(Attribute):
    """Intelligence attribute with its flavour ability.

    The original left a silent gap for values 17-20: the 'pretty good'
    branch stopped at 16 while 'woaah' only starts above 20. That band
    is now covered by 'pretty good'.
    """

    def ability(self):
        """Print a progress message for the current attribute value."""
        if self.value < 6:
            print('you noob')
        if self.value > 5 and self.value < 11:
            print('keep growing')
        elif self.value > 10 and self.value < 16:
            print('getting there')
        elif self.value > 15 and self.value < 21:
            # was `< 17`, which silently skipped values 17-20
            print('pretty good')
        elif self.value > 20:
            print('woaah')
"""
test = Wit(5)
print(test)
test.getValue()
test.increase_value(2)
test.getValue()
"""
# =============================================================================
#
# playing with the Player:
# =============================================================================
class Player():
    """A game participant holding the four attributes, a hand of cards
    and a position (track / completed tier) on the board."""
    def __init__(self, name, text, Wit, Stren, Dex, Intel, ):
        self.name = name
        self.text = text
        # Attribute objects (instances of the Attribute subclasses).
        self.Wit = Wit
        self.Stren = Stren
        self.Dex = Dex
        self.Intel = Intel
        # Canonical attribute names, used to validate user input.
        self.atr_tup = ('Wit', 'Stren', 'Dex', 'Intel')
        self.atr_self_tup = (self.Wit, self.Stren, self.Dex, self.Intel)
        # Board position: current track index and number of cleared tiers.
        self.track = 0
        self.tier_complete = 0
        self.hand = []
    def __str__(self):
        return "\nPlayer Name: {} \n{}'s Wit: {} \n{}'s Stren: {} \n{}'s Dex: {}\
        \n{}'s Intel: {} {} Cards: {}".format\
        (self.name, self.name, self.Wit, self.name, self.Stren, self.name, self.Dex,\
        self.name, self.Intel, self.name, self.hand)
    def test_attribute(self, attribute, test_value):
        """Compare an attribute value against a tile's required value;
        prints the outcome and returns True on success, False otherwise."""
        global tile_dict
        atr = attribute
        if atr >= test_value:
            print('you passed')
            return True
        else:
            print('you failed')
            return False
    def tile_check(self):
        """Let the player pick (via input()) which attribute of the current
        tile to challenge; on success increments tier_complete and triggers
        level_up(). Reads the module-level tile_dict."""
        tier = tile_dict['Tracks'][self.track][self.tier_complete]
        while True:
            print(tier)
            atr = input('\nwhich atr to challange: ').capitalize()
            if atr not in tier:
                print('\nno match, try agian..')
            else:
                print('\natrb choice accepted.')
                break
        # At most one of the four branches below matches the chosen name.
        if atr == 'Wit' and self.test_attribute(self.Wit.getValue(), tier[atr]) == True:
            self.tier_complete += 1; print('\ngz, you are in tier', self.tier_complete,' now.')
            self.level_up()
        if atr == 'Stren' and self.test_attribute(self.Stren.getValue(), tier[atr]) == True:
            self.tier_complete += 1; print('\ngz, you are in tier', self.tier_complete,' now.')
            self.level_up()
        if atr == 'Dex' and self.test_attribute(self.Dex.getValue(), tier[atr]) == True:
            self.tier_complete += 1; print('\ngz, you are in tier', self.tier_complete,' now.')
            self.level_up()
        if atr == 'Intel' and self.test_attribute(self.Intel.getValue(), tier[atr]) == True:
            self.tier_complete += 1; print('\ngz, you are in tier', self.tier_complete,' now.')
            self.level_up()
    def level_up(self):
        """Ask (via input()) which attribute to raise by +1 after a
        successful tile challenge."""
        print('\nyour current stats: Wit: {} Stren: {} Dex: {} Intel: {}.'.format\
        (self.Wit, self.Stren, self.Dex, self.Intel))
        while True:
            up = input('\nwhich atribute do you wanna level up(+1)? ').capitalize()
            if up not in self.atr_tup:
                print('\nplease retype, couldnt understand your input.')
            else:
                if up == 'Wit':
                    self.Wit.increase_value(1); print('\ngz, your new self:\n', self); break
                if up == 'Stren':
                    self.Stren.increase_value(1); print('\ngz, your new self:\n', self); break
                if up == 'Dex':
                    self.Dex.increase_value(1); print('\ngz, your new self:\n', self); break
                if up == 'Intel':
                    self.Intel.increase_value(1); print('\ngz, your new self:\n', self); break
    def draw_card(self, amount):
        """Draw random cards from the global deck into self.hand.

        NOTE(review): `while i <= amount` draws amount+1 cards -- confirm
        whether that is intended.
        NOTE(review): the Deck class defined below exposes `container` and
        `container_size()`, not `deck`/`deck_size()`; these calls look like
        they would raise AttributeError at runtime -- verify.
        """
        i = 0
        while i <= amount:
            try:
                rand_index = random.randrange(0, deck.deck_size())
                drawn_card = deck.deck[rand_index]
                drawn_card.location = 'hand'
                self.hand.append(drawn_card)
                deck.deck.pop(rand_index)
                print(self.hand)
                i += 1
            except ValueError:
                print('pile exhausted, shuffle yard back to deck\n raised by draw_card.')
                break
    def discard_card(self):
        # Not implemented yet.
        pass
    def play_card(self):
        # Not implemented yet.
        pass
    def return_from_yard(self):
        # Not implemented yet.
        pass
"""
drizzt = Player('Drizzt', 'can kill stuff', Wit(21), Stren(5), Dex(5), Intel(5))
print(drizzt)
drizzt.Dex.increase_value(15)
print(drizzt)
drizzt.Dex.getValue()
drizzt.test_attribute(drizzt.Dex.getValue(), 11)
type(drizzt.Dex)
drizzt.tile_check()
drizzt.Wit.ability()
"""
# Module-level demo player, created at import time for manual testing.
drizzt = Player('Drizzt', 'can kill stuff', Wit(21), Stren(5), Dex(5), Intel(5))
print(drizzt)
# =============================================================================
#
# just read the fucking card:
# =============================================================================
class Card():
    """A playable card that modifies one attribute of a target player.

    Attributes:
        name: Display name of the card.
        text: Flavour/rules text.
        atr: Name of the attribute it modifies ('Wit', 'Stren', 'Dex', 'Intel').
        atri_mod: Signed integer applied to the target attribute.
        temp: Whether the modifier is temporary. NOTE(review): informational
            only for now -- the original had byte-identical temp/perm
            branches, so no temporary/permanent distinction is implemented.
        container: Scratch list of cards (used by show_cards).
    """

    def __init__(self, name, text, atr, atri_mod, temp):
        self.name = name
        self.text = text
        self.atr = atr
        self.atri_mod = atri_mod
        self.temp = temp
        self.container = []

    def __str__(self):
        return '\nCard name: {}\nText: {}\nModifies: {}\nBy: {}\nTemp: {}'.format(
            self.name, self.text, self.atr, self.atri_mod, self.temp)

    def test_card(self):
        """Return True when the card carries a usable integer modifier."""
        return isinstance(self.atri_mod, int)

    def show_cards(self):
        """Print every card collected in this card's container."""
        for card in self.container:
            print(card)

    def mod_player(self, player):
        """Apply atri_mod to the matching attribute of ``player``.

        Fixes two defects in the original implementation:
        * every non-matching attribute branch fell into its ``else`` and
          reset an unrelated attribute to 0;
        * the 'Intel' branch modified the player's Wit instead of Intel.

        The attribute is clamped at zero: when the modifier would push it
        negative, it is replaced with a fresh zero-valued instance of the
        same attribute class (mirroring the original's intent).
        """
        if self.test_card() and self.atr in dir(player):
            target = getattr(player, self.atr)
            if target.getValue() + self.atri_mod < 0:
                # Clamp: never let an attribute drop below zero.
                setattr(player, self.atr, type(target)(0))
            else:
                target.increase_value(self.atri_mod)
        else:
            print('it is not a moddable target.')
"""
testCard = Card('Strength Potion', 'adds +2 to your strength', 'Stren', 10, True)
print(testCard)
a = testCard.test_card()
print(a)
testCard.mod_player(drizzt)
print(drizzt)
#print(drizzt.Stren.getValue)
#print(dir(Player))
"""
# =============================================================================
#
# all the cards are in contianers:
# =============================================================================
class Card_container:
    """Base class for any pile of cards (deck, hand, graveyard).

    Fixes an initialization-order defect: the original measured the
    pile's size before ``self.container`` existed, so instantiating the
    base class directly raised AttributeError.
    """

    def __init__(self):
        self.name = 'base_container'
        # The container must exist before anything measures it.
        self.container = []
        self.size = self.container_size()

    def __str__(self):
        result = ""
        for c in self.container:
            result += c.__str__() + '\n'
        return result

    def initialize_container(self):
        """Load the card list from disk when this pile is the deck.

        Returns a list of Card objects for the deck, or None for any
        other pile (preserving the original behaviour).
        """
        if self.name == 'deck':
            # Forward slashes work on every platform (the original used a
            # Windows-only backslash path).
            file_path = 'pent_resc/cards.txt'
            deck_list = []
            # `with` guarantees the file handle is closed.
            with open(file_path, 'r', encoding='utf-8') as fopen:
                for line in fopen:
                    text_list = line.split(', ')
                    # bool(non-empty string) is always True, so the original
                    # parsed every card as temporary; compare the text instead.
                    temp_flag = text_list[4].strip() == 'True'
                    list_format = [text_list[0], text_list[1], text_list[2],
                                   int(text_list[3]), temp_flag]
                    deck_list.append(Card(*list_format))
            return deck_list

    def container_size(self):
        """Return the number of cards held (0 for an empty pile).

        The original returned None for an empty pile; 0 keeps the same
        falsiness while staying arithmetic-safe.
        """
        count = len(self.container)
        if count == 0:
            print('your pile is empty.')
        return count
class Deck(Card_container):
    """The draw pile, populated from pent_resc/cards.txt."""

    def __init__(self):
        # NOTE(review): the original called super(type(self)).__init__(),
        # which builds an *unbound* super object and re-initializes it --
        # the base-class initializer never actually ran. It is skipped on
        # purpose here too: this class sets every attribute itself and
        # relies on `name` being 'deck' before loading the card file.
        self.name = 'deck'
        self.container = self.initialize_container()
        self.size = self.container_size()
# Build the global draw pile at import time (reads the cards file).
deck = Deck()
# NOTE(review): bare expression; has no effect outside an interactive session.
type(deck)
print(deck)
print(deck.container)
# NOTE(review): bare attribute access; has no effect outside a REPL.
deck.size
class Hand(Card_container):
    """The cards a player currently holds.

    Fixes the original initialization order, which computed
    ``container_size()`` before ``self.container`` existed and therefore
    raised AttributeError on instantiation.
    """

    def __init__(self):
        # The container must exist before anything measures it.
        self.container = []
        self.name = 'hand'
        self.size = self.container_size()
        super().__init__()
        # The base initializer resets name to 'base_container'; restore it.
        self.name = 'hand'
class Graveyard(Card_container):
    """Discard pile for used cards.

    Same initialization-order fix as Hand: the container must exist
    before its size is measured.
    """

    def __init__(self):
        # The container must exist before anything measures it.
        self.container = []
        self.name = 'yard'
        self.size = self.container_size()
        super().__init__()
        # The base initializer resets name to 'base_container'; restore it.
        self.name = 'yard'

    def schuffle_to_deck(self):
        """Move the graveyard back into the deck (not implemented yet).

        NOTE(review): the original signature lacked ``self``, so calling
        this on an instance raised TypeError. The misspelled name
        ('schuffle') is kept so existing call sites keep working.
        """
        pass
# =============================================================================
#
# game logic - functional
# =============================================================================
if __name__ == '__main__':
    # Replace the module-level placeholder with a fresh board layout
    # (Player.tile_check reads this global) and dump all five tracks.
    tile_dict = Board(10, 1, 4).dict_
    for i in tile_dict['Tracks']:
        print('\n', i)
'''
class C_test():
def __init__(self):
self.container = self.initialize_container()
def __str__(self):
result = ""
for c in self.container:
result += c.__str__() + '\n'
return result
def initialize_container(self):
file_path = 'pent_resc/cards.txt'
deck_list = []
fopen = open(file_path, 'r')
for line in fopen:
text_list = line.split(', ')
list_format = [text_list[0], text_list[1], text_list[2], int(text_list[3]), bool(text_list[4])]
deck_list.append(Card(*list_format))
fopen.close()
return deck_list
test = C_test()
test.initialize_container()
print(test)
#test2 = C_test()
#print(test2)
#test2.show_cards()
''' | [
"random.sample",
"random.getrandbits",
"random.seed",
"random.randrange"
] | [((149, 163), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (160, 163), False, 'import random\n'), ((171, 192), 'random.getrandbits', 'random.getrandbits', (['(5)'], {}), '(5)\n', (189, 192), False, 'import random\n'), ((1893, 1914), 'random.sample', 'random.sample', (['pos', '(2)'], {}), '(pos, 2)\n', (1906, 1914), False, 'import random\n'), ((1992, 2030), 'random.randrange', 'random.randrange', (['min_val', '(max_val + 1)'], {}), '(min_val, max_val + 1)\n', (2008, 2030), False, 'import random\n')] |
import scrapy
from aroay_cloudscraper import CloudScraperRequest
class JavdbSpider(scrapy.Spider):
    """Scrapy spider that fetches a single javdb.com video page through
    aroay_cloudscraper (Cloudflare-bypassing request) and dumps the HTML."""
    name = 'javdb'
    allowed_domains = ['javdb.com']
    # NOTE(review): defined but never passed to the request below -- this
    # Accept-Language preference is currently unused; verify intent.
    headers = {"Accept-Language": "zh-cn;q=0.8,en-US;q=0.6"}

    def start_requests(self):
        # Single hard-coded entry URL; the response is handled by parse().
        yield CloudScraperRequest("https://javdb.com/v/BOeQO", callback=self.parse
                                  )

    def parse(self, response):
        # Dump the raw page HTML for inspection.
        print(response.text)
| [
"aroay_cloudscraper.CloudScraperRequest"
] | [((262, 331), 'aroay_cloudscraper.CloudScraperRequest', 'CloudScraperRequest', (['"""https://javdb.com/v/BOeQO"""'], {'callback': 'self.parse'}), "('https://javdb.com/v/BOeQO', callback=self.parse)\n", (281, 331), False, 'from aroay_cloudscraper import CloudScraperRequest\n')] |
# *=========================================================================
# *
# * Copyright Erasmus MC Rotterdam and contributors
# * This software is licensed under the Apache 2 license, quoted below.
# * Copyright 2019 Erasmus MC Rotterdam.
# * Copyright 2019 <NAME> <<EMAIL>>
# * Licensed under the Apache License, Version 2.0 (the "License"); you may not
# * use this file except in compliance with the License. You may obtain a copy of
# * the License at
# * http: //www.apache.org/licenses/LICENSE-2.0
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# * License for the specific language governing permissions and limitations under
# * the License.
# *=========================================================================
from Parameter.Parameter import Parameter as Par
from ExpSettings.DatasetBase import DatasetBase
from ExpSettings.Dataset.SyntheticImages.Environment import Environment
import os
# Absolute directory containing this module; the image and settings
# paths below are built relative to it.
__selfPath = os.path.dirname(os.path.realpath(__file__))
def GetParameters():
    """Build the two tunable registration parameters for the simulated
    dataset, each mapped through 2**x before use."""
    def to_power_of_two(exponent):
        # Both parameters are sampled in log2 space.
        return pow(2, exponent)

    metric_weight = Par("Metric1Weight", "Gauss", 4.12, 2.65)
    metric_weight.SetMapFunct(to_power_of_two)
    grid_spacing = Par("FinalGridSpacingInPhysicalUnits", "Gauss", 4.37, 0.55)
    grid_spacing.SetMapFunct(to_power_of_two)
    return [metric_weight, grid_spacing]
"""
@brief: Used to generate weights file from PCE executable for registration sampling locations .
@return: NA.
"""
__DATASET_SIZE = 30
def GetFixedImage(ind: int):
    """Return the fixed (noisy) image path; the same file for every index."""
    return "{}/Images/ImFlatN.mhd".format(__selfPath)
def GetFixedImageSegmentation(ind: int):
    """Return the fixed image's segmentation path; same file for every index."""
    return "{}/Images/ImFlat.mhd".format(__selfPath)
def GetMovingImage(ind: int):
    """Return the path of the (noisy) moving image for sample ``ind``."""
    return "{}/Images/Im{}N.mhd".format(__selfPath, ind)
def GetMovingImageSegmentation(ind: int):
    """Return the segmentation path of the moving image for sample ``ind``."""
    return "{}/Images/Im{}.mhd".format(__selfPath, ind)
def GetDataset(ind: int):
    """Collect the four image/segmentation paths for sample ``ind``."""
    return {
        "fixedIm": GetFixedImage(ind),
        "movingIm": GetMovingImage(ind),
        "fixedSeg": GetFixedImageSegmentation(ind),
        "movingSeg": GetMovingImageSegmentation(ind),
    }
def GetPceSettingsFile():
    """Return the path of the PCE settings JSON next to this module."""
    return "{}/PceSettings.json".format(__selfPath)
class Dataset(DatasetBase):
    """Synthetic-images dataset descriptor handed to the experiment runner."""

    def __init__(self):
        pass

    def GetDatasetSize(self):
        """Return the number of samples in the dataset.

        The original returned ``__DATASET_SIZE`` directly; inside a class
        body that identifier is name-mangled to ``_Dataset__DATASET_SIZE``
        and raised NameError at call time. Look the module-level constant
        up explicitly instead.
        """
        return globals()['__DATASET_SIZE']

    def GetDatasetWithIndex(self, ind: int):
        """Return the image/segmentation paths for sample ``ind``."""
        return GetDataset(ind)

    def GetMethodExtensionParams(self, ind: int):
        """Extra command-line parameters for the registration method."""
        return {"commandlineParameters": {}}

    def GetModeExtensionParams(self, ind: int):
        """Sampling configuration for the evaluation mode."""
        return {"sampleSize": 100, "batchSize": 50, "isVector": True}

    def GetParameters(self, datasetIndex):
        """Return the tunable parameters (module-level GetParameters)."""
        return GetParameters()

    def GetEnvironment(self, rootDir):
        """Build the execution environment rooted at ``rootDir``."""
        return Environment(rootDir)
| [
"os.path.realpath",
"ExpSettings.Dataset.SyntheticImages.Environment.Environment",
"Parameter.Parameter.Parameter"
] | [((1142, 1168), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1158, 1168), False, 'import os\n'), ((1309, 1350), 'Parameter.Parameter.Parameter', 'Par', (['"""Metric1Weight"""', '"""Gauss"""', '(4.12)', '(2.65)'], {}), "('Metric1Weight', 'Gauss', 4.12, 2.65)\n", (1312, 1350), True, 'from Parameter.Parameter import Parameter as Par\n'), ((1421, 1480), 'Parameter.Parameter.Parameter', 'Par', (['"""FinalGridSpacingInPhysicalUnits"""', '"""Gauss"""', '(4.37)', '(0.55)'], {}), "('FinalGridSpacingInPhysicalUnits', 'Gauss', 4.37, 0.55)\n", (1424, 1480), True, 'from Parameter.Parameter import Parameter as Par\n'), ((2994, 3014), 'ExpSettings.Dataset.SyntheticImages.Environment.Environment', 'Environment', (['rootDir'], {}), '(rootDir)\n', (3005, 3014), False, 'from ExpSettings.Dataset.SyntheticImages.Environment import Environment\n')] |
import argparse
import cv2
import numpy as np
from inference import Network
from openvino.inference_engine import IENetwork, IECore
import pylab as plt
import math
import matplotlib
from scipy.ndimage.filters import gaussian_filter
INPUT_STREAM = "emotion.mp4"
CPU_EXTENSION = "C:\\Program Files (x86)\\IntelSWTools\\openvino\\deployment_tools\\inference_engine\\bin\\intel64\\Release\\cpu_extension_avx2.dll"
MODEL = "C:/Users/gremi/Documents/Julien/udacity_intel/models/intel/emotions-recognition-retail-0003/INT8/emotions-recognition-retail-0003.xml"
# if linux : /opt/intel/openvino/deployment_tools/inference_engine/lib/intel64/libcpu_extension_sse4.so"
COLORS = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
[0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \
[170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
EMOTIONS = ['neutral', 'happy', 'sad', 'surprise', 'anger']
def get_args():
    """Build and parse the command-line arguments for the demo.

    Returns the parsed namespace with: i (input video location),
    d (device name), t (confidence threshold) and c (bounding-box colour).
    """
    parser = argparse.ArgumentParser("Run inference on an input video")
    # Help texts for the four flags.
    i_desc = "The location of the input file"
    d_desc = "The device name, if not 'CPU'"
    t_desc = "The confidence thresholds used to draw bounding boxes"
    c_desc = "The color name of the bounding boxes"
    # Drop the default groups and regroup everything under an explicit
    # 'optional arguments' section (uses argparse's private attribute).
    parser._action_groups.pop()
    optional = parser.add_argument_group('optional arguments')
    optional.add_argument("-i", help=i_desc, default=INPUT_STREAM)
    optional.add_argument("-d", help=d_desc, default='CPU')
    optional.add_argument("-t", help=t_desc, default=0.2)
    optional.add_argument("-c", help=c_desc, default="green")
    return parser.parse_args()
def preprocessing(input_image, height, width):
    """Resize an image and reshape it for network input.

    Resizes to (width, height), moves the channel axis first and
    prepends a batch axis of 1: (H, W, C) -> (1, C, H, W).
    """
    resized = cv2.resize(input_image, (width, height))
    channels_first = resized.transpose((2, 0, 1))
    # Add the leading batch dimension expected by the network.
    return channels_first.reshape(1, *channels_first.shape)
def get_mask(processed_output):
    """Turn a single-channel detection map into a green 3-channel mask.

    The input becomes the middle (green) channel; the other two channels
    are all zeros, so the mask can be added onto an image of the same
    spatial size.
    """
    zero_channel = np.zeros(processed_output.shape)
    return np.dstack((zero_channel, processed_output, zero_channel))
def create_output_image(image, output):
    """Build a uint8 semantic mask from the raw inference heatmaps.

    Args:
        image: Original frame; accepted for interface compatibility but
            currently unused -- the function returns only the mask.
        output: Array of per-class heatmaps; the final channel is dropped
            before combining.

    Returns:
        The green semantic mask as a uint8 array (not blended with
        ``image``; the original also returned only the mask).
    """
    # Remove the final part of the output, which is not used for heatmaps.
    heatmaps = output[:-1]
    # Collapse the "class" axis into a single detection map.
    combined = np.sum(heatmaps, axis=0)
    # NOTE(review): the original also computed `image + pose_mask` and
    # discarded the result; that dead work (and the commented-out
    # thresholding loop) is removed here -- the returned value is unchanged.
    pose_mask = get_mask(combined)
    return pose_mask.astype('uint8')
def infer_on_video(args):
    '''
    Performs inference on video - main method.

    Loads the emotion-recognition network, reads the input video frame by
    frame, classifies each frame, overlays the detected emotion label and
    writes the annotated frames to 'out-' + INPUT_STREAM.
    '''
    ### Load the network model into the IE
    print("Load the network model into the IE")
    net = Network()
    net.load_model(MODEL, "CPU", CPU_EXTENSION)
    # Get and open video capture
    cap = cv2.VideoCapture(args.i)
    cap.open(args.i)
    # Grab the shape of the input
    width = int(cap.get(3))
    height = int(cap.get(4))
    # Create a video writer for the output video
    # The second argument should be `cv2.VideoWriter_fourcc('M','J','P','G')`
    # on Mac, and `0x00000021` on Linux
    # NOTE(review): the output name uses the INPUT_STREAM constant, not
    # args.i -- a custom -i input still writes to 'out-emotion.mp4'.
    out = cv2.VideoWriter('out-' + INPUT_STREAM, 0x00000021, 30, (width,height))
    # Process frames until the video ends, or process is exited
    frame_count = 0;
    while cap.isOpened():
        # Read the next frame
        flag, frame = cap.read()
        if not flag:
            break
        key_pressed = cv2.waitKey(60)
        # NOTE(review): indices [2]/[3] are assumed to be the H/W of the
        # network input blob -- confirm against Network.get_input_shape().
        preprocessed_frame = preprocessing(frame, net.get_input_shape()[2], net.get_input_shape()[3])
        #print("Perform inference on the frame")
        net.async_inference(preprocessed_frame)
        if net.wait() == 0:
            # Get the output of inference
            output_blobs = net.extract_output()
            probs = output_blobs['prob_emotion'][0]
            index_of_maximum = np.argmax(probs)
            emotion = EMOTIONS[index_of_maximum]
            # When 'neutral' (index 0) wins, suppress it and append the
            # runner-up emotion in parentheses.
            if index_of_maximum == 0:
                probs[0] = 0
                emotion = emotion + " (" + EMOTIONS[np.argmax(probs)] + ")"
            print("emotion=", emotion)
            # Scale the output text by the image shape
            scaler = max(int(frame.shape[0] / 1000), 1)
            # Write the text of color and type onto the image
            frame = cv2.putText(frame,
                        "Detected: {}".format(emotion),
                        (750 * scaler, 50 * scaler), cv2.FONT_HERSHEY_SIMPLEX,
                        scaler, (0, 0, 0), 3 * scaler)
            # Write a frame here for debug purpose
            #cv2.imwrite("frame" + str(frame_count) + ".png", frame)
            # Write out the frame in the video
            out.write(frame)
            # frame count
            frame_count = frame_count + 1
        # Break if escape key pressed
        if key_pressed == 27:
            break
    # Release the out writer, capture, and destroy any OpenCV windows
    out.release()
    cap.release()
    cv2.destroyAllWindows()
def main():
    """Entry point: parse CLI arguments and run inference on the video."""
    print("Starting")
    infer_on_video(get_args())
# Run the demo only when executed as a script (not when imported)
if __name__ == "__main__":
    main()
| [
"numpy.dstack",
"argparse.ArgumentParser",
"numpy.argmax",
"cv2.VideoWriter",
"numpy.sum",
"numpy.zeros",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"inference.Network",
"cv2.resize",
"cv2.waitKey"
] | [((1115, 1173), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Run inference on an input video"""'], {}), "('Run inference on an input video')\n", (1138, 1173), False, 'import argparse\n'), ((2370, 2410), 'cv2.resize', 'cv2.resize', (['input_image', '(width, height)'], {}), '(input_image, (width, height))\n', (2380, 2410), False, 'import cv2\n'), ((2907, 2939), 'numpy.zeros', 'np.zeros', (['processed_output.shape'], {}), '(processed_output.shape)\n', (2915, 2939), True, 'import numpy as np\n'), ((3004, 3047), 'numpy.dstack', 'np.dstack', (['(empty, processed_output, empty)'], {}), '((empty, processed_output, empty))\n', (3013, 3047), True, 'import numpy as np\n'), ((3461, 3483), 'numpy.sum', 'np.sum', (['output'], {'axis': '(0)'}), '(output, axis=0)\n', (3467, 3483), True, 'import numpy as np\n'), ((3866, 3875), 'inference.Network', 'Network', ([], {}), '()\n', (3873, 3875), False, 'from inference import Network\n'), ((3968, 3992), 'cv2.VideoCapture', 'cv2.VideoCapture', (['args.i'], {}), '(args.i)\n', (3984, 3992), False, 'import cv2\n'), ((4285, 4348), 'cv2.VideoWriter', 'cv2.VideoWriter', (["('out-' + INPUT_STREAM)", '(33)', '(30)', '(width, height)'], {}), "('out-' + INPUT_STREAM, 33, 30, (width, height))\n", (4300, 4348), False, 'import cv2\n'), ((6152, 6175), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (6173, 6175), False, 'import cv2\n'), ((4605, 4620), 'cv2.waitKey', 'cv2.waitKey', (['(60)'], {}), '(60)\n', (4616, 4620), False, 'import cv2\n'), ((5022, 5038), 'numpy.argmax', 'np.argmax', (['probs'], {}), '(probs)\n', (5031, 5038), True, 'import numpy as np\n'), ((5207, 5223), 'numpy.argmax', 'np.argmax', (['probs'], {}), '(probs)\n', (5216, 5223), True, 'import numpy as np\n')] |
# Copyright 2014 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import hmac
import urllib.parse
from werkzeug.exceptions import SecurityError
from warehouse.utils import random_token, vary_by
def _verify_csrf_origin(request):
# Determine the origin of this request
origin = request.headers.get("Origin", request.headers.get("Referer"))
# Fail if we were not able to locate an origin at all
if origin is None:
raise SecurityError("Origin checking failed - no Origin or Referer.")
# Parse the origin and host for comparison
origin_parsed = urllib.parse.urlparse(origin)
host_parsed = urllib.parse.urlparse(request.host_url)
# Fail if our origin is null
if origin == "null":
raise SecurityError(
"Origin checking failed - null does not match {}.".format(
urllib.parse.urlunparse(host_parsed[:2] + ("", "", "", ""))
)
)
# Fail if the received origin does not match the host
if ((origin_parsed.scheme, origin_parsed.hostname, origin_parsed.port) !=
(host_parsed.scheme, host_parsed.hostname, host_parsed.port)):
raise SecurityError(
"Origin checking failed - {} does not match {}.".format(
urllib.parse.urlunparse(origin_parsed[:2] + ("", "", "", "")),
urllib.parse.urlunparse(host_parsed[:2] + ("", "", "", "")),
)
)
def _verify_csrf_token(request):
# Get the token out of the session
# Note: We have to use the private request._session because
# request.session is not guaranteed to exist when this function is
# called.
csrf_token = request._session.get("user.csrf")
# Validate that we have a stored token, if we do not then we have nothing
# to compare the incoming token against.
if csrf_token is None:
raise SecurityError("CSRF token not set.")
# Attempt to look in the form data
request_token = request.form.get("csrf_token")
# Also attempt to look in the headers, this makes things like Ajax easier
# and PUT/DELETE possible.
request_token = request.headers.get("X-CSRF-Token", request_token)
# Validate that we have a token attached to this request somehow
if not request_token:
raise SecurityError("CSRF token missing.")
# Validate that the stored token and the request token match each other
if not hmac.compare_digest(csrf_token, request_token):
raise SecurityError("CSRF token incorrect.")
def _ensure_csrf_token(request):
# Store a token in the session if one doesn't exist there already
# Note: We have to use the private request._session because
# request.session is not guaranteed to exist when this function is
# called.
if not request._session.get("user.csrf"):
request._session["user.csrf"] = random_token()
# Store the fact that CSRF is in use for this request on the request
request._csrf = True
def handle_csrf(fn,
                _verify_origin=_verify_csrf_origin,
                _verify_token=_verify_csrf_token):
    """Wrap a dispatch function with CSRF origin/token verification.

    Views may opt in (view._csrf truthy), opt out (view._csrf falsy), or
    declare nothing at all; undeclared views are rejected on unsafe
    methods.
    """
    @functools.wraps(fn)
    def wrapped(self, view, app, request, *args, **kwargs):
        # Anything not defined as 'safe' by RFC2616 needs protection
        unsafe_method = request.method not in {"GET", "HEAD", "OPTIONS", "TRACE"}
        csrf_flag = getattr(view, "_csrf", None)

        if unsafe_method:
            if csrf_flag is None:
                # CSRF influences the response (it may require a
                # Vary: Cookie), so a view that has not declared a stance
                # cannot be safely dispatched on an unsafe method.
                raise SecurityError("No CSRF protection applied to view")
            if csrf_flag:
                # The view explicitly opted in: it handles token setup and
                # the Vary: Cookie header, so we can verify here.
                _verify_origin(request)
                _verify_token(request)

        # Store a token for this request *after* the validation above, so a
        # missing stored token stays a distinct failure from a mismatching
        # one.
        if csrf_flag:
            _ensure_csrf_token(request)

        # Safe method, opted-out view, or verified CSRF: process the
        # request normally.
        return fn(self, view, app, request, *args, **kwargs)

    # Lets dispatch_view verify that CSRF handling is in place
    wrapped._csrf_handled = True
    return wrapped
def csrf_protect(fn):
    """Opt the view function in to CSRF protection.

    The returned view is varied by the Cookie header, since CSRF state
    lives in the cookie-backed session.
    """
    fn._csrf = True
    decorate = vary_by("Cookie")
    return decorate(fn)
def csrf_exempt(fn):
    """Mark the view function as exempt from CSRF and return it unchanged."""
    fn._csrf = False
    return fn
def csrf_cycle(session):
    """Rotate the session's CSRF token by replacing it with a fresh one."""
    # Unconditionally overwrite any existing token (e.g. after a privilege
    # change such as login/logout)
    # Note: We have to use the session inside of the environ dictionary
    # because request.session does not exist when this function runs
    session["user.csrf"] = random_token()
| [
"warehouse.utils.vary_by",
"hmac.compare_digest",
"functools.wraps",
"warehouse.utils.random_token",
"werkzeug.exceptions.SecurityError"
] | [((3637, 3656), 'functools.wraps', 'functools.wraps', (['fn'], {}), '(fn)\n', (3652, 3656), False, 'import functools\n'), ((6356, 6370), 'warehouse.utils.random_token', 'random_token', ([], {}), '()\n', (6368, 6370), False, 'from warehouse.utils import random_token, vary_by\n'), ((963, 1026), 'werkzeug.exceptions.SecurityError', 'SecurityError', (['"""Origin checking failed - no Origin or Referer."""'], {}), "('Origin checking failed - no Origin or Referer.')\n", (976, 1026), False, 'from werkzeug.exceptions import SecurityError\n'), ((2389, 2425), 'werkzeug.exceptions.SecurityError', 'SecurityError', (['"""CSRF token not set."""'], {}), "('CSRF token not set.')\n", (2402, 2425), False, 'from werkzeug.exceptions import SecurityError\n'), ((2808, 2844), 'werkzeug.exceptions.SecurityError', 'SecurityError', (['"""CSRF token missing."""'], {}), "('CSRF token missing.')\n", (2821, 2844), False, 'from werkzeug.exceptions import SecurityError\n'), ((2933, 2979), 'hmac.compare_digest', 'hmac.compare_digest', (['csrf_token', 'request_token'], {}), '(csrf_token, request_token)\n', (2952, 2979), False, 'import hmac\n'), ((2995, 3033), 'werkzeug.exceptions.SecurityError', 'SecurityError', (['"""CSRF token incorrect."""'], {}), "('CSRF token incorrect.')\n", (3008, 3033), False, 'from werkzeug.exceptions import SecurityError\n'), ((3392, 3406), 'warehouse.utils.random_token', 'random_token', ([], {}), '()\n', (3404, 3406), False, 'from warehouse.utils import random_token, vary_by\n'), ((5911, 5928), 'warehouse.utils.vary_by', 'vary_by', (['"""Cookie"""'], {}), "('Cookie')\n", (5918, 5928), False, 'from warehouse.utils import random_token, vary_by\n'), ((4477, 4528), 'werkzeug.exceptions.SecurityError', 'SecurityError', (['"""No CSRF protection applied to view"""'], {}), "('No CSRF protection applied to view')\n", (4490, 4528), False, 'from werkzeug.exceptions import SecurityError\n')] |
"""Checks if any of the latests tests has performed considerably different than
the previous ones. Takes the log directory as an argument."""
import os
import sys
from testsuite_common import Result, processLogLine, bcolors, getLastTwoLines
LOGDIR = sys.argv[1] #Get the log directory as an argument
PERCENTAGE = 5 #Default threshold (in %) for how much a test should change
if len(sys.argv) == 3:
    PERCENTAGE = float(sys.argv[2]) #Optional second command-line parameter overrides the 5% default
def printResults(regressed, better, unchanged, firsttime):
    """Pretty print the results in different colours"""
    for item in regressed:
        print(bcolors.RED + "REGRESSION! " + item.testname + " Was: "
              + str(item.previous) + " Is: " + str(item.current) + " Change: "
              + str(abs(item.percentage)) + "%. Revision: " + item.revision
              + bcolors.ENDC)
    if regressed:
        print('\n')
    for item in unchanged:
        print(bcolors.BLUE + "UNCHANGED: " + item.testname + " Revision: "
              + item.revision + bcolors.ENDC)
    if unchanged:
        print('\n')
    for item in better:
        print(bcolors.GREEN + "IMPROVEMENT! " + item.testname + " Was: "
              + str(item.previous) + " Is: " + str(item.current) + " Change: "
              + str(abs(item.percentage)) + "%. Revision: " + item.revision
              + bcolors.ENDC)
    for item in firsttime:
        print(bcolors.PURPLE + "First time test! " + item.testname
              + " Took: " + str(item.real) + " seconds. Revision: "
              + item.revision + bcolors.ENDC)
all_files = os.listdir(LOGDIR)
# Buckets for each outcome category
regressed = []
better = []
unchanged = []
firsttime = []
#Go through all log files and find which tests have performed better.
for logfile in all_files:
    (line1, line2) = getLastTwoLines(logfile, LOGDIR)
    log1 = processLogLine(line1)
    if line2 == '\n': # Empty line, only one test ever run
        firsttime.append(log1)
        continue
    log2 = processLogLine(line2)
    res = Result(log1.testname, log1.real, log2.real, log2.revision,\
        log2.branch, log1.revision, log1.branch)
    # NOTE(review): regressions are detected via `res.percentage` but
    # improvements via `res.change` -- confirm both attributes exist on
    # Result and that the asymmetry is intentional.
    if res.percentage < -PERCENTAGE:
        regressed.append(res)
    elif res.change > PERCENTAGE:
        better.append(res)
    else:
        unchanged.append(res)
printResults(regressed, better, unchanged, firsttime)
| [
"testsuite_common.Result",
"os.listdir",
"testsuite_common.processLogLine",
"testsuite_common.getLastTwoLines"
] | [((1688, 1706), 'os.listdir', 'os.listdir', (['LOGDIR'], {}), '(LOGDIR)\n', (1698, 1706), False, 'import os\n'), ((1882, 1914), 'testsuite_common.getLastTwoLines', 'getLastTwoLines', (['logfile', 'LOGDIR'], {}), '(logfile, LOGDIR)\n', (1897, 1914), False, 'from testsuite_common import Result, processLogLine, bcolors, getLastTwoLines\n'), ((1926, 1947), 'testsuite_common.processLogLine', 'processLogLine', (['line1'], {}), '(line1)\n', (1940, 1947), False, 'from testsuite_common import Result, processLogLine, bcolors, getLastTwoLines\n'), ((2066, 2087), 'testsuite_common.processLogLine', 'processLogLine', (['line2'], {}), '(line2)\n', (2080, 2087), False, 'from testsuite_common import Result, processLogLine, bcolors, getLastTwoLines\n'), ((2098, 2201), 'testsuite_common.Result', 'Result', (['log1.testname', 'log1.real', 'log2.real', 'log2.revision', 'log2.branch', 'log1.revision', 'log1.branch'], {}), '(log1.testname, log1.real, log2.real, log2.revision, log2.branch,\n log1.revision, log1.branch)\n', (2104, 2201), False, 'from testsuite_common import Result, processLogLine, bcolors, getLastTwoLines\n')] |
import functools
from spaceone.api.repository.v1 import schema_pb2
from spaceone.core.pygrpc.message_type import *
from spaceone.repository.model.schema_model import Schema
from spaceone.repository.info.repository_info import RepositoryInfo
__all__ = ['SchemaInfo', 'SchemasInfo']
def SchemaInfo(schema_vo: Schema, minimal=False):
    """Convert a Schema model object into its gRPC SchemaInfo message.

    When minimal is True only the identifying fields are included.
    """
    info = {
        'name': schema_vo.name,
        'service_type': schema_vo.service_type
    }

    if not minimal:
        info['schema'] = change_struct_type(schema_vo.schema)
        info['labels'] = change_list_value_type(schema_vo.labels)
        info['tags'] = change_struct_type(schema_vo.tags)
        info['project_id'] = schema_vo.project_id
        info['domain_id'] = schema_vo.domain_id
        info['created_at'] = change_timestamp_type(schema_vo.created_at)

    # WARNING
    # Based on local_schema or remote_schema
    # vo has different repository or repository_info field
    repository = getattr(schema_vo, 'repository', None)
    if repository is not None:
        info['repository_info'] = RepositoryInfo(repository, minimal=True)

    repository_info = getattr(schema_vo, 'repository_info', None)
    if repository_info is not None:
        info['repository_info'] = RepositoryInfo(repository_info, minimal=True)

    return schema_pb2.SchemaInfo(**info)
def SchemasInfo(schema_vos, total_count):
    """Convert Schema model objects into a gRPC SchemasInfo message.

    schema_vos: iterable of Schema objects to convert (non-minimal form).
    total_count: total number of matching schemas (for pagination).
    """
    # functools.partial(SchemaInfo) bound no arguments and was a no-op
    # wrapper around the function; call SchemaInfo directly instead.
    results = [SchemaInfo(schema_vo) for schema_vo in schema_vos]
    return schema_pb2.SchemasInfo(results=results, total_count=total_count)
| [
"spaceone.api.repository.v1.schema_pb2.SchemasInfo",
"functools.partial",
"spaceone.api.repository.v1.schema_pb2.SchemaInfo",
"spaceone.repository.info.repository_info.RepositoryInfo"
] | [((1340, 1369), 'spaceone.api.repository.v1.schema_pb2.SchemaInfo', 'schema_pb2.SchemaInfo', ([], {}), '(**info)\n', (1361, 1369), False, 'from spaceone.api.repository.v1 import schema_pb2\n'), ((1492, 1556), 'spaceone.api.repository.v1.schema_pb2.SchemasInfo', 'schema_pb2.SchemasInfo', ([], {'results': 'results', 'total_count': 'total_count'}), '(results=results, total_count=total_count)\n', (1514, 1556), False, 'from spaceone.api.repository.v1 import schema_pb2\n'), ((1437, 1466), 'functools.partial', 'functools.partial', (['SchemaInfo'], {}), '(SchemaInfo)\n', (1454, 1466), False, 'import functools\n'), ((1088, 1138), 'spaceone.repository.info.repository_info.RepositoryInfo', 'RepositoryInfo', (['schema_vo.repository'], {'minimal': '(True)'}), '(schema_vo.repository, minimal=True)\n', (1102, 1138), False, 'from spaceone.repository.info.repository_info import RepositoryInfo\n'), ((1270, 1325), 'spaceone.repository.info.repository_info.RepositoryInfo', 'RepositoryInfo', (['schema_vo.repository_info'], {'minimal': '(True)'}), '(schema_vo.repository_info, minimal=True)\n', (1284, 1325), False, 'from spaceone.repository.info.repository_info import RepositoryInfo\n')] |
############################################################
# Dev: <NAME>
# Class: Machine Learning
# Date: 2/23/2022
# file: utils.py
# Description: utility functions for artificial neural
# network learning
#############################################################
import random
class Data:
    '''class to process data set

    Reads an attribute specification plus training/testing data files,
    one-hot encodes discrete attribute values, and exposes training,
    validation and testing example lists.
    '''
    def __init__(
        self,
        training,
        testing,
        attributes,
        debug):
        '''
        Initialize the Data class

        training:   path to the training data file
        testing:    path to the testing data file (None -> reuse training)
        attributes: path to the attribute specification file
        debug:      when True, print verbose processing information
        '''
        self.debug = debug
        # reading attributes
        self.attributes, self.in_attr, self.out_attr = self.read_attributes(attributes)
        # reading input, output lengths
        self.input_units, self.output_units = self.get_input_output_len()
        # reading data; with no separate testing file, the training data is
        # reused for testing
        if testing is None:
            self.training = self.read_data(training)
            self.testing = self.training
            self.validation = self.training
        else:
            self.training = self.read_data(training)
            self.testing = self.read_data(testing)
        self.n_examples = len(self.training)
        # shuffle training data before splitting off the validation set
        random.shuffle(self.training)
        # setting validation to 20% of the training data
        self.validation = self.training[:int(self.n_examples * 0.2)]
        self.training = self.training[int(self.n_examples * 0.2):]

        if self.debug:
            print('Training:', self.training)
            print('validation:', self.validation)
            print('Testing:', self.testing)

    def read_attributes(self, attr_path):
        '''
        Read in the attribute specification file.

        The file lists input attributes, then a blank line, then output
        attributes; each line holds an attribute name followed by its
        possible values. Returns (attributes dict, input names, output
        names).
        '''
        attributes = {}
        in_attr, out_attr = [], []
        is_input = True
        # read in the attributes
        with open(attr_path, 'r') as f:
            for line in f:
                if len(line) > 1:
                    words = line.strip().split()
                    # storing the attributes
                    attributes[words[0]] = words[1:]
                    # storing the input attributes
                    if is_input:
                        in_attr.append(words[0])
                    else:
                        out_attr.append(words[0])
                else:
                    # a blank line separates input from output attributes
                    is_input = False

        if self.debug:
            print('Attributes: ', attributes)
            print('Input attributes: ', in_attr)
            print('Output attributes: ', out_attr)

        if len(attributes) == 0:
            raise Exception('No attributes found')

        return attributes, in_attr, out_attr

    def to_encode(self, attr):
        '''
        Return True if the attribute is discrete and needs one-hot
        encoding.

        Binary attributes declared exactly as "0 1" are treated as already
        numeric and are not encoded.
        '''
        values = self.attributes[attr]
        if len(values) > 1:
            if values[0] == '0' and values[1] == '1':
                return False
            else:
                return True
        else:
            return False

    def onehot(self, attr, value):
        '''
        Convert a discrete attribute value to a one-of-n (one-hot) list of
        floats.
        '''
        encoded = [0.0 for _ in range(len(self.attributes[attr]))]
        encoded[self.attributes[attr].index(value)] = 1.0
        return encoded

    def read_data(self, data_path):
        '''
        Read in a training or testing data file.

        Each non-blank line holds the input attribute values followed by
        the output attribute values; discrete values are one-hot encoded.
        Returns a list of [inputs, outputs] pairs.
        '''
        data = []
        with open(data_path, 'r') as f:
            for line in f:
                items = line.strip().split()
                # Skip blank lines. The previous `len(line) > 0` check let
                # a newline-only line through, which then crashed on
                # next(items_iter) with StopIteration.
                if not items:
                    continue
                # get items iterator
                items_iter = iter(items)
                In, Out = [], []
                # get inputs
                for attr in self.in_attr:
                    value = next(items_iter)
                    if self.to_encode(attr):
                        # encode discrete values
                        encoded = self.onehot(attr, value)
                        In += encoded  # since encoded is a list
                    else:
                        # encode continuous values
                        In.append(float(value))
                # get outputs
                for attr in self.out_attr:
                    value = next(items_iter)
                    if self.to_encode(attr):
                        # encode discrete values
                        encoded = self.onehot(attr, value)
                        Out += encoded  # since encoded is a list
                    else:
                        # encode continuous values
                        Out.append(float(value))
                data.append([In, Out])

        if len(data) == 0:
            raise Exception('No data found')

        return data

    def decode(self, attr, encoded):
        '''
        Decode a one-hot (or soft) encoding back to the attribute value
        with the highest activation.
        '''
        if self.debug:
            print('Encoded: ', encoded)
            print('attr: ', attr)
            print('Attributes: ', self.attributes[attr])
        value_encoded = zip(self.attributes[attr], encoded)
        # sort the values by activation, highest first
        sorted_encoded = sorted(value_encoded, key=lambda x: x[1], reverse=True)
        # the best-matching value
        value = sorted_encoded[0][0]
        if self.debug:
            print('Decoded: ', value)
            print('Sorted encoded: ', sorted_encoded)
        return value

    def get_input_output_len(self):
        '''
        Compute the total number of input and output units, accounting for
        one-hot expansion of discrete attributes.
        '''
        # getting total number of input units
        input_units = 0
        for attr in self.in_attr:
            values = self.attributes[attr]
            # binary "0 1" attributes stay a single unit
            if values[0] == '0' and values[1] == '1':
                input_units += 1
            else:
                input_units += len(values)
        # getting total number of output units
        output_units = 0
        for attr in self.out_attr:
            values = self.attributes[attr]
            # binary "0 1" attributes stay a single unit
            if values[0] == '0' and values[1] == '1':
                output_units += 1
            else:
                output_units += len(values)
        return input_units, output_units
def log_csv(path, histories, headers):
    '''Write per-epoch history values to a CSV file.

    Each row holds the epoch index followed by one value from each
    history series.
    '''
    with open(path, 'w') as out:
        # header row: epoch column followed by the provided headers
        out.write(','.join(['e'] + headers) + '\n')
        # one row per epoch, one column per history series
        for epoch in range(len(histories[0])):
            row = [str(epoch)] + [str(series[epoch]) for series in histories]
            out.write(','.join(row) + '\n')
def corrupt_data(data, classes, percent):
    '''Randomly corrupt the class labels of a fraction of the examples.

    For `percent` (0.0-1.0) of the examples, the last element (the class
    label) is replaced by a different, randomly chosen class. Mutates and
    returns `data`.
    '''
    total = len(data)
    # indices of the examples whose label will be flipped
    victims = random.sample(range(total), int(percent * total))
    for idx in victims:
        original_label = data[idx][-1]
        # draw until we get a label different from the current one
        replacement = random.choice(classes)
        while replacement == original_label:
            replacement = random.choice(classes)
        data[idx][-1] = replacement
    return data
| [
"random.choice",
"random.shuffle"
] | [((7894, 7916), 'random.choice', 'random.choice', (['classes'], {}), '(classes)\n', (7907, 7916), False, 'import random\n'), ((1179, 1208), 'random.shuffle', 'random.shuffle', (['self.training'], {}), '(self.training)\n', (1193, 1208), False, 'import random\n'), ((8056, 8078), 'random.choice', 'random.choice', (['classes'], {}), '(classes)\n', (8069, 8078), False, 'import random\n')] |
# Copyright FMR LLC <<EMAIL>>
# SPDX-License-Identifier: Apache-2.0
"""
The script generates variations for the parameters using configuration file and stores them in respective named tuple
"""
import math
import random
from collections import namedtuple
import numpy as np
# configuration parameters
scene_options = [
    "aspect_ratio",
    "color_mode",
    "exposure_value",
    "contrast",
    "crop_min_x",
    "crop_max_x",
    "crop_min_y",
    "crop_max_y",
    "resolution_x",
    "resolution_y",
    "resolution_percentage",
    "render_engine",
]
# Scene-level render settings for one variation; every field defaults to None
Scene_tuple = namedtuple(
    "SceneParameters", scene_options, defaults=[None] * len(scene_options)
)
light_options = [
    "light_energies",
    "light_x_location",
    "light_y_location",
    "light_z_location",
    "color_hue",
    "color_saturation",
    "color_value",
    "light_type",
]
# Light energy, position, HSV colour and type; every field defaults to None
Light_tuple = namedtuple(
    "LightParameters", light_options, defaults=[None] * len(light_options)
)
camera_options = [
    "camera_x_location",
    "camera_y_location",
    "camera_z_location",
    "camera_x_rotation",
    "camera_y_rotation",
    "camera_z_rotation",
    "camera_focal_length",
]
# Camera placement, orientation and focal length; every field defaults to None
Camera_tuple = namedtuple(
    "CameraParameters", camera_options, defaults=[None] * len(camera_options)
)
image_options = [
    "image_x_scale",
    "image_y_scale",
    "image_z_scale",
    "image_x_rotation",
    "image_y_rotation",
    "image_z_rotation",
    "image_bbs",
    "background_image_name",
    "image_name",
]
# Foreground image transform, bounding boxes and file names
Image_tuple = namedtuple(
    "ImageParameters", image_options, defaults=[None] * len(image_options)
)
other_options = ["render_device_type"]
# Miscellaneous Blender settings (currently just the render device type)
other_parameter_tuple = namedtuple(
    "OtherBlenderParameters", other_options, defaults=[None] * len(other_options)
)
def random_range(configs, variable, variations):
    """
    Draw `variations` uniform random samples for `variable`.

    The sampling bounds come from configs[variable]["range"], given as
    (low, high).
    """
    bounds = configs[variable]["range"]
    return np.random.uniform(bounds[0], bounds[1], variations)
def random_categorical_values(configs, variable, variations):
    """
    Draw `variations` random categorical values for `variable` (e.g.
    aspect ratio) from configs[variable]["range"].

    If configs[variable]["weights"] is present it is used as the sampling
    weights; otherwise every value is equally likely (random.choices
    treats weights=None as uniform).
    """
    # The previous bare `except:` swallowed *any* error while looking up
    # the weights; use .get() so only the expected missing-key case falls
    # back to uniform sampling.
    weight_values = configs[variable].get("weights")
    return random.choices(
        configs[variable]["range"], k=variations, weights=weight_values
    )
def get_image_parameters(
    n_variations: int, image_configs: dict, image_files: list, bg_list: list
):
    """
    Build one ImageParameters tuple per variation, with random scales and
    rotations drawn from the config and one random background per image.
    """
    # sample one background per foreground image ("" when none are given)
    if bg_list:
        bg_images = [random.choice(bg_list) for _ in range(len(image_files))]
    else:
        bg_images = [""] * len(image_files)

    scale_x = random_range(image_configs, "image_x_scale", n_variations)
    scale_y = random_range(image_configs, "image_y_scale", n_variations)
    scale_z = random_range(image_configs, "image_z_scale", n_variations)
    rot_x = random_range(image_configs, "image_x_rotation", n_variations)
    rot_y = random_range(image_configs, "image_y_rotation", n_variations)
    rot_z = random_range(image_configs, "image_z_rotation", n_variations)

    return [
        Image_tuple(
            image_x_scale=scale_x[i],
            image_y_scale=scale_y[i],
            image_z_scale=scale_z[i],
            image_x_rotation=rot_x[i],
            image_y_rotation=rot_y[i],
            image_z_rotation=rot_z[i],
            image_bbs=[],
            image_name=image_files[i],
            background_image_name=bg_images[i],
        )
        for i in range(n_variations)
    ]
def get_other_blender_parameters(other_parameters: dict):
    """Wrap the render device type into an OtherBlenderParameters tuple."""
    return other_parameter_tuple(
        render_device_type=other_parameters["render_device_type"]
    )
def get_camera_parameters(n_variations: int, camera_configs: dict):
    """
    Build one CameraParameters tuple per variation, with random location,
    rotation (converted to radians) and focal length from the config.
    """
    focal = random_range(camera_configs, "camera_focal_length", n_variations)
    loc_x = random_range(camera_configs, "camera_x_location", n_variations)
    loc_y = random_range(camera_configs, "camera_y_location", n_variations)
    loc_z = random_range(camera_configs, "camera_z_location", n_variations)
    rot_x = random_range(camera_configs, "camera_x_rotation", n_variations)
    rot_y = random_range(camera_configs, "camera_y_rotation", n_variations)
    rot_z = random_range(camera_configs, "camera_z_rotation", n_variations)

    return [
        Camera_tuple(
            camera_x_location=loc_x[i],
            camera_y_location=loc_y[i],
            camera_z_location=loc_z[i],
            camera_focal_length=focal[i],
            # rotations are configured in degrees; Blender expects radians
            camera_x_rotation=math.radians(rot_x[i]),
            camera_y_rotation=math.radians(rot_y[i]),
            camera_z_rotation=math.radians(rot_z[i]),
        )
        for i in range(n_variations)
    ]
def get_light_parameters(n_variations: int, light_configs: dict):
    """
    Build one LightParameters tuple per variation, with random energy,
    HSV colour, location and light type drawn from the config.
    """
    light_energies = random_range(light_configs, "light_energy", n_variations)
    light_type_values = random_categorical_values(
        light_configs, "light_types", n_variations
    )
    hue = random_range(light_configs, "hue", n_variations)
    saturation = random_range(light_configs, "saturation", n_variations)
    value = random_range(light_configs, "value", n_variations)
    light_x_values = random_range(light_configs, "light_x_location", n_variations)
    # BUG FIX: the y and z locations were previously sampled from the
    # "light_x_location" range (copy-paste error). Use the dedicated keys
    # when the config defines them, falling back to the old behaviour for
    # configs that only provide "light_x_location".
    y_key = "light_y_location" if "light_y_location" in light_configs else "light_x_location"
    z_key = "light_z_location" if "light_z_location" in light_configs else "light_x_location"
    light_y_values = random_range(light_configs, y_key, n_variations)
    light_z_values = random_range(light_configs, z_key, n_variations)

    light_parameters_list = []
    for index in range(n_variations):
        light_parameters_list.append(
            Light_tuple(
                light_energies=light_energies[index],
                light_x_location=light_x_values[index],
                light_y_location=light_y_values[index],
                light_z_location=light_z_values[index],
                color_hue=hue[index],
                color_saturation=saturation[index],
                color_value=value[index],
                light_type=light_type_values[index],
            )
        )
    return light_parameters_list
def get_scene_parameters(n_variations: int, scene_config: dict):
    """
    Build one SceneParameters tuple per variation, with random render
    settings (aspect ratio, colour mode, exposure, contrast, crop window,
    resolution, render engine) drawn from the config.
    """
    aspect_ratios = random_categorical_values(scene_config, "aspect_ratio", n_variations)
    color_modes = random_categorical_values(scene_config, "color_modes", n_variations)
    resolutions = random_categorical_values(scene_config, "resolution", n_variations)
    contrasts = random_categorical_values(scene_config, "contrast", n_variations)
    engines = random_categorical_values(scene_config, "render_engine", n_variations)
    exposures = random_range(scene_config, "exposure", n_variations)
    crop_min_x = random_range(scene_config, "crop_min_x", n_variations)
    crop_max_x = random_range(scene_config, "crop_max_x", n_variations)
    crop_min_y = random_range(scene_config, "crop_min_y", n_variations)
    crop_max_y = random_range(scene_config, "crop_max_y", n_variations)
    res_pct = random_range(scene_config, "resolution_percentage", n_variations)

    return [
        Scene_tuple(
            aspect_ratio=aspect_ratios[i],
            color_mode=color_modes[i],
            exposure_value=exposures[i],
            contrast=contrasts[i],
            crop_min_x=crop_min_x[i],
            crop_max_x=crop_max_x[i],
            crop_min_y=crop_min_y[i],
            crop_max_y=crop_max_y[i],
            # each sampled resolution entry is an (x, y) pair
            resolution_x=resolutions[i][0],
            resolution_y=resolutions[i][1],
            resolution_percentage=res_pct[i],
            render_engine=engines[i],
        )
        for i in range(n_variations)
    ]
| [
"random.choices",
"random.choice",
"math.radians",
"numpy.random.uniform"
] | [((1896, 1992), 'numpy.random.uniform', 'np.random.uniform', (["configs[variable]['range'][0]", "configs[variable]['range'][1]", 'variations'], {}), "(configs[variable]['range'][0], configs[variable]['range']\n [1], variations)\n", (1913, 1992), True, 'import numpy as np\n'), ((2421, 2500), 'random.choices', 'random.choices', (["configs[variable]['range']"], {'k': 'variations', 'weights': 'weight_values'}), "(configs[variable]['range'], k=variations, weights=weight_values)\n", (2435, 2500), False, 'import random\n'), ((2939, 2961), 'random.choice', 'random.choice', (['bg_list'], {}), '(bg_list)\n', (2952, 2961), False, 'import random\n'), ((5959, 6004), 'math.radians', 'math.radians', (['camera_x_rotation_values[index]'], {}), '(camera_x_rotation_values[index])\n', (5971, 6004), False, 'import math\n'), ((6036, 6081), 'math.radians', 'math.radians', (['camera_y_rotation_values[index]'], {}), '(camera_y_rotation_values[index])\n', (6048, 6081), False, 'import math\n'), ((6113, 6158), 'math.radians', 'math.radians', (['camera_z_rotation_values[index]'], {}), '(camera_z_rotation_values[index])\n', (6125, 6158), False, 'import math\n')] |
# Packaging script for the pytorch-adapt distribution.
import sys

import setuptools

# Make the in-repo package importable so its version can be read below.
sys.path.insert(0, "src")
import pytorch_adapt

# An explicit encoding avoids a UnicodeDecodeError on platforms whose
# default locale encoding is not UTF-8 (e.g. Windows cp1252).
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

# Optional dependency groups, exposed via extras_require below.
extras_require_ignite = ["pytorch-ignite == 0.5.0.dev20220221"]
extras_require_lightning = ["pytorch-lightning"]
extras_require_record_keeper = ["record-keeper >= 0.9.31"]
extras_require_timm = ["timm"]
extras_require_docs = [
    "mkdocs-material",
    "mkdocstrings[python]",
    "griffe",
    "mkdocs-gen-files",
    "mkdocs-section-index",
    "mkdocs-literate-nav",
]
extras_require_dev = ["black", "isort", "nbqa", "flake8"]

setuptools.setup(
    name="pytorch-adapt",
    version=pytorch_adapt.__version__,
    author="<NAME>",
    description="Domain adaptation made easy. Fully featured, modular, and customizable.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/KevinMusgrave/pytorch-adapt",
    package_dir={"": "src"},
    packages=setuptools.find_packages(where="src"),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.0",
    install_requires=[
        "numpy",
        "torch",
        "torchvision",
        "torchmetrics",
        "pytorch-metric-learning >= 1.3.1.dev0",
    ],
    extras_require={
        "ignite": extras_require_ignite,
        "lightning": extras_require_lightning,
        "record-keeper": extras_require_record_keeper,
        "timm": extras_require_timm,
        "docs": extras_require_docs,
        "dev": extras_require_dev,
    },
)
| [
"sys.path.insert",
"setuptools.find_packages"
] | [((31, 56), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""src"""'], {}), "(0, 'src')\n", (46, 56), False, 'import sys\n'), ((966, 1003), 'setuptools.find_packages', 'setuptools.find_packages', ([], {'where': '"""src"""'}), "(where='src')\n", (990, 1003), False, 'import setuptools\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
class SoftAttention(nn.Module):
    """
    Soft Attention module

    Scores each position of a projected context against a query vector `h`
    via a dot product, optionally masks invalid positions, and returns the
    softmax-weighted sum over the context.
    """

    def __init__(self, rnn_hidden_size, attn_hidden_size, temp=1):
        # rnn_hidden_size: dimensionality of the query vector `h`.
        # attn_hidden_size: dimensionality of the attention space that
        #     `proj_context` is assumed to already live in.
        # temp: softmax temperature; raw attention logits are divided by it.
        super(SoftAttention, self).__init__()
        self.softmax = nn.Softmax(dim=1)
        self.h2attn = nn.Linear(rnn_hidden_size, attn_hidden_size)
        self.temp = temp
        # this min_value is used to prevent in the case that,
        # when the mask is all empty, softmax will result in NaN
        self.min_value = -1e8

    def forward(self, h, proj_context, context=None, mask=None, proposal_frame_mask=None, with_sentinel=False):
        """Propagate h through the network.

        h: batch x dim (concat(img, action)) — query vector
        proj_context: batch x seq_len x attn_hidden_size — context already
            projected into the attention space
        context: batch x seq_len x dim — if given, the weighted sum is taken
            over this tensor instead of `proj_context`
        mask: batch x seq_len indices to be masked (non-zero entries are masked)
        proposal_frame_mask: batch x seq_len — extra mask applied only to a
            cloned copy of the logits, used for a (supervised) attention loss
        with_sentinel: if True, mask with -inf; otherwise use a large negative
            constant so all-masked rows don't produce NaN after softmax

        Returns (weighted_context, attn, frame_masked_attn-or-None).
        """
        attn_h = self.h2attn(h)
        # Get attention: dot product of each context position with the query.
        attn = torch.bmm(proj_context, attn_h.unsqueeze(2)
                         ).squeeze(2)  # batch x seq_len
        attn = attn / self.temp
        if mask is not None:
            if with_sentinel:
                attn.data.masked_fill_(mask.data, -float('inf'))
            else:
                # without sentinel, we need to use a very small value for masking,
                # because there are corner cases where a image has no ROI proposals
                # masking with -inf will thus result in NaN.
                attn.data.masked_fill_(mask.data, self.min_value)
        if proposal_frame_mask is not None:
            # this `frame_masked_attn` is only used to computing (supervised) attention loss
            # since our proposed method does not rely on supervision, we will not update the model
            # based on this loss
            # NOTE: cloned BEFORE softmax, so it holds (additionally masked) raw logits.
            frame_masked_attn = attn.clone()
            if with_sentinel:
                frame_masked_attn.data.masked_fill_(
                    proposal_frame_mask.data, -float('inf'))
            else:
                # without sentinel, we need to use a very small value for masking,
                # because there are corner cases where a image has no ROI proposals
                # masking with -inf will thus result in NaN.
                frame_masked_attn.data.masked_fill_(
                    proposal_frame_mask.data, self.min_value)
        attn = self.softmax(attn)
        attn3 = attn.view(attn.size(0), 1, attn.size(1))  # batch x 1 x seq_len
        if context is not None:
            weighted_context = torch.bmm(
                attn3, context).squeeze(1)  # batch x dim
        else:
            weighted_context = torch.bmm(
                attn3, proj_context).squeeze(1)  # batch x dim
        if proposal_frame_mask is not None:
            return weighted_context, attn, frame_masked_attn
        else:
            return weighted_context, attn, None
class AdditiveSoftAttention(nn.Module):
    """
    Soft Attention module

    Additive (Bahdanau-style) variant: the query is added to each projected
    context position, passed through tanh and a linear scoring layer, then
    softmax-normalised to weight the context.
    """

    def __init__(self, rnn_hidden_size, attn_hidden_size, temp=1):
        # rnn_hidden_size: dimensionality of the query vector `h`.
        # attn_hidden_size: size of the hidden attention space used by the
        #     additive scoring (h2attn output / alpha_net input).
        # temp: softmax temperature. NOTE(review): currently unused in
        #     forward() — the division by self.temp is commented out there.
        super(AdditiveSoftAttention, self).__init__()
        self.softmax = nn.Softmax(dim=1)
        self.rnn_size = rnn_hidden_size
        self.att_hid_size = attn_hidden_size
        self.h2attn = nn.Linear(rnn_hidden_size, attn_hidden_size)
        self.alpha_net = nn.Linear(attn_hidden_size, 1)
        self.temp = temp
        # this min_value is used to prevent in the case that,
        # when the mask is all empty, softmax will result in NaN
        self.min_value = -1e8

    def forward(self, h, proj_context, context=None, mask=None, proposal_frame_mask=None, with_sentinel=False):
        """Propagate h through the network.

        h: batch x dim (concat(img, action)) — query vector
        proj_context: batch x seq_len x att_hid_size — context already
            projected into the attention space
        context: batch x seq_len x dim — if given, the weighted sum is taken
            over this tensor instead of `proj_context`
        mask: batch x seq_len indices to be masked (non-zero entries are masked)
        proposal_frame_mask: batch x seq_len — extra mask applied only to a
            cloned copy of the logits, used for a (supervised) attention loss
        with_sentinel: if True, mask with -inf; otherwise use a large negative
            constant so all-masked rows don't produce NaN after softmax

        Returns (weighted_context, attn, frame_masked_attn-or-None).
        """
        attn_size = proj_context.size(1)
        attn_h = self.h2attn(h)
        attn_h = attn_h.unsqueeze(1)
        # Additive scoring: broadcast-add the query to every context position.
        dot = proj_context + attn_h
        dot = torch.tanh(dot)
        dot = dot.view(-1, self.att_hid_size)
        dot = self.alpha_net(dot)
        attn = dot.view(-1, attn_size)
        # Get attention
        # attn = torch.bmm(proj_context, attn_h.unsqueeze(2)).squeeze(2) # batch x seq_len
        # attn = attn / self.temp
        if mask is not None:
            if with_sentinel:
                attn.data.masked_fill_(mask.data, -float('inf'))
            else:
                # without sentinel, we need to use a very small value for masking,
                # because there are corner cases where a image has no ROI proposals
                # masking with -inf will thus result in NaN.
                attn.data.masked_fill_(mask.data, self.min_value)
        if proposal_frame_mask is not None:
            # this `frame_masked_attn` is only used to computing (supervised) attention loss
            # since our proposed method does not rely on supervision, we will not update the model
            # based on this loss
            # NOTE: cloned BEFORE softmax, so it holds (additionally masked) raw logits.
            frame_masked_attn = attn.clone()
            if with_sentinel:
                frame_masked_attn.data.masked_fill_(
                    proposal_frame_mask.data, -float('inf'))
            else:
                # without sentinel, we need to use a very small value for masking,
                # because there are corner cases where a image has no ROI proposals
                # masking with -inf will thus result in NaN.
                frame_masked_attn.data.masked_fill_(
                    proposal_frame_mask.data, self.min_value)
        attn = self.softmax(attn)
        attn3 = attn.unsqueeze(1)  # batch x 1 x seq_len
        if context is not None:
            weighted_context = torch.bmm(
                attn3, context).squeeze(1)  # batch x dim
        else:
            weighted_context = torch.bmm(
                attn3, proj_context).squeeze(1)  # batch x dim
        if proposal_frame_mask is not None:
            return weighted_context, attn, frame_masked_attn
        else:
            return weighted_context, attn, None
def proj_masking(feat, projector, mask=None):
    """Project a batch of sequence features and optionally zero masked positions.

    feat: batch x seq_len x dim tensor; `projector` is applied to the flattened
    (batch*seq_len) x dim view, then the output is reshaped back to 3D.
    mask: optional batch x seq_len tensor; positions where mask == 0 are zeroed.
    """
    batch_size, seq_len = feat.size(0), feat.size(1)
    flattened = feat.view(-1, feat.size(2))
    projected = projector(flattened).view(batch_size, seq_len, -1)
    if mask is None:
        return projected
    # Only guard against a mask that is empty across the ENTIRE batch.
    # A stricter per-row check (every sample has at least one unmasked element)
    # would be preferable, but cannot be used with NBT-style features because
    # some images have no regional proposal features at all.
    assert mask.sum() != 0
    return projected * mask.unsqueeze(2).expand_as(projected)
| [
"torch.tanh",
"torch.bmm",
"torch.nn.Linear",
"torch.nn.Softmax"
] | [((281, 298), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (291, 298), True, 'import torch.nn as nn\n'), ((322, 366), 'torch.nn.Linear', 'nn.Linear', (['rnn_hidden_size', 'attn_hidden_size'], {}), '(rnn_hidden_size, attn_hidden_size)\n', (331, 366), True, 'import torch.nn as nn\n'), ((3083, 3100), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (3093, 3100), True, 'import torch.nn as nn\n'), ((3210, 3254), 'torch.nn.Linear', 'nn.Linear', (['rnn_hidden_size', 'attn_hidden_size'], {}), '(rnn_hidden_size, attn_hidden_size)\n', (3219, 3254), True, 'import torch.nn as nn\n'), ((3280, 3310), 'torch.nn.Linear', 'nn.Linear', (['attn_hidden_size', '(1)'], {}), '(attn_hidden_size, 1)\n', (3289, 3310), True, 'import torch.nn as nn\n'), ((3961, 3976), 'torch.tanh', 'torch.tanh', (['dot'], {}), '(dot)\n', (3971, 3976), False, 'import torch\n'), ((2498, 2523), 'torch.bmm', 'torch.bmm', (['attn3', 'context'], {}), '(attn3, context)\n', (2507, 2523), False, 'import torch\n'), ((2612, 2642), 'torch.bmm', 'torch.bmm', (['attn3', 'proj_context'], {}), '(attn3, proj_context)\n', (2621, 2642), False, 'import torch\n'), ((5661, 5686), 'torch.bmm', 'torch.bmm', (['attn3', 'context'], {}), '(attn3, context)\n', (5670, 5686), False, 'import torch\n'), ((5775, 5805), 'torch.bmm', 'torch.bmm', (['attn3', 'proj_context'], {}), '(attn3, proj_context)\n', (5784, 5805), False, 'import torch\n')] |
# PyTorch basics tutorial script: autograd, numpy interop, input pipelines,
# pretrained models, and model (de)serialisation.
import torch
import torchvision
import torch.nn as nn
import numpy as np
import torchvision.transforms as transforms

# ================================================================== #
#                        Table of Contents                           #
# ================================================================== #

# 1. Basic autograd example 1
# 2. Basic autograd example 2
# 3. Loading data from numpy
# 4. Input pipeline
# 5. Input pipeline for a custom dataset
# 6. Pretrained model
# 7. Save and load the model

# ================================================================== #
#                     1. Basic autograd example 1                    #
# ================================================================== #

# Create tensors that track gradients.
x = torch.tensor(1., requires_grad=True)
w = torch.tensor(2., requires_grad=True)
b = torch.tensor(3., requires_grad=True)

# Build a computational graph.
y = w * x + b    # y = 2 * x + 3

# Compute gradients via backpropagation.
y.backward()

# Print out the gradients (dy/dx = w, dy/dw = x, dy/db = 1).
print(x.grad)
print(w.grad)
print(b.grad)
'''
x.grad = tensor(2.)
w.grad = tensor(1.)
b.grad = tensor(1.)
'''

# ================================================================== #
#                     2. Basic autograd example 2                    #
# ================================================================== #

# Create random tensors of shape 10x3 and 10x2.
x = torch.randn(10, 3)
y = torch.randn(10, 2)

# Build a fully connected layer.
linear = nn.Linear(3, 2)
print('w: ', linear.weight)
print('b: ', linear.bias)
'''
w:  Parameter containing:
tensor([[-0.0707,  0.2341,  0.4827],
        [-0.5092, -0.1537,  0.2582]], requires_grad=True)
b:  Parameter containing:
tensor([ 0.5335, -0.2167], requires_grad=True)
'''

# Build a loss function and an optimizer.
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(linear.parameters(), lr=0.01)

# Forward pass.
pred = linear(x)

# Compute the loss.
loss = criterion(pred, y)
print('loss: ', loss.item())
'''
loss:  1.831163763999939
'''

# Backward pass.
loss.backward()

# Print out the gradients.
print('dL/dw: ', linear.weight.grad)
print('dL/db: ', linear.bias.grad)
'''
dL/dw:  tensor([[ 0.5340,  0.4947,  0.1947],
        [-0.1455,  0.5270,  0.6877]])
dL/db:  tensor([ 0.5586, -0.8556])
'''

# One step of gradient descent.
optimizer.step()

# You can also perform gradient descent at the low level.
# linear.weight.data.sub_(0.01 * linear.weight.grad.data)
# linear.bias.data.sub_(0.01 * linear.bias.grad.data)

# Print the loss after one step of gradient descent.
pred = linear(x)
loss = criterion(pred, y)
print('1步优化后的损失: ', loss.item())
'''
1步优化后的损失:  1.631872534751892
'''

# ================================================================== #
#                     3. Loading data from numpy                     #
# ================================================================== #

# Create a numpy array.
x = np.array([[1, 2], [3, 4]])

# Convert the numpy array to a torch tensor.
y = torch.from_numpy(x)

# Convert the torch tensor back to a numpy array.
z = y.numpy()

# ================================================================== #
#                         4. Input pipeline                          #
# ================================================================== #

# Download and construct the CIFAR-10 dataset.
train_dataset = torchvision.datasets.CIFAR10(root='./data/',
                                             train=True,
                                             transform=transforms.ToTensor(),
                                             download=True)

# Fetch one data pair (reads data from disk).
image, label = train_dataset[0]
print(image.size())
print(label)
'''
torch.Size([3, 32, 32])
6
'''

# Data loader (provides queues and threads in a very simple way).
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=64,
                                           shuffle=True)

# When iteration starts, the queue and threads start to load data.
data_iter = iter(train_loader)

# Mini-batch of images and labels.
# FIX: use the built-in next() — the DataLoader iterator has no `.next()`
# method in Python 3 / current PyTorch, so `data_iter.next()` raises
# AttributeError. next(data_iter) works on every version.
images, labels = next(data_iter)

# Actual usage of the data loader is as below.
for images, labels in train_loader:
    # Training code should be written here.
    pass

# ================================================================== #
#                5. Input pipeline for a custom dataset              #
# ================================================================== #

# Build a custom dataset by subclassing torch.utils.data.Dataset.
class CustomDataset(torch.utils.data.Dataset):
    def __init__(self):
        # TODO
        # 1. Initialize file paths or a list of file names.
        pass

    def __getitem__(self, index):
        # TODO
        # 1. Read one data item from file (e.g. numpy.fromfile, PIL.Image.open).
        # 2. Preprocess the data (e.g. torchvision.Transform).
        # 3. Return a data pair (e.g. image and label).
        pass

    def __len__(self):
        # Return the size of the dataset.
        return 0

# Then use the prebuilt data loader.
# custom_dataset = CustomDataset()
# train_loader = torch.utils.data.DataLoader(dataset=custom_dataset,
#                                            batch_size=64,
#                                            shuffle=True)

# ================================================================== #
#                        6. Pretrained model                         #
# ================================================================== #

# Download and load the pretrained ResNet-18 model.
resnet = torchvision.models.resnet18(pretrained=True)

# If you want to finetune only the top layer of the model, set as below.
for param in resnet.parameters():
    param.requires_grad = False

# Replace the top layer for finetuning (100-class classifier head).
resnet.fc = nn.Linear(resnet.fc.in_features, 100)

# Forward pass.
images = torch.randn(64, 3, 224, 224)
outputs = resnet(images)
print(outputs.size())
'''
64x3x224x224->64x100
torch.Size([64, 100])
'''

# ================================================================== #
#                      7. Save and load the model                    #
# ================================================================== #

# Save and load the entire model.
torch.save(resnet, 'model.ckpt')
model = torch.load('model.ckpt')

# Save and load only the model parameters (recommended).
torch.save(resnet.state_dict(), 'params.ckpt')
resnet.load_state_dict(torch.load('params.ckpt'))
| [
"torch.load",
"torchvision.models.resnet18",
"torch.from_numpy",
"numpy.array",
"torch.tensor",
"torch.nn.MSELoss",
"torch.nn.Linear",
"torch.utils.data.DataLoader",
"torch.save",
"torchvision.transforms.ToTensor",
"torch.randn"
] | [((953, 990), 'torch.tensor', 'torch.tensor', (['(1.0)'], {'requires_grad': '(True)'}), '(1.0, requires_grad=True)\n', (965, 990), False, 'import torch\n'), ((994, 1031), 'torch.tensor', 'torch.tensor', (['(2.0)'], {'requires_grad': '(True)'}), '(2.0, requires_grad=True)\n', (1006, 1031), False, 'import torch\n'), ((1035, 1072), 'torch.tensor', 'torch.tensor', (['(3.0)'], {'requires_grad': '(True)'}), '(3.0, requires_grad=True)\n', (1047, 1072), False, 'import torch\n'), ((1490, 1508), 'torch.randn', 'torch.randn', (['(10)', '(3)'], {}), '(10, 3)\n', (1501, 1508), False, 'import torch\n'), ((1513, 1531), 'torch.randn', 'torch.randn', (['(10)', '(2)'], {}), '(10, 2)\n', (1524, 1531), False, 'import torch\n'), ((1553, 1568), 'torch.nn.Linear', 'nn.Linear', (['(3)', '(2)'], {}), '(3, 2)\n', (1562, 1568), True, 'import torch.nn as nn\n'), ((1852, 1864), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1862, 1864), True, 'import torch.nn as nn\n'), ((2833, 2859), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (2841, 2859), True, 'import numpy as np\n'), ((2881, 2900), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (2897, 2900), False, 'import torch\n'), ((3575, 3654), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': '(64)', 'shuffle': '(True)'}), '(dataset=train_dataset, batch_size=64, shuffle=True)\n', (3602, 3654), False, 'import torch\n'), ((5011, 5055), 'torchvision.models.resnet18', 'torchvision.models.resnet18', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (5038, 5055), False, 'import torchvision\n'), ((5172, 5209), 'torch.nn.Linear', 'nn.Linear', (['resnet.fc.in_features', '(100)'], {}), '(resnet.fc.in_features, 100)\n', (5181, 5209), True, 'import torch.nn as nn\n'), ((5227, 5255), 'torch.randn', 'torch.randn', (['(64)', '(3)', '(224)', '(224)'], {}), '(64, 3, 224, 224)\n', (5238, 5255), False, 'import torch\n'), ((5575, 5607), 
'torch.save', 'torch.save', (['resnet', '"""model.ckpt"""'], {}), "(resnet, 'model.ckpt')\n", (5585, 5607), False, 'import torch\n'), ((5616, 5640), 'torch.load', 'torch.load', (['"""model.ckpt"""'], {}), "('model.ckpt')\n", (5626, 5640), False, 'import torch\n'), ((5729, 5754), 'torch.load', 'torch.load', (['"""params.ckpt"""'], {}), "('params.ckpt')\n", (5739, 5754), False, 'import torch\n'), ((3338, 3359), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3357, 3359), True, 'import torchvision.transforms as transforms\n')] |
from flask import Flask
from lambdarado import start


def get_app():
    """Build and return the Flask application exposing the two demo routes."""
    application = Flask(__name__)

    @application.route('/a')
    def get_a():
        return 'AAA'

    @application.route('/b')
    def get_b():
        return 'BBB'

    return application


print("RUNNING main.py")
start(get_app)
| [
"lambdarado.start",
"flask.Flask"
] | [((260, 274), 'lambdarado.start', 'start', (['get_app'], {}), '(get_app)\n', (265, 274), False, 'from lambdarado import start\n'), ((80, 95), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (85, 95), False, 'from flask import Flask\n')] |
"""A module containing tests for the pyIATI representation of Standard metadata."""
import copy
import math
import operator
import pytest
import iati.tests.utilities
from iati.tests.fixtures.versions import iativer, semver, split_decimal, split_iativer, split_semver
class TestVersionInit:
    """A container for tests relating to initialisation of Standard Versions."""

    def test_version_no_params(self):
        """Test Version creation with no parameters."""
        with pytest.raises(TypeError):
            iati.Version()  # pylint: disable=E1120

    def test_version_not_string(self, std_ver_minor_uninst_typeerr):
        """Test Version creation with a non-string."""
        with pytest.raises(TypeError) as excinfo:
            iati.Version(std_ver_minor_uninst_typeerr)

        assert 'A Version object must be created from a string or Decimal, not a ' in str(excinfo.value)
        assert str(type(std_ver_minor_uninst_typeerr)) in str(excinfo.value)

    def test_version_supported_iati_versions(self, std_ver_minor_uninst_valid_fullsupport):
        """Test Version creation with supported IATI version numbers."""
        iati.Version(std_ver_minor_uninst_valid_fullsupport)

    def test_version_valid_decimal(self, std_ver_minor_uninst_valid_decimal_possible):
        """Test Version creations with valid decimal version numbers."""
        integer_component, decimal_component = split_decimal(std_ver_minor_uninst_valid_decimal_possible)

        version = iati.Version(std_ver_minor_uninst_valid_decimal_possible)

        # Decimal component N maps to minor component N - 1.
        assert version.integer == integer_component
        assert version.major == integer_component
        assert version.decimal == decimal_component
        assert version.minor == decimal_component - 1
        assert version.patch == 0

    def test_version_invalid_float(self, std_ver_minor_uninst_valid_decimal_possible):
        """Test Version creation with a float that would be valid as a Decimal."""
        float_version = float(std_ver_minor_uninst_valid_decimal_possible)

        with pytest.raises(TypeError):
            iati.Version(float_version)

    def test_version_invalid_decimal(self, std_ver_minor_uninst_valueerr_decimal):
        """Test Version creation with a Decimal that is not a valid decimal version number."""
        with pytest.raises(ValueError) as excinfo:
            iati.Version(std_ver_minor_uninst_valueerr_decimal)

        assert str(excinfo.value) == 'A valid version number must be specified.'

    def test_version_valid_iativer(self, std_ver_minor_uninst_valid_iativer_possible):
        """Test Version creations with correctly constructed IATIver version numbers."""
        integer_component, decimal_component = split_iativer(std_ver_minor_uninst_valid_iativer_possible)

        version = iati.Version(std_ver_minor_uninst_valid_iativer_possible)

        assert version.integer == integer_component
        assert version.major == integer_component
        assert version.decimal == decimal_component
        assert version.minor == decimal_component - 1
        assert version.patch == 0

    def test_version_invalid_iativer(self, std_ver_minor_uninst_valueerr_iativer):
        """Test Version creation with a string that is not a valid IATIver version number, but looks like it could be."""
        with pytest.raises(ValueError) as excinfo:
            iati.Version(std_ver_minor_uninst_valueerr_iativer)

        assert str(excinfo.value) == 'A valid version number must be specified.'

    def test_version_valid_semver_3_part(self, std_ver_minor_uninst_valid_semver_possible):
        """Test Version creation with valid SemVer version numbers."""
        major_component, minor_component, patch_component = split_semver(std_ver_minor_uninst_valid_semver_possible)

        version = iati.Version(std_ver_minor_uninst_valid_semver_possible)

        assert version.major == major_component
        assert version.integer == major_component
        assert version.minor == minor_component
        assert version.decimal == minor_component + 1
        assert version.patch == patch_component

    def test_semver_version_invalid_major_0(self, str_ver_minor_uninst_valueerr_v0):
        """Test version creation with a Major version of 0.

        FIX: this method previously lacked the ``test_`` prefix
        (``semver_version_invalid_major_0``), so pytest never collected or
        ran it. Renamed so the check is actually executed.
        """
        with pytest.raises(ValueError) as excinfo:
            iati.Version(str_ver_minor_uninst_valueerr_v0)

        assert str(excinfo.value) == 'A valid version number must be specified.'
class TestVersionComparison:
    """A container for tests relating to comparison between Standard Versions."""

    @pytest.fixture(params=[
        # Each param is (version_str_a, version_str_b, relationship-symbol).
        # with patch components of zero
        ('1.01', '1.01', '='),  # equal IATIver - zero minor
        ('1.0.0', '1.0.0', '='),  # equal SemVer - zero minor
        ('1.01', '1.0.0', '='),  # equal IATIver and SemVer - zero minor
        ('1.0.0', '1.01', '='),  # equal Semver and IATIVer - zero minor
        ('1.02', '1.02', '='),  # equal IATIver - non-zero minor
        ('1.1.0', '1.1.0', '='),  # equal SemVer - non-zero minor
        ('1.02', '1.1.0', '='),  # equal IATIver and SemVer - non-zero minor
        ('1.1.0', '1.02', '='),  # equal SemVer and IATIver - non-zero minor
        ('1.01', '1.02', '<'),  # less than IATIver - minor
        ('1.0.0', '1.1.0', '<'),  # less than SemVer - minor
        ('1.01', '1.1.0', '<'),  # less than IATIver and SemVer - minor
        ('1.0.0', '1.02', '<'),  # less than SemVer and IATIver - minor
        ('1.01', '2.01', '<'),  # less than IATIver - major
        ('1.0.0', '2.0.0', '<'),  # less than SemVer - major
        ('1.01', '2.0.0', '<'),  # less than IATIver and SemVer - major
        ('1.0.0', '2.01', '<'),  # less than SemVer and IATIVer - major
        ('1.1.0', '1.0.0', '>'),  # more than SemVer - minor
        ('1.1.0', '1.01', '>'),  # more than IATIver and SemVer - minor
        ('1.02', '1.0.0', '>'),  # more than SemVer and IATIver - minor
        ('2.01', '1.01', '>'),  # more than IATIver - major
        ('2.0.0', '1.0.0', '>'),  # more than SemVer - major
        ('2.01', '1.0.0', '>'),  # more than IATIver and SemVer - major
        ('2.0.0', '1.01', '>'),  # more than SemVer and IATIVer - major
        # non-zero patch components
        ('1.02', '1.1.7', '<'),  # less than IATIver and SemVer - different patch
        ('1.1.7', '1.02', '>'),  # more equal SemVer and IATIver - different patch
        ('1.1.6', '1.1.7', '<'),  # less than SemVer - patch
        ('1.1.7', '1.1.6', '>')  # more than SemVer - patch
    ])
    def version_relationship(self, request):
        """Return a tuple containing a pair of Version Numbers and their relationships.

        The first two items in the tuple are Version Numbers.
        The third item is a string containing symbols indicating the relationship.

        * =: The two values are equal.
        * <: The first value is less than the second.
        * >: The first value is more than the second.
        """
        return request.param

    @pytest.fixture(params=[
        # Each param maps a comparison operator to the relationship symbols
        # under which the operator should return True.
        (operator.eq, ['=']),
        (operator.ne, ['<', '>']),
        (operator.lt, ['<']),
        (operator.le, ['<', '=']),
        (operator.gt, ['>']),
        (operator.ge, ['>', '='])
    ])
    def comparison_op_mapping(self, request):
        """Return a tuple containing a comparison operator and a list of symbols it represents."""
        return request.param

    def test_comparisons(self, version_relationship, comparison_op_mapping):
        """Test that the relationships between two Versions are correctly detected."""
        version_1 = iati.Version(version_relationship[0])
        version_2 = iati.Version(version_relationship[1])
        expected_relationships = version_relationship[2]
        comparison_op, op_relationships = comparison_op_mapping
        # The operator should return True exactly when the expected
        # relationship symbol is one the operator represents.
        should_pass = len([op for op in op_relationships if op in expected_relationships]) > 0

        result = comparison_op(version_1, version_2)

        assert result == should_pass
class TestVersionModification:
    """A container for tests relating to modifying Version Numbers after they are instantiated."""

    CHANGE_AMOUNT = 10
    """int: The amount that Components are modified by."""

    @pytest.fixture(params=[
        # (attribute name, index within a [major, minor, patch] SemVer triple)
        ('major', 0),
        ('integer', 0),
        ('minor', 1),
        ('decimal', 1),
        ('patch', 2)
    ])
    def modifiable_attrib(self, request):
        """Return a tuple containing the name of a component within a Version, plus the index as it appears when components are ordered from most to least major."""
        return request.param

    def test_attribute_components_writable_valid_values(self, std_ver_minor_inst_valid_possible, modifiable_attrib):
        """Test that the core Version Number Component attributes are writable."""
        attrib_name, idx = modifiable_attrib
        # Build the expected Version by bumping the same component directly
        # in a SemVer triple, then setting the attribute must match it.
        components = split_semver(std_ver_minor_inst_valid_possible.semver_str)
        components[idx] = components[idx] + self.CHANGE_AMOUNT
        version_new = iati.Version(semver(components[0], components[1], components[2]))

        setattr(std_ver_minor_inst_valid_possible, attrib_name, components[idx])

        assert std_ver_minor_inst_valid_possible == version_new

    @pytest.mark.parametrize("not_int", iati.tests.utilities.generate_test_types(['int'], True))
    def test_attribute_components_writable_invalid_values(self, std_ver_minor_inst_valid_single, modifiable_attrib, not_int):
        """Test that core Version Number Components can have invalid values set."""
        # Deliberately no assertion: setting a non-int must merely not raise.
        attrib_name, _ = modifiable_attrib

        setattr(std_ver_minor_inst_valid_single, attrib_name, not_int)
class TestVersionRepresentation:
    """A container for tests relating to how Standard Versions are represented when output."""

    def test_iativer_string_output(self, std_ver_minor_uninst_valid_iativer_possible):
        """Test that the string output for an IATIver version is as expected."""
        integer_component, decimal_component = split_iativer(std_ver_minor_uninst_valid_iativer_possible)
        # IATIver decimal N corresponds to SemVer minor N - 1, patch 0.
        semver_str = semver(integer_component, decimal_component - 1, 0)

        version = iati.Version(std_ver_minor_uninst_valid_iativer_possible)

        # str() uses IATIver format; repr() embeds the SemVer string.
        assert str(version) == std_ver_minor_uninst_valid_iativer_possible
        assert repr(version) == "iati.Version('" + semver_str + "')"
        assert version.iativer_str == std_ver_minor_uninst_valid_iativer_possible
        assert version.semver_str == semver_str

    def test_semver_string_output(self, std_ver_minor_uninst_valid_semver_possible):
        """Test that the str() output for an SemVer version is in IATIver-format."""
        major_component, minor_component, _ = split_semver(std_ver_minor_uninst_valid_semver_possible)
        # SemVer minor N corresponds to IATIver decimal N + 1.
        iativer_str = iativer(major_component, minor_component + 1)

        version = iati.Version(std_ver_minor_uninst_valid_semver_possible)

        assert str(version) == iativer_str
        assert repr(version) == "iati.Version('" + std_ver_minor_uninst_valid_semver_possible + "')"
        assert version.iativer_str == iativer_str
        assert version.semver_str == std_ver_minor_uninst_valid_semver_possible
class TestVersionBumping:
    """A container for tests relating to bumping of Version Numbers."""

    def test_version_bump_major(self, std_ver_minor_uninst_valid_semver_possible):
        """Test that the next valid Major/Integer version can be located."""
        major_component, _, _ = split_semver(std_ver_minor_uninst_valid_semver_possible)
        # Bumping major resets minor and patch to zero.
        next_major_version = iati.Version(semver(major_component + 1, 0, 0))

        version = iati.Version(std_ver_minor_uninst_valid_semver_possible)

        # next_major() and next_integer() are aliases and must agree.
        assert isinstance(version.next_major(), iati.Version)
        assert version.next_major() == next_major_version
        assert isinstance(version.next_integer(), iati.Version)
        assert version.next_integer() == next_major_version

    def test_version_bump_minor(self, std_ver_minor_uninst_valid_semver_possible):
        """Test that the next valid Minor/Decimal version can be located."""
        major_component, minor_component, _ = split_semver(std_ver_minor_uninst_valid_semver_possible)
        # Bumping minor keeps major and resets patch to zero.
        next_minor_version = iati.Version(semver(major_component, minor_component + 1, 0))

        version = iati.Version(std_ver_minor_uninst_valid_semver_possible)

        # next_minor() and next_decimal() are aliases and must agree.
        assert isinstance(version.next_minor(), iati.Version)
        assert version.next_minor() == next_minor_version
        assert isinstance(version.next_decimal(), iati.Version)
        assert version.next_decimal() == next_minor_version
class TestVersionImplementationDetailHiding:
    """A container for tests relating to ensuring implementation detail is hidden.

    The implementation of the Version class makes use of a Semantic Versioning library by inheriting from a base class.
    The utilised base class contains attributes that are not desired.
    Tests in this container check that attributes that are not desired have been hidden.
    """

    def test_version_bump_patch(self, std_ver_minor_inst_valid_possible):
        """Test that the next Patch version cannot be obtained."""
        # Both calling and merely accessing the attribute must fail.
        with pytest.raises(AttributeError):
            std_ver_minor_inst_valid_possible.next_patch()

        with pytest.raises(AttributeError):
            std_ver_minor_inst_valid_possible.next_patch  # pylint: disable=pointless-statement

    def test_version_attrib_prerelease(self, std_ver_minor_inst_valid_possible):
        """Test that the 'prerelease' attribute has been set to None on initialisation."""
        assert std_ver_minor_inst_valid_possible.prerelease is None

    def test_version_attrib_build(self, std_ver_minor_inst_valid_possible):
        """Test that the 'build' attribute has been set to None on initialisation."""
        assert std_ver_minor_inst_valid_possible.build is None

    def test_version_attrib_partial(self, std_ver_minor_inst_valid_possible):
        """Test that the 'partial' attribute has been set to True on initialisation."""
        assert std_ver_minor_inst_valid_possible.partial is True
class TestVersionConstants:
    """A container for tests relating to constants that define useful groups of IATI version numbers."""

    @pytest.fixture(params=[
        iati.version.STANDARD_VERSIONS,
        iati.version.STANDARD_VERSIONS_SUPPORTED,
        iati.version.STANDARD_VERSIONS_MINOR
    ])
    def standard_versions_list(self, request):
        """Return a list of Version Numbers."""
        return request.param

    def test_standard_versions_all_are_versions(self, standard_versions_list):
        """Check that each item in standard versions is a Version instance."""
        for version in standard_versions_list:
            assert isinstance(version, iati.Version)

    def test_standard_versions_correct_format(self, standard_versions_list):
        """Check that standard versions is in the correct format."""
        assert isinstance(standard_versions_list, list)

    # The `latest_version` marks below pin the expected counts to Standard
    # v2.03; they will need updating when a new version is released.
    @pytest.mark.latest_version('2.03')
    def test_standard_versions_correct_number(self):
        """Check that standard versions has the expected number of items."""
        assert len(iati.version.STANDARD_VERSIONS) == 8

    @pytest.mark.latest_version('2.03')
    def test_standard_versions_correct_number_supported(self):
        """Check that supported standard versions has the expected number of items."""
        assert len(iati.version.STANDARD_VERSIONS_SUPPORTED) == 5

    def test_standard_versions_major_all_are_integers(self):
        """Check that each major version is an integer."""
        for major_version in iati.version.STANDARD_VERSIONS_MAJOR:
            assert isinstance(major_version, int)

    @pytest.mark.latest_version('2.03')
    def test_standard_versions_major_correct_number(self):
        """Check that the correct number of major versions are detected."""
        assert len(iati.version.STANDARD_VERSIONS_MAJOR) == 2

    @pytest.mark.latest_version('2.03')
    def test_standard_versions_minor_correct_number(self):
        """Check that the correct number of minor versions are detected."""
        assert len(iati.version.STANDARD_VERSIONS_MINOR) == 8

    def test_standard_version_any_has_length(self):
        """Check that the value to represent any version is a value with length."""
        assert iati.version.STANDARD_VERSION_ANY != ''
class TestVersionDecorators:
    """A container for tests that cover all version decorators."""

    def func_with_no_args(self):
        """A function that takes no arguments.

        Accessed as ``self.func_with_no_args``, so the bound method takes
        zero positional arguments — which is what the decorators reject.
        """
        return True

    @pytest.mark.parametrize('decorator', [
        iati.version.allow_fully_supported_version,
        iati.version.allow_known_version,
        iati.version.allow_possible_version,
        iati.version.decimalise_integer,
        iati.version.normalise_decimals
    ])
    def test_version_decorators_require_arg(self, decorator):
        """Test that decorators raise a TypeError when given a function that requires no arguments."""
        with pytest.raises(TypeError):
            decorator(self.func_with_no_args)()
# pylint: disable=protected-access
class VersionSupportChecksBase:
    """A container for functions and fixtures used to check version support.

    These are in their own class to reduce the number of public methods in the parent class below the linting limit of 20.

    """

    @iati.version.allow_fully_supported_version
    def return_fully_supported_version(version):  # pylint: disable=no-self-argument
        """Return the version parameter, but only if it's fully supported by pyIATI. Check undertaken with decorator."""
        return version

    @iati.version.allow_known_version
    def return_known_version(version):  # pylint: disable=no-self-argument
        """Return the version parameter, but only if it's known of by pyIATI. Check undertaken with decorator."""
        return version

    @iati.version.allow_possible_version
    def return_possibly_version(version):  # pylint: disable=no-self-argument
        """Return the version parameter, but only if it's a possible representation of a version number. Check undertaken with decorator."""
        return version

    @pytest.fixture(params=[return_fully_supported_version])
    def decorated_func_full_support(self, request):
        """Return a decorated function that returns a version of the IATI Standard that is fully supported by pyIATI."""
        return request.param

    @pytest.fixture(params=[return_known_version])
    def decorated_func_known(self, request):
        """Return a decorated function that returns a version of the IATI Standard that pyIATI knows exists."""
        return request.param

    @pytest.fixture(params=[
        return_possibly_version,
        iati.version._prevent_non_version_representations
    ])
    def possibly_version_func(self, request):
        """Return a function that returns a value that represents a possible IATI Version. Other values cause an error."""
        return request.param

    @pytest.fixture(params=[
        iati.version._is_fully_supported,
        iati.version._is_known
    ])
    def truthy_func(self, request):
        """Return a function to check whether an input value is True or False based on whether it's a valid version."""
        return request.param

    @pytest.fixture(params=[
        return_fully_supported_version,
        return_known_version
    ])
    def decorated_func(self, request):
        """Return a function to restrict whether an input value is a valid version, and raise a ValueError if it is not."""
        return request.param

    @pytest.fixture(params=[
        return_fully_supported_version,
        iati.version._is_fully_supported,
        return_known_version,
        iati.version._is_known
    ])
    def func_to_test(self, request):
        """Return a function to check for TypeErrors being raised when provided values other than iati.Versions."""
        return request.param
class TestVersionSupportChecks(VersionSupportChecksBase):
    """A container for tests relating to the detection of how much pyIATI supports particular versions."""

    def test_fully_supported_version_fully_supported(self, std_ver_minor_inst_valid_fullsupport, decorated_func_full_support):
        """Check that fully supported IATI Versions are detected as such."""
        version = std_ver_minor_inst_valid_fullsupport

        assert iati.version._is_fully_supported(version) is True
        assert decorated_func_full_support(version) == version

    def test_fully_supported_version_partially_supported(self, std_ver_minor_inst_valid_partsupport, decorated_func_full_support):
        """Check that partially supported IATI Versions are detected as not fully supported."""
        assert iati.version._is_fully_supported(std_ver_minor_inst_valid_partsupport) is False

        with pytest.raises(ValueError):
            decorated_func_full_support(std_ver_minor_inst_valid_partsupport)

    def test_known_version_known(self, std_ver_minor_inst_valid_known, decorated_func_known):
        """Check that known IATI Versions are detected as such."""
        assert iati.version._is_known(std_ver_minor_inst_valid_known) is True
        assert decorated_func_known(std_ver_minor_inst_valid_known) == std_ver_minor_inst_valid_known

    def test_known_version_not_known(self, std_ver_minor_inst_valid_unknown, decorated_func_known):
        """Check that unknown IATI Versions are detected as such."""
        assert iati.version._is_known(std_ver_minor_inst_valid_unknown) is False

        with pytest.raises(ValueError):
            decorated_func_known(std_ver_minor_inst_valid_unknown)

    def test_supported_version_str(self, std_ver_minor_uninst_valid_possible, truthy_func, decorated_func):
        """Check that Version Numbers cause an error if provided as anything other than an iati.Version."""
        assert truthy_func(std_ver_minor_uninst_valid_possible) is False

        with pytest.raises(ValueError):
            decorated_func(std_ver_minor_uninst_valid_possible)

    def test_supported_version_junk_value(self, std_ver_minor_uninst_typeerr, truthy_func, decorated_func):
        """Check that supported IATI Versions cause an error if a junk value is provided."""
        assert truthy_func(std_ver_minor_uninst_typeerr) is False

        with pytest.raises(ValueError):
            decorated_func(std_ver_minor_uninst_typeerr)

    def test_non_version_representation_valid_version_obj(self, std_ver_minor_inst_valid_possible, possibly_version_func):
        """Test that instantiated iati.Versions are detected as being valid representations of an IATI Version Number."""
        original_value = copy.deepcopy(std_ver_minor_inst_valid_possible)

        version = possibly_version_func(std_ver_minor_inst_valid_possible)

        assert version == original_value
        assert version is std_ver_minor_inst_valid_possible

    def test_non_version_representation_valid_val_decimal(self, std_ver_minor_uninst_valid_possible, possibly_version_func):
        """Test that values that can become iati.Versions are detected as being valid representations of an IATI Version Number."""
        original_value = copy.deepcopy(std_ver_minor_uninst_valid_possible)

        version = possibly_version_func(std_ver_minor_uninst_valid_possible)

        assert version == original_value
        assert version is std_ver_minor_uninst_valid_possible

    def test_non_version_representation_valid_val_integer(self, std_ver_major_uninst_valid_possible, possibly_version_func):
        """Test that positive integers are detected as being valid representations of an IATI Version Number."""
        original_value = copy.deepcopy(std_ver_major_uninst_valid_possible)

        version = possibly_version_func(std_ver_major_uninst_valid_possible)

        assert version == original_value
        assert version is std_ver_major_uninst_valid_possible

    def test_non_version_representation_valid_val_any(self, possibly_version_func):
        """Test that the specified ANY_VERSION values are detected as being valid representations of an IATI Version Number."""
        version = possibly_version_func(iati.version.STANDARD_VERSION_ANY)

        assert version == iati.version.STANDARD_VERSION_ANY

    def test_non_version_representation_invalid_val_integer(self, std_ver_all_uninst_valueerr, possibly_version_func):
        """Test that non-positive integers are detected as not being valid representations of an IATI Version Number."""
        with pytest.raises(ValueError):
            possibly_version_func(std_ver_all_uninst_valueerr)

    def test_non_version_representation_invalid_type(self, std_ver_all_uninst_typeerr, possibly_version_func):
        """Test that values of a type that cannot represent a Version cause a TypeError."""
        with pytest.raises(TypeError):
            possibly_version_func(std_ver_all_uninst_typeerr)
class TestVersionNormalisation:
    """A container for tests relating to normalising how versions are passed into functions."""

    @iati.version.decimalise_integer
    def return_decimalised_integer(version):  # pylint: disable=no-self-argument
        """Return the version parameter, but converted to an iati.Version representing the newest Decimal Version in the given Integer Version if something that can be treated as an Integer Version is provided."""
        return version

    @iati.version.normalise_decimals
    def return_normalised_decimal(version):  # pylint: disable=no-self-argument
        """Return the version parameter, but converted to an iati.Version if something that can be treated as a Decimal Version is provided."""
        return version

    INTEGER_TO_DECIMAL_FUNCTIONS = [
        return_decimalised_integer,
        iati.version._decimalise_integer
    ]

    @pytest.fixture(params=INTEGER_TO_DECIMAL_FUNCTIONS)
    def integer_decimalisation_func(self, request):
        """Return a function for which the return value can be checked."""
        return request.param

    DECIMAL_S13N_FUNCTIONS = [
        return_normalised_decimal,
        iati.version._normalise_decimal_version
    ]

    @pytest.fixture(params=DECIMAL_S13N_FUNCTIONS)
    def decimal_normalisation_func(self, request):
        """Return a function for which the return value can be checked."""
        return request.param

    @pytest.fixture(params=INTEGER_TO_DECIMAL_FUNCTIONS + DECIMAL_S13N_FUNCTIONS)
    def junk_ignoring_func(self, request):
        """Return a function that does not modify junk values before returning them."""
        return request.param

    # decimal normalisation

    def test_decimal_versions_normalised(self, std_ver_minor_uninst_valid_possible, decimal_normalisation_func):
        """Check that values that represent Decimal Versions of the IATI Standard are converted to iati.Versions."""
        assert decimal_normalisation_func(std_ver_minor_uninst_valid_possible) == iati.Version(std_ver_minor_uninst_valid_possible)

    def test_integer_versions_not_normalised(self, std_ver_major_uninst_valid_possible, decimal_normalisation_func):
        """Check that values that represent Integer Versions of the IATI Standard are returned as-is when normalising Decimal Versions."""
        assert decimal_normalisation_func(std_ver_major_uninst_valid_possible) == std_ver_major_uninst_valid_possible

    # integer decimalisation

    def test_decimal_version_conversion_valid_version(self, std_ver_minor_inst_valid_known, integer_decimalisation_func):
        """Check that known Decimal Versions remain unchanged."""
        assert integer_decimalisation_func(std_ver_minor_inst_valid_known) == std_ver_minor_inst_valid_known

    def test_decimal_version_conversion_valid_decimal_representation(self, std_ver_minor_uninst_valid_known, integer_decimalisation_func):
        """Check that values that can be used to create actual Decimal Versions are left alone."""
        assert integer_decimalisation_func(std_ver_minor_uninst_valid_known) == std_ver_minor_uninst_valid_known

    @pytest.mark.parametrize('integer_version, expected_decimal', [
        ('1', iati.Version('1.05')),
        ('2', iati.version.STANDARD_VERSION_LATEST),
        ('3', iati.Version('3.0.0')),
        (1, iati.Version('1.05')),
        (2, iati.version.STANDARD_VERSION_LATEST),
        (3, iati.Version('3.0.0'))
    ])
    @pytest.mark.latest_version('2.03')
    def test_integer_version_conversion_valid(self, integer_version, expected_decimal, integer_decimalisation_func):
        """Check that valid Integer Versions return the last Decimal in the Integer."""
        assert integer_decimalisation_func(integer_version) == expected_decimal

    def test_junk_values_not_modified(self, std_ver_all_uninst_mixederr, junk_ignoring_func):
        """Check that junk values are returned as-is when standardising Decimal Versions.

        An `is` check is performed to check that the same object is returned.
        An `==` check is performed to check that the value is not modified.

        """
        try:
            original_value = copy.deepcopy(std_ver_all_uninst_mixederr)
        except TypeError:
            # Some junk fixtures (e.g. generators) cannot be deep-copied.
            original_value = std_ver_all_uninst_mixederr

        result = junk_ignoring_func(std_ver_all_uninst_mixederr)

        assert result is std_ver_all_uninst_mixederr
        # NaN compares unequal to itself and iterators lack `==` semantics,
        # so those cases are exempted from the equality check.
        assert (result == original_value) or isinstance(original_value, type(iter([]))) or math.isnan(original_value)
class TestVersionMajorMinorRelationship:
    """A container for tests relating to the relationship between major and minor versions."""

    def test_versions_for_integer(self, std_ver_major_uninst_valid_known):
        """Check that the each of the decimal versions returned by versions_for_integer starts with the input major version."""
        result = iati.version.versions_for_integer(std_ver_major_uninst_valid_known)

        assert result != []
        for version in result:
            assert version.major == int(std_ver_major_uninst_valid_known)
| [
"iati.tests.fixtures.versions.split_semver",
"iati.tests.fixtures.versions.iativer",
"iati.tests.fixtures.versions.semver",
"pytest.mark.latest_version",
"pytest.mark.parametrize",
"pytest.raises",
"iati.tests.fixtures.versions.split_decimal",
"copy.deepcopy",
"pytest.fixture",
"iati.tests.fixture... | [((4555, 5267), 'pytest.fixture', 'pytest.fixture', ([], {'params': "[('1.01', '1.01', '='), ('1.0.0', '1.0.0', '='), ('1.01', '1.0.0', '='), (\n '1.0.0', '1.01', '='), ('1.02', '1.02', '='), ('1.1.0', '1.1.0', '='),\n ('1.02', '1.1.0', '='), ('1.1.0', '1.02', '='), ('1.01', '1.02', '<'),\n ('1.0.0', '1.1.0', '<'), ('1.01', '1.1.0', '<'), ('1.0.0', '1.02', '<'),\n ('1.01', '2.01', '<'), ('1.0.0', '2.0.0', '<'), ('1.01', '2.0.0', '<'),\n ('1.0.0', '2.01', '<'), ('1.1.0', '1.0.0', '>'), ('1.1.0', '1.01', '>'),\n ('1.02', '1.0.0', '>'), ('2.01', '1.01', '>'), ('2.0.0', '1.0.0', '>'),\n ('2.01', '1.0.0', '>'), ('2.0.0', '1.01', '>'), ('1.02', '1.1.7', '<'),\n ('1.1.7', '1.02', '>'), ('1.1.6', '1.1.7', '<'), ('1.1.7', '1.1.6', '>')]"}), "(params=[('1.01', '1.01', '='), ('1.0.0', '1.0.0', '='), (\n '1.01', '1.0.0', '='), ('1.0.0', '1.01', '='), ('1.02', '1.02', '='), (\n '1.1.0', '1.1.0', '='), ('1.02', '1.1.0', '='), ('1.1.0', '1.02', '='),\n ('1.01', '1.02', '<'), ('1.0.0', '1.1.0', '<'), ('1.01', '1.1.0', '<'),\n ('1.0.0', '1.02', '<'), ('1.01', '2.01', '<'), ('1.0.0', '2.0.0', '<'),\n ('1.01', '2.0.0', '<'), ('1.0.0', '2.01', '<'), ('1.1.0', '1.0.0', '>'),\n ('1.1.0', '1.01', '>'), ('1.02', '1.0.0', '>'), ('2.01', '1.01', '>'),\n ('2.0.0', '1.0.0', '>'), ('2.01', '1.0.0', '>'), ('2.0.0', '1.01', '>'),\n ('1.02', '1.1.7', '<'), ('1.1.7', '1.02', '>'), ('1.1.6', '1.1.7', '<'),\n ('1.1.7', '1.1.6', '>')])\n", (4569, 5267), False, 'import pytest\n'), ((6976, 7156), 'pytest.fixture', 'pytest.fixture', ([], {'params': "[(operator.eq, ['=']), (operator.ne, ['<', '>']), (operator.lt, ['<']), (\n operator.le, ['<', '=']), (operator.gt, ['>']), (operator.ge, ['>', '='])]"}), "(params=[(operator.eq, ['=']), (operator.ne, ['<', '>']), (\n operator.lt, ['<']), (operator.le, ['<', '=']), (operator.gt, ['>']), (\n operator.ge, ['>', '='])])\n", (6990, 7156), False, 'import pytest\n'), ((8185, 8287), 'pytest.fixture', 'pytest.fixture', ([], {'params': 
"[('major', 0), ('integer', 0), ('minor', 1), ('decimal', 1), ('patch', 2)]"}), "(params=[('major', 0), ('integer', 0), ('minor', 1), (\n 'decimal', 1), ('patch', 2)])\n", (8199, 8287), False, 'import pytest\n'), ((14203, 14343), 'pytest.fixture', 'pytest.fixture', ([], {'params': '[iati.version.STANDARD_VERSIONS, iati.version.STANDARD_VERSIONS_SUPPORTED,\n iati.version.STANDARD_VERSIONS_MINOR]'}), '(params=[iati.version.STANDARD_VERSIONS, iati.version.\n STANDARD_VERSIONS_SUPPORTED, iati.version.STANDARD_VERSIONS_MINOR])\n', (14217, 14343), False, 'import pytest\n'), ((14961, 14995), 'pytest.mark.latest_version', 'pytest.mark.latest_version', (['"""2.03"""'], {}), "('2.03')\n", (14987, 14995), False, 'import pytest\n'), ((15188, 15222), 'pytest.mark.latest_version', 'pytest.mark.latest_version', (['"""2.03"""'], {}), "('2.03')\n", (15214, 15222), False, 'import pytest\n'), ((15683, 15717), 'pytest.mark.latest_version', 'pytest.mark.latest_version', (['"""2.03"""'], {}), "('2.03')\n", (15709, 15717), False, 'import pytest\n'), ((15921, 15955), 'pytest.mark.latest_version', 'pytest.mark.latest_version', (['"""2.03"""'], {}), "('2.03')\n", (15947, 15955), False, 'import pytest\n'), ((16553, 16787), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""decorator"""', '[iati.version.allow_fully_supported_version, iati.version.\n allow_known_version, iati.version.allow_possible_version, iati.version.\n decimalise_integer, iati.version.normalise_decimals]'], {}), "('decorator', [iati.version.\n allow_fully_supported_version, iati.version.allow_known_version, iati.\n version.allow_possible_version, iati.version.decimalise_integer, iati.\n version.normalise_decimals])\n", (16576, 16787), False, 'import pytest\n'), ((18168, 18223), 'pytest.fixture', 'pytest.fixture', ([], {'params': '[return_fully_supported_version]'}), '(params=[return_fully_supported_version])\n', (18182, 18223), False, 'import pytest\n'), ((18432, 18477), 'pytest.fixture', 'pytest.fixture', ([], 
{'params': '[return_known_version]'}), '(params=[return_known_version])\n', (18446, 18477), False, 'import pytest\n'), ((18670, 18774), 'pytest.fixture', 'pytest.fixture', ([], {'params': '[return_possibly_version, iati.version._prevent_non_version_representations]'}), '(params=[return_possibly_version, iati.version.\n _prevent_non_version_representations])\n', (18684, 18774), False, 'import pytest\n'), ((18996, 19082), 'pytest.fixture', 'pytest.fixture', ([], {'params': '[iati.version._is_fully_supported, iati.version._is_known]'}), '(params=[iati.version._is_fully_supported, iati.version.\n _is_known])\n', (19010, 19082), False, 'import pytest\n'), ((19291, 19368), 'pytest.fixture', 'pytest.fixture', ([], {'params': '[return_fully_supported_version, return_known_version]'}), '(params=[return_fully_supported_version, return_known_version])\n', (19305, 19368), False, 'import pytest\n'), ((19589, 19729), 'pytest.fixture', 'pytest.fixture', ([], {'params': '[return_fully_supported_version, iati.version._is_fully_supported,\n return_known_version, iati.version._is_known]'}), '(params=[return_fully_supported_version, iati.version.\n _is_fully_supported, return_known_version, iati.version._is_known])\n', (19603, 19729), False, 'import pytest\n'), ((25812, 25863), 'pytest.fixture', 'pytest.fixture', ([], {'params': 'INTEGER_TO_DECIMAL_FUNCTIONS'}), '(params=INTEGER_TO_DECIMAL_FUNCTIONS)\n', (25826, 25863), False, 'import pytest\n'), ((26147, 26192), 'pytest.fixture', 'pytest.fixture', ([], {'params': 'DECIMAL_S13N_FUNCTIONS'}), '(params=DECIMAL_S13N_FUNCTIONS)\n', (26161, 26192), False, 'import pytest\n'), ((26354, 26430), 'pytest.fixture', 'pytest.fixture', ([], {'params': '(INTEGER_TO_DECIMAL_FUNCTIONS + DECIMAL_S13N_FUNCTIONS)'}), '(params=INTEGER_TO_DECIMAL_FUNCTIONS + DECIMAL_S13N_FUNCTIONS)\n', (26368, 26430), False, 'import pytest\n'), ((28366, 28400), 'pytest.mark.latest_version', 'pytest.mark.latest_version', (['"""2.03"""'], {}), "('2.03')\n", (28392, 28400), 
False, 'import pytest\n'), ((1407, 1465), 'iati.tests.fixtures.versions.split_decimal', 'split_decimal', (['std_ver_minor_uninst_valid_decimal_possible'], {}), '(std_ver_minor_uninst_valid_decimal_possible)\n', (1420, 1465), False, 'from iati.tests.fixtures.versions import iativer, semver, split_decimal, split_iativer, split_semver\n'), ((2712, 2770), 'iati.tests.fixtures.versions.split_iativer', 'split_iativer', (['std_ver_minor_uninst_valid_iativer_possible'], {}), '(std_ver_minor_uninst_valid_iativer_possible)\n', (2725, 2770), False, 'from iati.tests.fixtures.versions import iativer, semver, split_decimal, split_iativer, split_semver\n'), ((3718, 3774), 'iati.tests.fixtures.versions.split_semver', 'split_semver', (['std_ver_minor_uninst_valid_semver_possible'], {}), '(std_ver_minor_uninst_valid_semver_possible)\n', (3730, 3774), False, 'from iati.tests.fixtures.versions import iativer, semver, split_decimal, split_iativer, split_semver\n'), ((8832, 8890), 'iati.tests.fixtures.versions.split_semver', 'split_semver', (['std_ver_minor_inst_valid_possible.semver_str'], {}), '(std_ver_minor_inst_valid_possible.semver_str)\n', (8844, 8890), False, 'from iati.tests.fixtures.versions import iativer, semver, split_decimal, split_iativer, split_semver\n'), ((9958, 10016), 'iati.tests.fixtures.versions.split_iativer', 'split_iativer', (['std_ver_minor_uninst_valid_iativer_possible'], {}), '(std_ver_minor_uninst_valid_iativer_possible)\n', (9971, 10016), False, 'from iati.tests.fixtures.versions import iativer, semver, split_decimal, split_iativer, split_semver\n'), ((10038, 10089), 'iati.tests.fixtures.versions.semver', 'semver', (['integer_component', '(decimal_component - 1)', '(0)'], {}), '(integer_component, decimal_component - 1, 0)\n', (10044, 10089), False, 'from iati.tests.fixtures.versions import iativer, semver, split_decimal, split_iativer, split_semver\n'), ((10659, 10715), 'iati.tests.fixtures.versions.split_semver', 'split_semver', 
(['std_ver_minor_uninst_valid_semver_possible'], {}), '(std_ver_minor_uninst_valid_semver_possible)\n', (10671, 10715), False, 'from iati.tests.fixtures.versions import iativer, semver, split_decimal, split_iativer, split_semver\n'), ((10738, 10783), 'iati.tests.fixtures.versions.iativer', 'iativer', (['major_component', '(minor_component + 1)'], {}), '(major_component, minor_component + 1)\n', (10745, 10783), False, 'from iati.tests.fixtures.versions import iativer, semver, split_decimal, split_iativer, split_semver\n'), ((11428, 11484), 'iati.tests.fixtures.versions.split_semver', 'split_semver', (['std_ver_minor_uninst_valid_semver_possible'], {}), '(std_ver_minor_uninst_valid_semver_possible)\n', (11440, 11484), False, 'from iati.tests.fixtures.versions import iativer, semver, split_decimal, split_iativer, split_semver\n'), ((12090, 12146), 'iati.tests.fixtures.versions.split_semver', 'split_semver', (['std_ver_minor_uninst_valid_semver_possible'], {}), '(std_ver_minor_uninst_valid_semver_possible)\n', (12102, 12146), False, 'from iati.tests.fixtures.versions import iativer, semver, split_decimal, split_iativer, split_semver\n'), ((22676, 22724), 'copy.deepcopy', 'copy.deepcopy', (['std_ver_minor_inst_valid_possible'], {}), '(std_ver_minor_inst_valid_possible)\n', (22689, 22724), False, 'import copy\n'), ((23186, 23236), 'copy.deepcopy', 'copy.deepcopy', (['std_ver_minor_uninst_valid_possible'], {}), '(std_ver_minor_uninst_valid_possible)\n', (23199, 23236), False, 'import copy\n'), ((23683, 23733), 'copy.deepcopy', 'copy.deepcopy', (['std_ver_major_uninst_valid_possible'], {}), '(std_ver_major_uninst_valid_possible)\n', (23696, 23733), False, 'import copy\n'), ((481, 505), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (494, 505), False, 'import pytest\n'), ((697, 721), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (710, 721), False, 'import pytest\n'), ((2046, 2070), 'pytest.raises', 'pytest.raises', 
(['TypeError'], {}), '(TypeError)\n', (2059, 2070), False, 'import pytest\n'), ((2304, 2329), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2317, 2329), False, 'import pytest\n'), ((3310, 3335), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3323, 3335), False, 'import pytest\n'), ((4257, 4282), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4270, 4282), False, 'import pytest\n'), ((8990, 9041), 'iati.tests.fixtures.versions.semver', 'semver', (['components[0]', 'components[1]', 'components[2]'], {}), '(components[0], components[1], components[2])\n', (8996, 9041), False, 'from iati.tests.fixtures.versions import iativer, semver, split_decimal, split_iativer, split_semver\n'), ((11527, 11560), 'iati.tests.fixtures.versions.semver', 'semver', (['(major_component + 1)', '(0)', '(0)'], {}), '(major_component + 1, 0, 0)\n', (11533, 11560), False, 'from iati.tests.fixtures.versions import iativer, semver, split_decimal, split_iativer, split_semver\n'), ((12189, 12236), 'iati.tests.fixtures.versions.semver', 'semver', (['major_component', '(minor_component + 1)', '(0)'], {}), '(major_component, minor_component + 1, 0)\n', (12195, 12236), False, 'from iati.tests.fixtures.versions import iativer, semver, split_decimal, split_iativer, split_semver\n'), ((13132, 13161), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (13145, 13161), False, 'import pytest\n'), ((13236, 13265), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (13249, 13265), False, 'import pytest\n'), ((16997, 17021), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (17010, 17021), False, 'import pytest\n'), ((20838, 20863), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (20851, 20863), False, 'import pytest\n'), ((21550, 21575), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (21563, 
21575), False, 'import pytest\n'), ((21948, 21973), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (21961, 21973), False, 'import pytest\n'), ((22321, 22346), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (22334, 22346), False, 'import pytest\n'), ((24519, 24544), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (24532, 24544), False, 'import pytest\n'), ((24826, 24850), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (24839, 24850), False, 'import pytest\n'), ((29081, 29123), 'copy.deepcopy', 'copy.deepcopy', (['std_ver_all_uninst_mixederr'], {}), '(std_ver_all_uninst_mixederr)\n', (29094, 29123), False, 'import copy\n'), ((29418, 29444), 'math.isnan', 'math.isnan', (['original_value'], {}), '(original_value)\n', (29428, 29444), False, 'import math\n')] |
"""Console script for r_freeze."""
import argparse
import sys
from r_freeze.r_freeze import get_packages, write_package_file
def main():
    """Console script for r_freeze.

    Parses the command line, then either writes the discovered packages to
    the requested output file or prints them to stdout.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("dir", type=str, help="Directory to look for")
    parser.add_argument(
        "-o", type=str, help="Name of outfile if you want to create one"
    )
    parser.add_argument(
        "--overwrite",
        action="store_true",
        default=False,
        help="Over write output file if it already exists",
    )
    args = parser.parse_args()

    # argparse already coerces both values with `type=str`; keep the explicit
    # guards for callers that construct an args-like namespace by hand.
    if not isinstance(args.dir, str):
        raise TypeError('Directory should be string or "."')
    if args.o and not isinstance(args.o, str):
        raise TypeError('Outfile should be string or "."')

    if args.dir and args.o:
        packages = get_packages(args.dir)
        write_package_file(packages, args.o, args.overwrite)
    elif args.dir:
        print(get_packages(args.dir))
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
| [
"r_freeze.r_freeze.get_packages",
"r_freeze.r_freeze.write_package_file",
"argparse.ArgumentParser"
] | [((191, 216), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (214, 216), False, 'import argparse\n'), ((868, 890), 'r_freeze.r_freeze.get_packages', 'get_packages', (['args.dir'], {}), '(args.dir)\n', (880, 890), False, 'from r_freeze.r_freeze import get_packages, write_package_file\n'), ((899, 951), 'r_freeze.r_freeze.write_package_file', 'write_package_file', (['packages', 'args.o', 'args.overwrite'], {}), '(packages, args.o, args.overwrite)\n', (917, 951), False, 'from r_freeze.r_freeze import get_packages, write_package_file\n'), ((986, 1008), 'r_freeze.r_freeze.get_packages', 'get_packages', (['args.dir'], {}), '(args.dir)\n', (998, 1008), False, 'from r_freeze.r_freeze import get_packages, write_package_file\n')] |
import sqlalchemy
from application import metadata
# Association table holding at most one auth token per user; a token row is
# cascade-deleted together with its owning `_ps_users` row.
Token = sqlalchemy.Table(
    "tokens",
    metadata,
    sqlalchemy.Column(
        "user_id",
        sqlalchemy.ForeignKey('_ps_users.id', ondelete="CASCADE"),
        primary_key=True,
    ),
    sqlalchemy.Column("token", sqlalchemy.String(length=1000), nullable=False),
)
| [
"sqlalchemy.String",
"sqlalchemy.ForeignKey"
] | [((140, 197), 'sqlalchemy.ForeignKey', 'sqlalchemy.ForeignKey', (['"""_ps_users.id"""'], {'ondelete': '"""CASCADE"""'}), "('_ps_users.id', ondelete='CASCADE')\n", (161, 197), False, 'import sqlalchemy\n'), ((258, 288), 'sqlalchemy.String', 'sqlalchemy.String', ([], {'length': '(1000)'}), '(length=1000)\n', (275, 288), False, 'import sqlalchemy\n')] |
import astropy.units as u
import numpy as np
from ..utils import cone_solid_angle
#: Unit of the background rate IRF
BACKGROUND_UNIT = u.Unit('s-1 TeV-1 sr-1')


def background_2d(events, reco_energy_bins, fov_offset_bins, t_obs):
    """
    Calculate background rates in radially symmetric bins in the field of view.

    GADF documentation here:
    https://gamma-astro-data-formats.readthedocs.io/en/latest/irfs/full_enclosure/bkg/index.html#bkg-2d

    Parameters
    ----------
    events: astropy.table.QTable
        DL2 events table of the selected background events.
        Needed columns for this function: `reco_source_fov_offset`, `reco_energy`, `weight`
    reco_energy_bins: astropy.units.Quantity[energy]
        The bins in reconstructed energy to be used for the IRF
    fov_offset_bins: astropy.units.Quantity[angle]
        The bins in the field of view offset to be used for the IRF
    t_obs: astropy.units.Quantity[time]
        Observation time. This must match with how the individual event
        weights are calculated.

    Returns
    -------
    bg_rate: astropy.units.Quantity
        The background rate as particles per energy, time and solid angle
        in the specified bins.

        Shape: (len(reco_energy_bins) - 1, len(fov_offset_bins) - 1)
    """
    hist, _, _ = np.histogram2d(
        events["reco_energy"].to_value(u.TeV),
        events["reco_source_fov_offset"].to_value(u.deg),
        bins=[
            reco_energy_bins.to_value(u.TeV),
            fov_offset_bins.to_value(u.deg),
        ],
        weights=events['weight'],
    )

    # divide all energy bins by their width
    # hist has shape (n_energy, n_fov_offset) so we need to transpose and then back
    bin_width_energy = np.diff(reco_energy_bins)
    per_energy = (hist.T / bin_width_energy).T

    # divide by solid angle in each fov bin and the observation time
    bin_solid_angle = np.diff(cone_solid_angle(fov_offset_bins))
    bg_rate = per_energy / t_obs / bin_solid_angle
    return bg_rate.to(BACKGROUND_UNIT)
| [
"numpy.diff",
"astropy.units.Unit"
] | [((137, 161), 'astropy.units.Unit', 'u.Unit', (['"""s-1 TeV-1 sr-1"""'], {}), "('s-1 TeV-1 sr-1')\n", (143, 161), True, 'import astropy.units as u\n'), ((1738, 1763), 'numpy.diff', 'np.diff', (['reco_energy_bins'], {}), '(reco_energy_bins)\n', (1745, 1763), True, 'import numpy as np\n')] |
import pymysql
import pandas as pd
import logging
import traceback
logger = logging.getLogger(__name__)

# SQL template for a per-asset hourly OHLC table; the literal '{table}' is
# substituted with the asset name before execution (see
# OneHourCandleDB.create_candle_table).  `time` is UNIQUE so re-inserting an
# existing hour is a no-op with INSERT IGNORE.
TABLE_CANDLE_PATTERN = (
    "CREATE TABLE IF NOT EXISTS {table}("
    " id int(11) UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY, "
    " time DATETIME UNIQUE,"
    " open FLOAT NOT NULL,"
    " high FLOAT,"
    " low FLOAT,"
    " close FLOAT NOT NULL,"
    " volume_to FLOAT,"
    " volume_from FLOAT)"
)
class OneHourCandleDB(object):
    """MySQL-backed store for per-asset hourly OHLC candles.

    Each asset gets its own table (created on demand from
    ``TABLE_CANDLE_PATTERN``) inside the ``hourly_ohlc`` database.
    """

    def __init__(self):
        pass

    @staticmethod
    def get_connection():
        """Open a new autocommitting connection to the hourly_ohlc database.

        Returns a pymysql connection with DictCursor rows on success, or
        ``None`` if connecting failed (the error is logged).
        """
        try:
            return pymysql.connect(host='127.0.0.1',
                                   user='root',
                                   password='<PASSWORD>',
                                   db='hourly_ohlc',
                                   charset='utf8mb4',
                                   cursorclass=pymysql.cursors.DictCursor,
                                   autocommit=True)
        except pymysql.Error as e:
            logger.exception(str(e))
            # print_exc() writes the traceback itself; the original
            # `print(traceback.print_exc())` additionally printed "None".
            traceback.print_exc()

    def create_candle_table(self, asset):
        """Create the OHLC table for `asset` if it does not exist yet."""
        connection = self.get_connection()
        try:
            with connection.cursor() as cursor:
                query = TABLE_CANDLE_PATTERN.replace('{table}', asset)
                cursor.execute(query)
                if cursor.description:
                    logger.info(cursor.description)
        finally:
            connection.close()

    def does_table_exist(self, table):
        """Return True if `table` exists in the current database."""
        connection = self.get_connection()
        try:
            with connection.cursor(pymysql.cursors.SSCursor) as cursor:
                cursor.execute("SHOW TABLES")
                for table_in_db in cursor.fetchall():
                    if table == table_in_db[0]:
                        return True
                return False
        finally:
            if connection:
                connection.close()

    def insert_many_candles(self, table, candles):
        """Bulk-insert candles, silently skipping duplicate timestamps.

        Args:
            table: Asset table name; created first if missing.
            candles: Iterable of 7-tuples
                (unix_time, open, high, low, close, volume_to, volume_from).

        Returns:
            Number of rows actually inserted.
        """
        self.create_candle_table(table)
        connection = self.get_connection()
        try:
            with connection.cursor() as cursor:
                query = ("INSERT IGNORE INTO " + table +
                         "(time, open, high, low, close, volume_to, volume_from)"
                         " VALUES(FROM_UNIXTIME(%s),%s,%s,%s,%s,%s,%s)")
                # Candle timestamps are interpreted as UTC.
                cursor.execute("SET time_zone='+00:00'")
                cursor.executemany(query, candles)
                if cursor.description:
                    logger.info(cursor.description)
                return cursor.rowcount
        finally:
            if connection:
                connection.close()

    def get_last_candle(self, table):
        """Return the `time` of the most recent candle in `table`, or None."""
        connection = self.get_connection()
        try:
            with connection.cursor() as cursor:
                cursor.execute("SELECT time FROM " + table + " ORDER BY time DESC LIMIT 1")
                fetched_data = cursor.fetchone()
                if fetched_data:
                    return fetched_data['time']
        finally:
            if connection:
                connection.close()

    def aggregate_candles(self, candles, aggregate):
        """Merge consecutive 1-hour candles into `aggregate`-hour candles.

        A bucket only starts on an aligned hour (``time.hour % aggregate ==
        0``); unaligned leading candles are skipped one at a time, and a
        trailing partial bucket is dropped.

        Args:
            candles: Candle dicts sorted ascending by 'time'.
            aggregate: Number of hourly candles per output candle.

        Returns:
            List of aggregated candle dicts.
        """
        res_candles = []
        idx = 0
        total = len(candles)
        # Bounds-checked loop replaces the original `while True` that relied
        # on IndexError for termination.
        while idx + aggregate <= total:
            open_time = candles[idx]['time']
            if open_time.hour % aggregate != 0:
                idx += 1
                continue
            group = candles[idx:idx + aggregate]
            res_candles.append({
                'open': group[0]['open'],
                'close': group[-1]['close'],
                'high': max(candle['high'] for candle in group),
                'low': min(candle['low'] for candle in group),
                'volume_to': sum(candle['volume_to'] for candle in group),
                # `or 0` treats NULL volumes as 0.  The original ternary was
                # mis-parenthesised and reset the running total to 0 whenever
                # a candle had a falsy volume_from.
                'volume_from': sum(candle['volume_from'] or 0 for candle in group),
                'time': open_time
            })
            idx += aggregate
        return res_candles

    def get_all_candles_between(self, table, start_dtm, end_dtm, aggregate=1):
        """Fetch candles in [start_dtm, end_dtm] as a pandas DataFrame.

        Columns are renamed to the short form T/O/H/L/C/V/QV.  Returns
        ``None`` when the range contains no candles.
        """
        connection = self.get_connection()
        try:
            with connection.cursor() as cursor:
                query = ("SELECT time, open, high, low, close, volume_from, volume_to "
                         "FROM " + table + " WHERE time BETWEEN %s AND %s ORDER BY time ASC")
                cursor.execute(query, (start_dtm, end_dtm))
                candles = cursor.fetchall()
                if candles:
                    aggregated_candles = self.aggregate_candles(candles, aggregate)
                    df = pd.DataFrame(aggregated_candles)
                    df.rename(columns={'time': 'T',
                                       'open': 'O',
                                       'high': 'H',
                                       'low': 'L',
                                       'close': 'C',
                                       'volume_from': 'V',
                                       'volume_to': 'QV',
                                       },
                              inplace=True)
                    return df
        finally:
            if connection:
                connection.close()
| [
"logging.getLogger",
"traceback.print_exc",
"pymysql.connect",
"pandas.DataFrame"
] | [((77, 104), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (94, 104), False, 'import logging\n'), ((727, 900), 'pymysql.connect', 'pymysql.connect', ([], {'host': '"""127.0.0.1"""', 'user': '"""root"""', 'password': '"""<PASSWORD>"""', 'db': '"""hourly_ohlc"""', 'charset': '"""utf8mb4"""', 'cursorclass': 'pymysql.cursors.DictCursor', 'autocommit': '(True)'}), "(host='127.0.0.1', user='root', password='<PASSWORD>', db=\n 'hourly_ohlc', charset='utf8mb4', cursorclass=pymysql.cursors.\n DictCursor, autocommit=True)\n", (742, 900), False, 'import pymysql\n'), ((1257, 1278), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1276, 1278), False, 'import traceback\n'), ((5819, 5851), 'pandas.DataFrame', 'pd.DataFrame', (['aggregated_candles'], {}), '(aggregated_candles)\n', (5831, 5851), True, 'import pandas as pd\n')] |
import json
import requests
from requests_toolbelt import MultipartEncoder
from pymessenger.graph_api import FacebookGraphApi
import pymessenger.utils as utils
class Bot(FacebookGraphApi):
    """Facebook Messenger Send API client.

    Wraps the Graph API with helpers for each message/attachment type.
    All senders return the decoded JSON response from Facebook as a dict.
    """

    def __init__(self, *args, **kwargs):
        super(Bot, self).__init__(*args, **kwargs)

    def send_text_message(self, recipient_id, message):
        '''Send text messages to the specified recipient.
        https://developers.facebook.com/docs/messenger-platform/send-api-reference/text-message
        Input:
            recipient_id: recipient id to send to
            message: message to send
        Output:
            Response from API as <dict>
        '''
        payload = {
            'recipient': {
                'id': recipient_id
            },
            'message': {
                'text': message
            }
        }
        return self.send_raw(payload)

    def send_message(self, recipient_id, message):
        '''Send text messages to the specified recipient.
        https://developers.facebook.com/docs/messenger-platform/send-api-reference/text-message
        Input:
            recipient_id: recipient id to send to
            message: raw message to send
        Output:
            Response from API as <dict>
        '''
        payload = {
            'recipient': {
                'id': recipient_id
            },
            'message': message
        }
        return self.send_raw(payload)

    def send_generic_message(self, recipient_id, elements):
        '''Send generic messages to the specified recipient.
        https://developers.facebook.com/docs/messenger-platform/send-api-reference/generic-template
        Input:
            recipient_id: recipient id to send to
            elements: generic message elements to send
        Output:
            Response from API as <dict>
        '''
        payload = {
            'recipient': {
                'id': recipient_id
            },
            'message': {
                "attachment": {
                    "type": "template",
                    "payload": {
                        "template_type": "generic",
                        "elements": elements
                    }
                }
            }
        }
        return self.send_raw(payload)

    def send_button_message(self, recipient_id, text, buttons):
        '''Send text messages to the specified recipient.
        https://developers.facebook.com/docs/messenger-platform/send-api-reference/button-template
        Input:
            recipient_id: recipient id to send to
            text: text of message to send
            buttons: buttons to send
        Output:
            Response from API as <dict>
        '''
        payload = {
            'recipient': {
                'id': recipient_id
            },
            'message': {
                "attachment": {
                    "type": "template",
                    "payload": {
                        "template_type": "button",
                        "text": text,
                        "buttons": buttons
                    }
                }
            }
        }
        return self.send_raw(payload)

    def send_image(self, recipient_id, image_path):
        '''Send an image to the specified recipient.
        Image must be PNG or JPEG or GIF (more might be supported).
        https://developers.facebook.com/docs/messenger-platform/send-api-reference/image-attachment
        Input:
            recipient_id: recipient id to send to
            image_path: path to image to be sent
        Output:
            Response from API as <dict>
        '''
        return self._send_attachment_file(recipient_id, 'image', image_path)

    def send_image_url(self, recipient_id, image_url):
        '''Send an image to specified recipient using URL.
        Image must be PNG or JPEG or GIF (more might be supported).
        https://developers.facebook.com/docs/messenger-platform/send-api-reference/image-attachment
        Input:
            recipient_id: recipient id to send to
            image_url: url of image to be sent
        Output:
            Response from API as <dict>
        '''
        return self._send_attachment_url(recipient_id, 'image', image_url)

    def send_action(self, recipient_id, action):
        '''Send typing indicators or send read receipts to the specified recipient.
        https://developers.facebook.com/docs/messenger-platform/send-api-reference/sender-actions
        Input:
            recipient_id: recipient id to send to
            action: action type (mark_seen, typing_on, typing_off)
        Output:
            Response from API as <dict>
        '''
        payload = {
            'recipient': {
                'id': recipient_id
            },
            'sender_action': action
        }
        return self.send_raw(payload)

    def _send_payload(self, payload):
        ''' Deprecated, use send_raw instead '''
        return self.send_raw(payload)

    def send_raw(self, payload):
        '''POST a raw payload to the Send API and return the JSON response.'''
        request_endpoint = '{0}/me/messages'.format(self.graph_url)
        response = requests.post(
            request_endpoint,
            params=self.auth_args,
            json=payload
        )
        result = response.json()
        return result

    def _send_attachment_file(self, recipient_id, attachment_type, file_path):
        '''Upload a local file as a multipart attachment of the given type.

        Shared implementation for send_image/send_audio/send_video/send_file;
        closes the file handle after the request completes.
        '''
        # `with` fixes the original leak of the opened file handle.
        with open(file_path, 'rb') as file_data:
            payload = {
                'recipient': json.dumps(
                    {
                        'id': recipient_id
                    }
                ),
                'message': json.dumps(
                    {
                        'attachment': {
                            'type': attachment_type,
                            'payload': {}
                        }
                    }
                ),
                'filedata': (file_path, file_data)
            }
            multipart_data = MultipartEncoder(payload)
            multipart_header = {
                'Content-Type': multipart_data.content_type
            }
            return requests.post(self.base_url, data=multipart_data, headers=multipart_header).json()

    def _send_attachment_url(self, recipient_id, attachment_type, url):
        '''Send a hosted attachment of the given type by URL.'''
        payload = {
            'recipient': json.dumps(
                {
                    'id': recipient_id
                }
            ),
            'message': json.dumps(
                {
                    'attachment': {
                        'type': attachment_type,
                        'payload': {
                            'url': url
                        }
                    }
                }
            )
        }
        return self.send_raw(payload)

    def send_audio(self, recipient_id, audio_path):
        '''Send audio to the specified recipient.
        Audio must be MP3 or WAV
        https://developers.facebook.com/docs/messenger-platform/send-api-reference/audio-attachment
        Input:
            recipient_id: recipient id to send to
            audio_path: path to audio to be sent
        Output:
            Response from API as <dict>
        '''
        # BUG FIX: originally opened `image_path` (undefined here) instead
        # of `audio_path`, raising NameError on every call.
        return self._send_attachment_file(recipient_id, 'audio', audio_path)

    def send_audio_url(self, recipient_id, audio_url):
        '''Send audio to specified recipient using URL.
        Audio must be MP3 or WAV
        https://developers.facebook.com/docs/messenger-platform/send-api-reference/audio-attachment
        Input:
            recipient_id: recipient id to send to
            audio_url: url of audio to be sent
        Output:
            Response from API as <dict>
        '''
        return self._send_attachment_url(recipient_id, 'audio', audio_url)

    def send_video(self, recipient_id, video_path):
        '''Send video to the specified recipient.
        Video should be MP4 or MOV, but supports more (https://www.facebook.com/help/218673814818907).
        https://developers.facebook.com/docs/messenger-platform/send-api-reference/video-attachment
        Input:
            recipient_id: recipient id to send to
            video_path: path to video to be sent
        Output:
            Response from API as <dict>
        '''
        # BUG FIX: originally opened `image_path` instead of `video_path`
        # and declared the attachment type as 'audio' instead of 'video'.
        return self._send_attachment_file(recipient_id, 'video', video_path)

    def send_video_url(self, recipient_id, video_url):
        '''Send video to specified recipient using URL.
        Video should be MP4 or MOV, but supports more (https://www.facebook.com/help/218673814818907).
        https://developers.facebook.com/docs/messenger-platform/send-api-reference/video-attachment
        Input:
            recipient_id: recipient id to send to
            video_url: url of video to be sent
        Output:
            Response from API as <dict>
        '''
        # BUG FIX: originally declared the attachment type as 'audio'.
        return self._send_attachment_url(recipient_id, 'video', video_url)

    def send_file(self, recipient_id, file_path):
        '''Send file to the specified recipient.
        https://developers.facebook.com/docs/messenger-platform/send-api-reference/file-attachment
        Input:
            recipient_id: recipient id to send to
            file_path: path to file to be sent
        Output:
            Response from API as <dict>
        '''
        # BUG FIX: originally opened `image_path` instead of `file_path`.
        return self._send_attachment_file(recipient_id, 'file', file_path)

    def send_file_url(self, recipient_id, file_url):
        '''Send file to the specified recipient.
        https://developers.facebook.com/docs/messenger-platform/send-api-reference/file-attachment
        Input:
            recipient_id: recipient id to send to
            file_url: url of file to be sent
        Output:
            Response from API as <dict>
        '''
        return self._send_attachment_url(recipient_id, 'file', file_url)
| [
"json.dumps",
"requests_toolbelt.MultipartEncoder",
"requests.post"
] | [((4093, 4118), 'requests_toolbelt.MultipartEncoder', 'MultipartEncoder', (['payload'], {}), '(payload)\n', (4109, 4118), False, 'from requests_toolbelt import MultipartEncoder\n'), ((6174, 6242), 'requests.post', 'requests.post', (['request_endpoint'], {'params': 'self.auth_args', 'json': 'payload'}), '(request_endpoint, params=self.auth_args, json=payload)\n', (6187, 6242), False, 'import requests\n'), ((7228, 7253), 'requests_toolbelt.MultipartEncoder', 'MultipartEncoder', (['payload'], {}), '(payload)\n', (7244, 7253), False, 'from requests_toolbelt import MultipartEncoder\n'), ((9313, 9338), 'requests_toolbelt.MultipartEncoder', 'MultipartEncoder', (['payload'], {}), '(payload)\n', (9329, 9338), False, 'from requests_toolbelt import MultipartEncoder\n'), ((11359, 11384), 'requests_toolbelt.MultipartEncoder', 'MultipartEncoder', (['payload'], {}), '(payload)\n', (11375, 11384), False, 'from requests_toolbelt import MultipartEncoder\n'), ((3672, 3704), 'json.dumps', 'json.dumps', (["{'id': recipient_id}"], {}), "({'id': recipient_id})\n", (3682, 3704), False, 'import json\n'), ((3797, 3857), 'json.dumps', 'json.dumps', (["{'attachment': {'type': 'image', 'payload': {}}}"], {}), "({'attachment': {'type': 'image', 'payload': {}}})\n", (3807, 3857), False, 'import json\n'), ((4820, 4852), 'json.dumps', 'json.dumps', (["{'id': recipient_id}"], {}), "({'id': recipient_id})\n", (4830, 4852), False, 'import json\n'), ((4945, 5021), 'json.dumps', 'json.dumps', (["{'attachment': {'type': 'image', 'payload': {'url': image_url}}}"], {}), "({'attachment': {'type': 'image', 'payload': {'url': image_url}}})\n", (4955, 5021), False, 'import json\n'), ((6807, 6839), 'json.dumps', 'json.dumps', (["{'id': recipient_id}"], {}), "({'id': recipient_id})\n", (6817, 6839), False, 'import json\n'), ((6932, 6992), 'json.dumps', 'json.dumps', (["{'attachment': {'type': 'audio', 'payload': {}}}"], {}), "({'attachment': {'type': 'audio', 'payload': {}}})\n", (6942, 6992), False, 
'import json\n'), ((7917, 7949), 'json.dumps', 'json.dumps', (["{'id': recipient_id}"], {}), "({'id': recipient_id})\n", (7927, 7949), False, 'import json\n'), ((8042, 8118), 'json.dumps', 'json.dumps', (["{'attachment': {'type': 'audio', 'payload': {'url': audio_url}}}"], {}), "({'attachment': {'type': 'audio', 'payload': {'url': audio_url}}})\n", (8052, 8118), False, 'import json\n'), ((8892, 8924), 'json.dumps', 'json.dumps', (["{'id': recipient_id}"], {}), "({'id': recipient_id})\n", (8902, 8924), False, 'import json\n'), ((9017, 9077), 'json.dumps', 'json.dumps', (["{'attachment': {'type': 'audio', 'payload': {}}}"], {}), "({'attachment': {'type': 'audio', 'payload': {}}})\n", (9027, 9077), False, 'import json\n'), ((10073, 10105), 'json.dumps', 'json.dumps', (["{'id': recipient_id}"], {}), "({'id': recipient_id})\n", (10083, 10105), False, 'import json\n'), ((10198, 10274), 'json.dumps', 'json.dumps', (["{'attachment': {'type': 'audio', 'payload': {'url': video_url}}}"], {}), "({'attachment': {'type': 'audio', 'payload': {'url': video_url}}})\n", (10208, 10274), False, 'import json\n'), ((10940, 10972), 'json.dumps', 'json.dumps', (["{'id': recipient_id}"], {}), "({'id': recipient_id})\n", (10950, 10972), False, 'import json\n'), ((11065, 11124), 'json.dumps', 'json.dumps', (["{'attachment': {'type': 'file', 'payload': {}}}"], {}), "({'attachment': {'type': 'file', 'payload': {}}})\n", (11075, 11124), False, 'import json\n'), ((12003, 12035), 'json.dumps', 'json.dumps', (["{'id': recipient_id}"], {}), "({'id': recipient_id})\n", (12013, 12035), False, 'import json\n'), ((12128, 12202), 'json.dumps', 'json.dumps', (["{'attachment': {'type': 'file', 'payload': {'url': file_url}}}"], {}), "({'attachment': {'type': 'file', 'payload': {'url': file_url}}})\n", (12138, 12202), False, 'import json\n'), ((4229, 4304), 'requests.post', 'requests.post', (['self.base_url'], {'data': 'multipart_data', 'headers': 'multipart_header'}), '(self.base_url, data=multipart_data, 
headers=multipart_header)\n', (4242, 4304), False, 'import requests\n'), ((7364, 7439), 'requests.post', 'requests.post', (['self.base_url'], {'data': 'multipart_data', 'headers': 'multipart_header'}), '(self.base_url, data=multipart_data, headers=multipart_header)\n', (7377, 7439), False, 'import requests\n'), ((9449, 9524), 'requests.post', 'requests.post', (['self.base_url'], {'data': 'multipart_data', 'headers': 'multipart_header'}), '(self.base_url, data=multipart_data, headers=multipart_header)\n', (9462, 9524), False, 'import requests\n'), ((11495, 11570), 'requests.post', 'requests.post', (['self.base_url'], {'data': 'multipart_data', 'headers': 'multipart_header'}), '(self.base_url, data=multipart_data, headers=multipart_header)\n', (11508, 11570), False, 'import requests\n')] |