hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ea5efbdfca030f1ac642543165d5cb1e6eec0af3 | 4,305 | py | Python | streamlit_web.py | BennyWestsyde/FakeNewsDetection | 8b171f2c93d0849e13c9ea6d94b784caf037a3bb | [
"BSD-3-Clause"
] | null | null | null | streamlit_web.py | BennyWestsyde/FakeNewsDetection | 8b171f2c93d0849e13c9ea6d94b784caf037a3bb | [
"BSD-3-Clause"
] | 16 | 2021-04-29T14:22:46.000Z | 2021-05-21T04:02:02.000Z | streamlit_web.py | BennyWestsyde/FakeNewsDetection | 8b171f2c93d0849e13c9ea6d94b784caf037a3bb | [
"BSD-3-Clause"
] | 2 | 2021-04-09T16:39:45.000Z | 2021-05-02T19:39:32.000Z |
import os
import pickle
from src.seq_train import MachineBuilder as ms
import streamlit as st
import pandas as pd
from src.twitterscraper import Twitter as ts
from src.preparedata import PrepareData as prep
import urllib.error
from pandas_profiling import ProfileReport
from streamlit_pandas_profiling import st_profile_report
import webbrowser
# pylint: disable=R0201, C0103
# @st.cache
def get_twitter_data():
AWS_BUCKET_URL = "https://streamlit-demo-data.s3-us-west-2.amazonaws.com"
df = pd.read_csv(AWS_BUCKET_URL + "/agri.csv.gz")
return df.set_index("Region")
try:
st.sidebar.title("Welcome to FakeNewsDetection!")
data_retreive_method = st.sidebar.selectbox(
"How would you like to search Twitter?",
[
"Keyword",
"Hashtag",
"Username",
],
)
# Alternative syntax, declare a form and use the returned object
form = st.form(key='user_search')
text_input = form.text_input(label='What would you like to search')
submit_button = form.form_submit_button(label='Submit')
# st.form_submit_button returns True upon form submit
if submit_button:
if data_retreive_method == 'Keyword':
with st.empty():
with st.spinner("Collecting tweets"):
results_df = ts().search_term(searching=text_input)
st.success("Collected")
with st.empty():
with st.spinner("Formatting Tweets"):
training_df = prep().build_Training_Results(results_df)
results_df = prep().build_Results(results_df)
st.success("Formatted")
st.write(results_df)
if data_retreive_method == 'Hashtag':
with st.empty():
with st.spinner("Collecting tweets"):
results_df = ts().search_hashtag(searching=text_input)
st.success("Collected")
with st.empty():
with st.spinner("Formatting Tweets"):
training_df = prep().build_Training_Results(results_df)
results_df = prep().build_Results(results_df)
st.success("Formatted")
st.write(results_df)
if data_retreive_method == 'Username':
with st.empty():
with st.spinner("Collecting tweets"):
results_df = ts().search_user(searching=text_input)
st.success("Collected")
with st.empty():
with st.spinner("Formatting Tweets"):
training_df = prep().build_Training_Results(results_df)
results_df = prep().build_Results(results_df)
st.success("Formatted")
st.write(results_df)
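# Run the formatted tweets through the model and build an HTML profiling report of the predictions, then open it in the browser.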
print("Entering Machine Learning")
mach = ms()
results = mach.display_valid(training_df)
st.empty()
st.write(results)
design_report = ProfileReport(results)
design_report.to_file(output_file='report.html')
webbrowser.open_new_tab(url="report.html")
# df = get_twitter_data()
# countries = st.multiselect(
# "Choose countries", list(df.index), ["China", "United States of America"]
# )
# if not countries:
# st.error("Please select at least one country.")
# else:
# data = df.loc[countries]
# data /= 1000000.0
# st.write("### Gross Agricultural Production ($B)", data.sort_index())
# data = data.T.reset_index()
# data = pd.melt(data, id_vars=["index"]).rename(
# columns={"index": "year", "value": "Gross Agricultural Product ($B)"}
# )
# chart = (
# alt.Chart(data)
# .mark_area(opacity=0.3)
# .encode(
# x="year:T",
# y=alt.Y("Gross Agricultural Product ($B):Q", stack=None),
# color="Region:N",
# )
# )
# st.altair_chart(chart, use_container_width=True)
except urllib.error.URLError as e:
st.error(
"""
**This demo requires internet access.**
Connection error: %s
"""
% e.reason
)
| 36.483051 | 83 | 0.590941 | 497 | 4,305 | 4.94165 | 0.392354 | 0.054967 | 0.026873 | 0.036645 | 0.346906 | 0.314332 | 0.314332 | 0.314332 | 0.314332 | 0.314332 | 0 | 0.006618 | 0.298026 | 4,305 | 117 | 84 | 36.794872 | 0.806089 | 0.21626 | 0 | 0.376623 | 0 | 0 | 0.132534 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012987 | false | 0 | 0.181818 | 0 | 0.207792 | 0.012987 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea60b380681f10e577415fde62737357e31b1b11 | 3,435 | bzl | Python | bazel/toolchain/x86_64-none-linux-gnu/cc_toolchain_config.bzl | mazzystr/kubevirt | d73c04439558a031096c3506c10f20f26dc8ae43 | [
"Apache-2.0"
] | 3,231 | 2016-12-19T15:05:47.000Z | 2022-03-31T13:38:56.000Z | bazel/toolchain/x86_64-none-linux-gnu/cc_toolchain_config.bzl | mazzystr/kubevirt | d73c04439558a031096c3506c10f20f26dc8ae43 | [
"Apache-2.0"
] | 7,176 | 2016-12-19T09:58:22.000Z | 2022-03-31T22:39:48.000Z | bazel/toolchain/x86_64-none-linux-gnu/cc_toolchain_config.bzl | mazzystr/kubevirt | d73c04439558a031096c3506c10f20f26dc8ae43 | [
"Apache-2.0"
] | 884 | 2016-12-19T09:10:20.000Z | 2022-03-31T13:16:44.000Z | load("@bazel_tools//tools/build_defs/cc:action_names.bzl", "ACTION_NAMES")
load(
"@bazel_tools//tools/cpp:cc_toolchain_config_lib.bzl",
"feature",
"flag_group",
"flag_set",
"tool_path",
)
all_link_actions = [
ACTION_NAMES.cpp_link_executable,
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
]
all_compile_actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.c_compile,
ACTION_NAMES.clif_match,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.lto_backend,
ACTION_NAMES.preprocess_assemble,
]
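# Toolchain rule implementation: wires up host GCC/binutils tool paths and the default compile/link feature flags.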
def _impl(ctx):
tool_paths = [
tool_path(
name = "ar",
path = "/usr/bin/ar",
),
tool_path(
name = "cpp",
path = "/usr/bin/cpp",
),
tool_path(
name = "gcc",
path = "/usr/bin/gcc",
),
tool_path(
name = "gcov",
path = "/usr/bin/gcov",
),
tool_path(
name = "ld",
path = "/usr/bin/ld",
),
tool_path(
name = "nm",
path = "/usr/bin/nm",
),
tool_path(
name = "objdump",
path = "/usr/bin/objdump",
),
tool_path(
name = "strip",
path = "/usr/bin/strip",
),
]
default_compiler_flags = feature(
name = "default_compiler_flags",
enabled = True,
flag_sets = [
flag_set(
actions = all_compile_actions,
flag_groups = [
flag_group(
flags = [
"-no-canonical-prefixes",
"-fno-canonical-system-headers",
"-Wno-builtin-macro-redefined",
"-D__DATE__=\"redacted\"",
"-D__TIMESTAMP__=\"redacted\"",
"-D__TIME__=\"redacted\"",
],
),
],
),
],
)
default_linker_flags = feature(
name = "default_linker_flags",
enabled = False,
flag_sets = [
flag_set(
actions = all_link_actions,
flag_groups = ([
flag_group(
flags = [
"",
],
),
]),
),
],
)
features = [
default_compiler_flags,
default_linker_flags,
]
return cc_common.create_cc_toolchain_config_info(
ctx = ctx,
cxx_builtin_include_directories = [
"/usr/lib/gcc/x86_64-redhat-linux/8/include",
"/usr/include",
],
features = features,
toolchain_identifier = "x86_64-toolchain",
host_system_name = "local",
target_system_name = "unknown",
target_cpu = "unknown",
target_libc = "unknown",
compiler = "unknown",
abi_version = "unknown",
abi_libc_version = "unknown",
tool_paths = tool_paths,
)
cc_toolchain_config = rule(
implementation = _impl,
attrs = {},
provides = [CcToolchainConfigInfo],
)
| 26.022727 | 74 | 0.482096 | 305 | 3,435 | 5.036066 | 0.340984 | 0.107422 | 0.0625 | 0.035156 | 0.072917 | 0.072917 | 0 | 0 | 0 | 0 | 0 | 0.004427 | 0.408151 | 3,435 | 131 | 75 | 26.221374 | 0.751107 | 0 | 0 | 0.276423 | 0 | 0 | 0.161281 | 0.071033 | 0 | 0 | 0 | 0 | 0 | 1 | 0.00813 | false | 0 | 0 | 0 | 0.01626 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea61168a39500dabfd76ac90293254807e2704b4 | 5,541 | py | Python | breakout/train_with_experiencebuffer.py | gdanfx/agents | 6ef1a7992fe22a299a7d470756142b567a0368c2 | [
"Apache-2.0"
] | null | null | null | breakout/train_with_experiencebuffer.py | gdanfx/agents | 6ef1a7992fe22a299a7d470756142b567a0368c2 | [
"Apache-2.0"
] | null | null | null | breakout/train_with_experiencebuffer.py | gdanfx/agents | 6ef1a7992fe22a299a7d470756142b567a0368c2 | [
"Apache-2.0"
] | 2 | 2019-10-23T23:31:30.000Z | 2019-10-25T21:32:26.000Z | import os
import argparse
import datetime
from tqdm import tqdm
import numpy as np
import tensorflow as tf
from google.oauth2 import service_account
from protobuf.experience_replay_pb2 import Trajectory, Info
from breakout.dqn_model import DQN_Model, ExperienceBuffer
from util.gcp_io import gcp_load_pipeline, gcs_load_weights, gcs_save_weights, cbt_global_iterator, cbt_read_rows
from util.logging import TimeLogger
SCOPES = ['https://www.googleapis.com/auth/cloud-platform']
SERVICE_ACCOUNT_FILE = 'cbt_credentials.json'
#SET HYPERPARAMETERS
VECTOR_OBS_SPEC = [4]
VISUAL_OBS_SPEC = [210,160,3]
NUM_ACTIONS=2
CONV_LAYER_PARAMS=((8,4,32),(4,2,64),(3,1,64))
FC_LAYER_PARAMS=(512,200)
LEARNING_RATE=0.00042
GAMMA = 0.9
if __name__ == '__main__':
#COMMAND-LINE ARGUMENTS
parser = argparse.ArgumentParser('Environment-To-Bigtable Script')
parser.add_argument('--gcp-project-id', type=str, default='for-robolab-cbai')
parser.add_argument('--cbt-instance-id', type=str, default='rab-rl-bigtable')
parser.add_argument('--cbt-table-name', type=str, default='breakout-experience-replay')
parser.add_argument('--bucket-id', type=str, default='rab-rl-bucket')
parser.add_argument('--prefix', type=str, default='breakout')
parser.add_argument('--tmp-weights-filepath', type=str, default='/tmp/model_weights_tmp.h5')
parser.add_argument('--train-epochs', type=int, default=1000000)
parser.add_argument('--train-steps', type=int, default=10)
parser.add_argument('--period', type=int, default=10)
parser.add_argument('--buffer-size', type=int, default=1008000)
parser.add_argument('--output-dir', type=str, default='/tmp/training/')
parser.add_argument('--log-time', default=False, action='store_true')
args = parser.parse_args()
#INSTANTIATE CBT TABLE AND GCS BUCKET
credentials = service_account.Credentials.from_service_account_file(SERVICE_ACCOUNT_FILE, scopes=SCOPES)
cbt_table, gcs_bucket = gcp_load_pipeline(args.gcp_project_id, args.cbt_instance_id, args.cbt_table_name, args.bucket_id, credentials)
#LOAD MODEL
model = DQN_Model(input_shape=VISUAL_OBS_SPEC,
num_actions=NUM_ACTIONS,
conv_layer_params=CONV_LAYER_PARAMS,
fc_layer_params=FC_LAYER_PARAMS,
learning_rate=LEARNING_RATE)
gcs_load_weights(model, gcs_bucket, args.prefix, args.tmp_weights_filepath)
#SETUP TENSORBOARD/LOGGING
train_log_dir = os.path.join(args.output_dir, 'logs/')
os.makedirs(os.path.dirname(train_log_dir), exist_ok=True)
loss_metrics = tf.keras.metrics.Mean('train_loss', dtype=tf.float32)
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
if args.log_time is True:
time_logger = TimeLogger(["Fetch Data", "Parse Data", "Compute Loss", "Generate Grads"], num_cycles=args.train_steps)
#TRAINING LOOP
train_step = 0
exp_buff = ExperienceBuffer(args.buffer_size)
print("-> Starting training...")
for epoch in range(args.train_epochs):
if args.log_time is True: time_logger.reset()
#FETCH DATA
global_i = cbt_global_iterator(cbt_table)
rows = cbt_read_rows(cbt_table, args.prefix, args.train_steps, global_i)
if args.log_time is True: time_logger.log(0)
for row in tqdm(rows, "Trajectories {} - {}".format(global_i - args.train_steps, global_i - 1)):
#DESERIALIZE DATA
bytes_traj = row.cells['trajectory']['traj'.encode()][0].value
bytes_info = row.cells['trajectory']['info'.encode()][0].value
traj, info = Trajectory(), Info()
traj.ParseFromString(bytes_traj)
info.ParseFromString(bytes_info)
#FORMAT DATA
obs_shape = np.append(info.num_steps, info.visual_obs_spec).astype(int)
obs = np.asarray(traj.visual_obs).reshape(obs_shape)
exp_buff.add_trajectory(obs, traj.actions, traj.rewards, info.num_steps)
if args.log_time is True: time_logger.log(1)
#COMPUTE LOSS
with tf.GradientTape() as tape:
q_pred, q_next = model(exp_buff.obs), model(exp_buff.next_obs)
one_hot_actions = tf.one_hot(exp_buff.actions, NUM_ACTIONS)
q_pred = tf.reduce_sum(q_pred * one_hot_actions, axis=-1)
q_next = tf.reduce_max(q_next, axis=-1)
q_next = q_next * exp_buff.next_mask
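# Bellman target: r + GAMMA * max_a' Q(s', a'); next_mask presumably zeroes out terminal transitions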
q_target = exp_buff.rewards + tf.multiply(tf.constant(GAMMA, dtype=tf.float32), q_next)
loss = model.loss(q_pred, q_target)
if args.log_time is True: time_logger.log(2)
#GENERATE GRADIENTS
total_grads = tape.gradient(loss, model.trainable_weights)
model.opt.apply_gradients(zip(total_grads, model.trainable_weights))
if args.log_time is True: time_logger.log(3)
#TENSORBOARD LOGGING
loss_metrics(loss)
total_reward = np.sum(traj.rewards)
with train_summary_writer.as_default():
tf.summary.scalar('loss', loss_metrics.result(), step=train_step)
tf.summary.scalar('total reward', total_reward, step=train_step)
train_step += 1
if args.log_time is True: time_logger.print_logs()
#SAVE MODEL WEIGHTS
model_filename = args.prefix + '_model.h5'
gcs_save_weights(model, gcs_bucket, args.tmp_weights_filepath, model_filename)
print("-> Done!")
| 43.97619 | 138 | 0.683992 | 761 | 5,541 | 4.716163 | 0.283837 | 0.030092 | 0.05684 | 0.025355 | 0.12594 | 0.089997 | 0.078295 | 0.059905 | 0.035665 | 0 | 0 | 0.016271 | 0.201408 | 5,541 | 125 | 139 | 44.328 | 0.794802 | 0.041328 | 0 | 0 | 0 | 0 | 0.104925 | 0.018117 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.123596 | 0 | 0.123596 | 0.033708 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea63b7f20b737a712d62a87999a8dd1ca9b9e101 | 16,572 | py | Python | Quality analysis/Sync_data_v2.py | MahdadJafarzadeh/ssccoorriinngg | 63c726e9e7d0f6d13032415c76b8c3bb1ff2bee3 | [
"MIT"
] | 2 | 2020-04-28T12:50:26.000Z | 2020-05-13T08:52:42.000Z | Quality analysis/Sync_data_v2.py | MahdadJafarzadeh/ssccoorriinngg | 63c726e9e7d0f6d13032415c76b8c3bb1ff2bee3 | [
"MIT"
] | null | null | null | Quality analysis/Sync_data_v2.py | MahdadJafarzadeh/ssccoorriinngg | 63c726e9e7d0f6d13032415c76b8c3bb1ff2bee3 | [
"MIT"
] | 1 | 2020-07-14T13:48:56.000Z | 2020-07-14T13:48:56.000Z | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 29 20:08:11 2020
@author: mahjaf
"""
#%% Import libs
#####===================== Importiung libraries =========================#####
import mne
import numpy as np
from scipy.integrate import simps
from numpy import loadtxt
import h5py
import time
import os
#from ssccoorriinngg import ssccoorriinngg
import matplotlib.pyplot as plt
import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import confusion_matrix, make_scorer, accuracy_score, precision_score, recall_score, f1_score, classification_report
import pandas as pd
import tensorflow as tf
from scipy import signal
from scipy.signal import butter, lfilter, periodogram, spectrogram, welch, filtfilt, iirnotch
from scipy.stats import pearsonr, spearmanr
import matplotlib.mlab as mlab
import pandas as pd
#%% Read in data (Somno + Zmax)
#####=========================== Reading data ============================#####
# Main path
main_path = "F:/Zmax_Data/features/"
# Read location of Somno data
subj_ids_somno = loadtxt(main_path + "SigQual_Somno_data_loc.txt", dtype = 'str',delimiter='\n')
# Read Zmax data
subj_ids_zmax = loadtxt(main_path + "SigQual_Zmax_data_loc.txt", dtype = 'str',delimiter='\n')
# Read subject_night id
subj_night = loadtxt(main_path + "Subject_Night.txt", dtype = 'str',delimiter='\n')
# read event markers path to sync data
sync_markers_path= "F:/Zmax_Data/features/Sync_periods.xlsx"
event_markers = pd.read_excel(sync_markers_path)
# Define filter
def butter_bandpass_filter(data, lowcut, highcut, fs, order = 2):
nyq = 0.5 * fs
low = lowcut /nyq
high = highcut/nyq
b, a = butter(order, [low, high], btype='band')
#print(b,a)
y = filtfilt(b, a, data)
return y
# Define spectrogram creator
def spectrogram_creation(sig1,sig2, fs):
from lspopt import spectrogram_lspopt
import numpy as np
import matplotlib.pyplot as plt
#==== plot 1st sig =======
f, t, Sxx = spectrogram_lspopt(x=sig1, fs=fs, c_parameter=20.0, nperseg=int(30*fs), \
scaling='density')
Sxx = 10 * np.log10(Sxx) #power to db
# Limit Sxx to the largest freq of interest:
f_sig1 = f[0:750]
Sxx_sig1 = Sxx[0:750, :]
fig, axs = plt.subplots(2,1, figsize=(26, 14))
plt.axes(axs[0])
plt.pcolormesh(t, f_sig1, Sxx_sig1)
plt.ylabel('Frequency [Hz]', size=15)
#plt.xlabel('Time [sec]', size=15)
plt.title('Somnoscreeen data (F4) - Multi-taper Spectrogram', size=20)
plt.colorbar()
# ==== plot 2nd sig ==== #
plt.axes(axs[1])
f, t, Sxx = spectrogram_lspopt(x=sig2, fs=fs, c_parameter=20.0, nperseg=int(30*fs), \
scaling='density')
Sxx = 10 * np.log10(Sxx) #power to db
# Limit Sxx to the largest freq of interest:
f_sig2 = f[0:750]
Sxx_sig2 = Sxx[0:750, :]
plt.pcolormesh(t, f_sig2, Sxx_sig2)
plt.ylabel('Frequency [Hz]', size=15)
plt.xlabel('Time [sec]', size=15)
plt.title('Zmax data (EEG right) - Multi-taper Spectrogram ', size=20)
plt.colorbar()
#==== 1st Way =======
#=== Maximize ====
figure = plt.gcf() # get current figure
figure.set_size_inches(32, 18)
plt.show()
#=== Maximize ====
return f_sig1, f_sig2, Sxx_sig1, Sxx_sig2
# save figure
def save_figure(directory, saving_name, dpi, saving_format = '.png',
full_screen = False):
if full_screen == True:
manager = plt.get_current_fig_manager()
manager.window.showMaximized()
plt.savefig(directory+saving_name+saving_format,dpi = dpi)
def save_dictionary(path, fname, labels_dic, features_dic):
import pickle
with open(path+fname+'.pickle',"wb") as f:
pickle.dump([features_dic, labels_dic], f)
#initializing dictionaries to save output
Sxx_somno_dic = dict()
Sxx_zmax_dic = dict()
f_spect_somno_dic = dict()
f_spect_zmax_dic = dict()
psd_somno_dic = dict()
psd_zmax_dic = dict()
f_psd_somno_dic = dict()
f_psd_zmax_dic = dict()
#####======================== Iterating through subjs=====================#####
subj_ids_somno = ["F:/Zmax_Data/Somnoscreen_Data/P_12/P12 night2_B.25.11.2018/P12_night2_B_markers_(1).edf"]
# Create for loop to iterate through all subjects
for idx, c_subj in enumerate(subj_ids_somno):
# define the current zmax data
curr_zmax = subj_ids_zmax[idx]
# define current somno data
curr_somno = c_subj
# Reading EEG left and right (Zmax)
data_L = mne.io.read_raw_edf(curr_zmax + "EEG L.edf", preload = True)
data_R = mne.io.read_raw_edf(curr_zmax + "EEG R.edf", preload = True)
# Read somno data
EEG_somno = mne.io.read_raw_edf(curr_somno, preload = True)
# Reading info header (Somno)
SomnoInfo = EEG_somno.info
AvailableChannels = SomnoInfo['ch_names']
ZmaxInfo = data_R.info
# Fs
fs_zmax = int(ZmaxInfo['sfreq'])
fs_somno = int(SomnoInfo['sfreq'])
#%% Plot filtered signals
#####======================== Data representation ========================#####
# =============================================================================
# data_L.plot(duration = 30, highpass = .1 , lowpass = 30 )
# data_R.plot(duration = 30, highpass = .1 , lowpass = 30 )
# EEG_somno.plot(duration = 30, highpass = .1 , lowpass = 30,n_channels = 4 )
# =============================================================================
#%% Resampling higher freq to lower
if fs_zmax != fs_somno:
if fs_zmax < fs_somno:
EEG_somno = EEG_somno.resample(int(fs_zmax), npad="auto")
else:
data_L = data_L.resample(int(fs_somno), npad="auto")
data_R = data_R.resample(int(fs_somno), npad="auto")
# Define resampled fs
fs_res = np.min([fs_zmax, fs_somno])
#%% Get data (resampled)
data_L_get = data_L.get_data()
data_R_get = data_R.get_data()
data_somno_get = EEG_somno.get_data()
#%% Filtering resampled data
data_L_resampled_filtered = butter_bandpass_filter(data_L_get, lowcut=.1, highcut=30, fs=fs_res, order = 2)
data_R_resampled_filtered = butter_bandpass_filter(data_R_get, lowcut=.1, highcut=30, fs=fs_res, order = 2)
EEG_somno_resampled_filtered = butter_bandpass_filter(data_somno_get, lowcut=.1, highcut=30, fs=fs_res, order = 2)
#%% Synchronization section
# ===================== start of LRLR for sync ========================= #
# Zmax
LRLR_start_zmax = event_markers['LRLR_start_zmax'][idx] #sec
LRLR_end_zmax = event_markers['LRLR_end_zmax'][idx] #sec
# Somno
LRLR_start_somno = event_markers['LRLR_start_somno'][idx] #sec
LRLR_end_somno = event_markers['LRLR_end_somno'][idx] #sec
# Define a period around sync point ro perform alignment
zmax_plotting_secs = [LRLR_start_zmax,LRLR_end_zmax]
somno_plotting_secs = [LRLR_start_somno, LRLR_end_somno]
# Finding corresponding samples of sync period
zmax_plotting_samples = np.arange(zmax_plotting_secs[0] *fs_res, zmax_plotting_secs[1] * fs_res)
somno_plotting_samples = np.arange(somno_plotting_secs[0] *fs_res, somno_plotting_secs[1] * fs_res)
# Convert (probable) floats into int
somno_plotting_samples = somno_plotting_samples.astype(np.int32)
zmax_plotting_samples = zmax_plotting_samples.astype(np.int32)
# R EEG (Zmax) --> sync period
zmax_data_R = np.ravel(data_R_resampled_filtered)
# L EEG (Zmax) --> sync period
zmax_data_L = np.ravel(data_L_resampled_filtered)
# Define channel of interest
RequiredChannels = ['F4:A1'] # main electrodes
# init index of reeuired channel(s)
Idx = []
Idx_Mastoids = []
# Find index of required channel(s)
for indx, c in enumerate(AvailableChannels):
if c in RequiredChannels:
Idx.append(indx)
# pick Somno channel
Somno_reqChannel = EEG_somno_resampled_filtered[Idx,:]
# np.ravel somno signal(s)
Somno_reqChannel = np.ravel(Somno_reqChannel)
# plt R EEG (zmax) and required channel of Somno BEFORE sync
plt.figure()
figure = plt.gcf() # get current figure
plt.xlabel('Samples',size = 15)
plt.ylabel('Amp',size = 15)
figure.set_size_inches(32, 18)
sig_zmax = zmax_data_R[zmax_plotting_samples]
sig_somno = Somno_reqChannel[somno_plotting_samples]
# Compute correlation
corr = signal.correlate(sig_zmax, sig_somno)
# find lag
lag = np.argmax(np.abs(corr)) - len(zmax_data_L[zmax_plotting_samples]) + 1
# Plot before lag correction
plt.plot(np.arange(0, len(zmax_plotting_samples)), sig_zmax,label = 'Zmax R EEG', color = 'black')
plt.plot(np.arange(0, len(somno_plotting_samples)), sig_somno, label = 'Somno F4', color = 'gray', linestyle = ':')
plt.title('Syncing Somno and Zmax data (Sync period only)', size = 15)
# Plot after lag correction
#plt.plot(np.arange(0+lag, len(somno_plotting_samples)+lag), sig_somno, label = 'Somno F4 - synced',color = 'red')
plt.plot(np.arange(0, len(somno_plotting_samples)), Somno_reqChannel[somno_plotting_samples-lag], label = 'Somno F4 - synced',color = 'red')
#plt.plot(np.arange(0-lag, len(zmax_plotting_samples)-lag), sig_zmax, label = 'zmax - synced',color = 'cyan')
plt.legend(prop={"size":20})
# Save figure
save_figure(saving_format = '.png',
directory="F:/Zmax_Data/Results/SignalQualityAnalysis/sync_period/",
saving_name = subj_night[idx], dpi = 900,
full_screen = False)
# close current fig
plt.close()
#%% Compute correlation during sync period only
sync_period_s = Somno_reqChannel[somno_plotting_samples-lag]
sync_period_z = sig_zmax
# compute pearson correlation
pearson_corr,pval_pe = pearsonr(sync_period_s, sync_period_z)
print(f'Pearson corr during sync period between Zmax EEG R and Somno F4:A1\
is {pearson_corr}, p-value: {pval_pe}')
# Spearman Corr
Spearman_corr,pval_sp = spearmanr(sync_period_s, sync_period_z)
print(f'Spearman corr during sync period between Zmax EEG R and Somno F4:A1\
is {Spearman_corr}, p-value: {pval_sp}')
#%% Plot cross-correlation
fig, ax = plt.subplots(1,1, figsize=(26, 14))
ax.plot(np.arange(-len(zmax_data_L[zmax_plotting_samples])+1,len(zmax_data_L[zmax_plotting_samples])), corr, color = 'blue')
plt.title('Cross-correlation to find lag between Zmax & Somno during eye movements', size=15)
# Marking max correlation value to find lag
ymax = np.max(np.abs(corr))
if np.max(np.abs(corr)) != np.max(corr) :
ymax = -ymax
xpos = lag
xmax = lag
# Creating arrow to point to max
ax.annotate('max correlation', xy=(xmax, ymax), xytext=(xmax, ymax+ymax/10),
arrowprops=dict(facecolor='red', shrink=0.05),
)
# title, etc
plt.title('Cross-correlation during event emergence', size = 20)
plt.xlabel('Lag (samples)', size = 15)
plt.ylabel('Amplitude', size = 15)
plt.show()
# Save figure
save_figure(saving_format = '.png',
directory="F:/Zmax_Data/Results/SignalQualityAnalysis/Cross-corr/",
saving_name = subj_night[idx], dpi = 900,
full_screen = False)
# close current fig
plt.close()
#%% Plotting COMPLETE signals after synchronization
# rough lag
rough_lag = (LRLR_start_somno - LRLR_start_zmax) * fs_res
# Total lag = rough lag +- lag during sync
total_lag = int(rough_lag - lag)
# truncate the lag period from somno BEGINNING
truncated_beginning_somno = Somno_reqChannel[total_lag:]
# Truncate the end of LONGER signal
len_s = len(truncated_beginning_somno)
len_z = len(zmax_data_R)
# if somno data is larger
if len_s > len_z:
somno_final = truncated_beginning_somno[:len_z]
zmax_final = zmax_data_R
else:
zmax_final = zmax_data_R[:len_s]
somno_final = truncated_beginning_somno
# Calculate final length
common_length = np.min([len_s, len_z])
# Plot truncated sigs
plt.figure()
plt.plot(np.arange(0, common_length) / fs_res / 60, zmax_final, color = 'blue', label = 'Zmax R EEG')
plt.plot(np.arange(0, common_length) / fs_res / 60, somno_final, \
color = 'red', label = 'Somno F4-A1')
plt.title('Complete Zmax and Somno data after full sync', size = 20)
plt.xlabel('Time (mins)', size = 15)
plt.ylabel('Amplitude (v)', size = 15)
plt.legend(prop={"size":20}, loc = "upper right")
# compute pearson correlation
pearson_corr,pval_pe = pearsonr(somno_final, zmax_final)
print(f'Pearson corr over the full synced recordings between Zmax EEG R and Somno F4:A1\
is {pearson_corr}, p-value: {pval_pe}')
# close current fig
#plt.close()
#%% Computing Coherence of signals
plt.figure()
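# Magnitude-squared coherence between the aligned recordings (values near 1 indicate a strong linear relationship at that frequency)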
coh, f = plt.cohere(somno_final, zmax_final, Fs = fs_res, NFFT = 256)
plt.xlim([0, 30])
#%% Plot spectrgoram of somno vs Zmax
f_spect_s, f_spect_z, Sxx_s, Sxx_z = spectrogram_creation(somno_final, zmax_final, fs = fs_res)
# Save figure
save_figure(saving_format = '.png',
directory="F:/Zmax_Data/Results/SignalQualityAnalysis/spectrogram/",
saving_name = subj_night[idx], dpi = 900,
full_screen = False)
# close current fig
plt.close()
#%% Plot PSD
plt.figure()
figure = plt.gcf() # get current figure
figure.set_size_inches(26, 14)
# Global setting for axes values size
plt.rc('xtick',labelsize=16)
plt.rc('ytick',labelsize=16)
# Plot power spectrums
psd_z, f_psd_z = plt.psd(x=zmax_final,Fs = fs_res, label = 'Zmax', NFFT = 2 ** 11, scale_by_freq= True, linewidth = 2, color = 'blue')
psd_s, f_psd_s = plt.psd(x=somno_final,Fs = fs_res, label = 'Zmax',NFFT = 2 ** 11, scale_by_freq= True, linewidth = 2, color = 'red')
# ================== plot dashed lines of freq bins ========================= #
#Delta
plt.axvline(.5, linestyle = '--', color = 'black')
plt.axvline(4, linestyle = '--', color = 'black')
#Theta
plt.axvline(8, linestyle = '--', color = 'black')
# Alpha
plt.axvline(12, linestyle = '--', color = 'black')
# Title and labels
plt.title('Power spectral density throughout the night', size = 20)
plt.xlabel('Frequency (Hz)', size = 20)
plt.ylabel('Power spectral density (dB/ Hz)', size = 20)
# Legend
plt.legend(['Zmax EEG R', 'Somno F4'], prop = {'size':20})
# Deactivate grid
plt.grid(False)
# Adding labels
plt.text(1.5, -89, 'Delta',size =18)
plt.text(5, -89, 'Theta',size =18)
plt.text(9, -89, 'Alpha',size =18)
plt.text(13, -89, 'Beta',size =18)
# Limiting x-axis to 0-30 Hz
plt.xlim([0, 30])
# Save figure
save_figure(saving_format = '.png',
directory="F:/Zmax_Data/Results/SignalQualityAnalysis/PSD/",
saving_name = subj_night[idx], dpi = 900,
full_screen = False)
# close current fig
plt.close()
#%% Keep the PSD and spectrogram values for final normalization over subjs
# === 1. Spectrogram:
Sxx_somno_dic[subj_night[idx]] = Sxx_s
Sxx_zmax_dic[subj_night[idx]] = Sxx_z
f_spect_somno_dic[subj_night[idx]] = f_spect_s
f_spect_zmax_dic[subj_night[idx]] = f_spect_z
# === 2. PSD:
psd_somno_dic[subj_night[idx]] = psd_s
psd_zmax_dic[subj_night[idx]] = psd_z
f_psd_somno_dic[subj_night[idx]] = f_psd_s
f_psd_zmax_dic[subj_night[idx]] = f_psd_z
#%% Save final PSD and Freqs
path = "F:/Zmax_Data/features/"
save_dictionary(path, "SpectrogramValsSomno", Sxx_somno_dic, f_spect_somno_dic)
save_dictionary(path, "SpectrogramValsZmax", Sxx_zmax_dic, f_spect_zmax_dic)
save_dictionary(path, "PSDValsZmax", psd_zmax_dic, f_psd_zmax_dic)
save_dictionary(path, "PSDValsSomno", psd_somno_dic, f_psd_somno_dic)
| 36.183406 | 149 | 0.630099 | 2,262 | 16,572 | 4.399646 | 0.191866 | 0.018489 | 0.014469 | 0.012058 | 0.369976 | 0.293408 | 0.236133 | 0.203477 | 0.165695 | 0.159465 | 0 | 0.022866 | 0.229423 | 16,572 | 457 | 150 | 36.262582 | 0.75646 | 0.213191 | 0 | 0.209877 | 0 | 0.004115 | 0.106727 | 0.033478 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016461 | false | 0.016461 | 0.09465 | 0 | 0.119342 | 0.012346 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea679fc987f31f3fe9f99e23ca208ae357055c1e | 6,363 | py | Python | etc/coqc.py | herbelin/perennial | 49b044fa83b4df2dc23262571e79c1165006bdc8 | [
"MIT"
] | null | null | null | etc/coqc.py | herbelin/perennial | 49b044fa83b4df2dc23262571e79c1165006bdc8 | [
"MIT"
] | null | null | null | etc/coqc.py | herbelin/perennial | 49b044fa83b4df2dc23262571e79c1165006bdc8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# coqc wrapper
from __future__ import print_function
from datetime import datetime
from os import path
import re
import sqlite3
import subprocess
import sys
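# Timing sinks: NullDb discards all timings, StdoutDb prints entries slower than 0.1 s, TimingDb persists them to SQLite.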
class NullDb:
def add_qed(self, fname, ident, time):
pass
def add_file(self, fname, time):
pass
def close(self):
pass
class StdoutDb:
def add_qed(self, fname, ident, time):
base = path.basename(fname)
if time > 0.1:
print("{:15s} {:20s} {:0.2f}".format(base, ident, time))
def add_file(self, fname, time):
if time > 0.1:
print("{} {:0.2f}".format(fname, time))
def close(self):
pass
class TimingDb:
def __init__(self, conn):
self.conn = conn
@classmethod
def from_file(cls, fname):
conn = sqlite3.connect(fname, isolation_level=None)
conn.execute(
"""CREATE TABLE IF NOT EXISTS qed_timings """
+ """(fname text NOT NULL, ident text NOT NULL, time real NOT NULL, """
+ """PRIMARY KEY (fname, ident) )"""
)
conn.execute(
"""CREATE TABLE IF NOT EXISTS file_timings """
+ """(fname text NOT NULL PRIMARY KEY, time real)"""
)
return cls(conn)
def add_qed(self, fname, ident, time):
self.conn.execute(
"""INSERT OR REPLACE INTO qed_timings VALUES (?,?,?)""",
(fname, ident, time),
)
def add_file(self, fname, time):
self.conn.execute(
"""INSERT OR REPLACE INTO file_timings VALUES (?,?)""", (fname, time)
)
def close(self):
self.conn.close()
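# Regex helpers for coqc -time output: detect definition names, Qed lines and per-span timings.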
class Classify:
DEF_RE = re.compile(
r"""(?:(Local|Global)\s+)?(?:Theorem|Lemma|Instance|Definition|Corollary|Remark|Fact|Program Lemma)\s+"""
+ r"""(?P<ident>\w(\w|')*)"""
)
OBLIGATION_RE = re.compile(r"""Next Obligation\.""")
TIME_RE = re.compile(
r"""Chars (?P<start>\d*) - (?P<end>\d*) \[.*\] """
+ r"""(?P<time>[0-9.]*) secs .*"""
)
QED_RE = re.compile(r"""(Time\s*)?Qed\.""")
obligation_count = 0
@classmethod
def is_qed(cls, s):
return cls.QED_RE.match(s) is not None
@classmethod
def get_def(cls, s):
m = cls.DEF_RE.match(s)
if m is not None:
return m.group("ident")
m = cls.OBLIGATION_RE.match(s)
if m is not None:
cls.obligation_count += 1
return "<obligation {}>".format(cls.obligation_count)
return None
@classmethod
def get_time(cls, s):
m = cls.TIME_RE.match(s)
if m is None:
return None
return (int(m.group("start")), int(m.group("end")), float(m.group("time")))
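# Streams coqc output line by line, attributing each timed Qed to the most recent definition seen in the source.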
class CoqcFilter:
def __init__(self, vfile, db, contents, start):
self.vfile = vfile
self.contents = contents
self.db = db
self.start = start
self.curr_def = None
@classmethod
def from_coqargs(cls, args, db, contents=None, start=None):
vfile = None
for arg in args:
if arg.endswith(".v"):
vfile = arg
break
if start is None:
start = datetime.now()
return cls(vfile, db, contents, start)
@classmethod
def from_contents(cls, contents, db, start=None):
return cls("<in-memory>.v", db, contents, start)
def _read_vfile(self):
with open(self.vfile, "rb") as f:
self.contents = f.read()
def chars(self, start, end):
if not self.contents:
self._read_vfile()
return self.contents[start:end].decode("utf-8")
def update_def(self, ident):
"""Update current definition to ident."""
self.curr_def = ident
def update_timing(self, timing_info):
"""Add new timing info based on Classify.get_time."""
start, end, time = timing_info
code = self.chars(start, end)
ident = Classify.get_def(code)
if ident:
return self.update_def(ident)
if Classify.is_qed(code):
if self.curr_def is None:
print(
self.vfile,
"no proof ident {} - {}".format(start, end),
file=sys.stderr,
)
return
self.db.add_qed(self.vfile, self.curr_def, time)
return
def line(self, l):
"""Process a line of output from coqc."""
line = l.decode("utf-8")
timing_info = Classify.get_time(line)
if timing_info:
return self.update_timing(timing_info)
sys.stdout.write(line)
def done(self, end_t=None):
if end_t is None:
end_t = datetime.now()
delta = (end_t - self.start).total_seconds()
self.db.add_file(self.vfile, delta)
self.db.close()
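# Read a _CoqProject file and return its options with the "-arg" markers stripped.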
def read_coqproject(fname):
args = []
with open(fname) as f:
for line in f:
args.extend(line.rstrip().split(" "))
return [arg for arg in args if arg != "-arg"]
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--proj", default=None, help="path to _CoqProject to use for options"
)
parser.add_argument(
"--no-timing", action="store_true", help="disable all timing tracking"
)
parser.add_argument(
"--timing-db", default=None, help="database to store timing info"
)
args, coq_args = parser.parse_known_args()
coqproject_file = args.proj
if coqproject_file is not None and path.exists(coqproject_file):
proj_args = read_coqproject(coqproject_file)
else:
proj_args = []
if args.no_timing:
db = NullDb()
elif args.timing_db:
db = TimingDb.from_file(args.timing_db)
else:
db = StdoutDb()
args = ["coqc"]
args.extend(proj_args)
args.append("-time")
args.extend(coq_args)
filter = CoqcFilter.from_coqargs(coq_args, db)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=sys.stderr)
try:
for line in iter(p.stdout.readline, b""):
filter.line(line)
except KeyboardInterrupt:
p.kill()
p.wait()
db.close()
sys.exit(p.returncode)
filter.done()
p.wait()
sys.exit(p.returncode)
| 26.623431 | 113 | 0.562942 | 812 | 6,363 | 4.286946 | 0.232759 | 0.020109 | 0.011491 | 0.013789 | 0.153117 | 0.11175 | 0.09164 | 0.05056 | 0 | 0 | 0 | 0.004751 | 0.305359 | 6,363 | 238 | 114 | 26.735294 | 0.782805 | 0.024202 | 0 | 0.217391 | 0 | 0 | 0.081863 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0.021739 | 0.043478 | 0.01087 | 0.309783 | 0.021739 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea6b5cd26782f77d3efb15126e06e2104fd23e6c | 2,737 | py | Python | RGBserver/IRcontroller/IRcontroller.py | Emilurenius/RGB-controller | 17816b81041406761bad9531c15fda000cd8f99d | [
"Unlicense"
] | null | null | null | RGBserver/IRcontroller/IRcontroller.py | Emilurenius/RGB-controller | 17816b81041406761bad9531c15fda000cd8f99d | [
"Unlicense"
] | null | null | null | RGBserver/IRcontroller/IRcontroller.py | Emilurenius/RGB-controller | 17816b81041406761bad9531c15fda000cd8f99d | [
"Unlicense"
] | null | null | null | import RPi.GPIO as GPIO
from datetime import datetime, timedelta
import pickle
pin = 11
buttons = []
#Sets up GPIO
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin, GPIO.IN)
#Gets binary value
def getBinary():
#Internal vars
num1s = 0 #Number of consecutive 1s read
binary = 1 #The binary value
command = [] #The list to store pulse times in
previousValue = 0 #The last value
value = GPIO.input(pin) #The current value
#Waits for the sensor to pull pin low
while value:
value = GPIO.input(pin)
#Records start time
startTime = datetime.now()
while True:
#If change detected in value
if previousValue != value:
now = datetime.now()
pulseTime = now - startTime #Calculate the time of pulse
startTime = now #Reset start time
command.append((previousValue, pulseTime.microseconds)) #Store recorded data
#Updates consecutive 1s variable
if value:
num1s += 1
else:
num1s = 0
#Breaks program when the amount of 1s surpasses 10000
if num1s > 10000:
break
#Re-reads pin
previousValue = value
value = GPIO.input(pin)
#Converts times to binary
for (typ, tme) in command:
if typ == 1: #If looking at rest period
if tme > 1000: #If pulse greater than 1000us
binary = binary *10 +1 #Must be 1
else:
binary *= 10 #Must be 0
if len(str(binary)) > 34: #Sometimes, there are some stray characters
binary = int(str(binary)[:34])
return binary
#Convert value to hex
def convertHex(binaryValue):
tmpB2 = int(str(binaryValue),2) #Temporary proper base 2
return hex(tmpB2)
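# previousVal remembers the last decoded code so repeated presses can be distinguished from new buttons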
previousVal = False
mode = input(">>")
print(mode)
if mode == "test":
noiseFile = open("noise.pkl", "rb")
noise = pickle.load(noiseFile)
while True:
inData = convertHex(getBinary())
print(inData)
if inData not in noise:
if previousVal:
if previousVal == inData:
print("Same one again")
previousVal = inData
else:
print("New button!")
previousVal = inData
else:
print("Fist buttonpress")
previousVal = inData
elif mode == "noiseReduce":
noiseList = []
noiseFoundTime = datetime.now().timestamp()
while datetime.now().timestamp() - noiseFoundTime < 20:
noise = convertHex(getBinary())
print(noise)
if noise in noiseList:
noiseFound = False
else:
noiseFound = True
print("New noise signal found")
noiseList.append(noise)
if noiseFound:
noiseFoundTime = datetime.now().timestamp()
#else:
#now = datetime.now()
#This print statement does not do its job
#print("checking for", datetime.now().timestamp() - noiseFoundTime * -1, "more seconds")
print("No more noise found!")
print("noise found:", noiseList)
open_file = open("noise.pkl", "wb")
pickle.dump(noiseList, open_file)
open_file.close() | 23.8 | 91 | 0.684326 | 369 | 2,737 | 5.067751 | 0.422764 | 0.041176 | 0.042781 | 0.030481 | 0.035294 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023502 | 0.207161 | 2,737 | 115 | 92 | 23.8 | 0.838249 | 0.262331 | 0 | 0.209877 | 0 | 0 | 0.067303 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.024691 | false | 0 | 0.037037 | 0 | 0.08642 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea6c0183bd3751807be8e594cf2801ef55a7f750 | 2,243 | py | Python | MLPApproximator/MlpResultParser.py | HalfInner/MLPApproximator | 9bc08e14d8f50f323a0453f02d2230c4e4195bee | [
"MIT"
] | null | null | null | MLPApproximator/MlpResultParser.py | HalfInner/MLPApproximator | 9bc08e14d8f50f323a0453f02d2230c4e4195bee | [
"MIT"
] | null | null | null | MLPApproximator/MlpResultParser.py | HalfInner/MLPApproximator | 9bc08e14d8f50f323a0453f02d2230c4e4195bee | [
"MIT"
] | null | null | null | import glob
import sys
def main(argv):
if len(argv) != 2:
usage()
sys.exit(-1)
directory = argv[1]
for m_parameter in (3, 5, 7):
result = []
[result.append(parse_file(open(f, 'r'))) for f in glob.glob('{}/M{}*.txt'.format(directory, m_parameter))]
print(format(result).replace('.', ','))
return 0
def format(result):
epoch_array = [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000]
data_out = ''
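# sort rows by (hidden layer count, epoch) so each M parameter forms one line of output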
result = sorted(result, key=lambda tup: (tup[0], tup[1]))
previous = -1
for m, i, loss in result:
if previous != m:
data_out += '\n'
data_out += str(m)
previous = m
data_out += ' ' + str(loss)
header = '\nGAP ' + ' '.join(map(str, epoch_array))
return header + data_out
def parse_file(file_handler):
hidden_layer_number = 0
epochs = 0
loss = 0
for line in file_handler:
if not line:
continue
line = line.split()
if not line:
continue
if line[0] != 'Approximator:':
continue
if line[1] == 'hidden':
hidden_layer_number = int(line[2].split('=')[1])
continue
if line[1] == 'Epoch:':
epochs = int(line[2].split('/')[1])
continue
if 'Loss' in line[1]:
loss = float(line[1].split('=')[1][:-1])
continue
return hidden_layer_number, epochs, loss
def usage():
print(
"""
Welcome to the result parser for MLP Approximator.
usage:
python MlpResultParser <directory_with_results>
Result of parsing is written to standard output, where you can easily use it in Excel.
It post-processes the result of the command:
python -m unittest MLPApproximatorTexst.test_integration.TestIntegration
Format:
Name of file M -> M Parameter
Reads only lines where 'Approximator:' is the line prefix.
Read: {hidden layer number, epoch number, test loss}
Prototype! No validation!
""")
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 25.488636 | 114 | 0.528756 | 268 | 2,243 | 4.313433 | 0.410448 | 0.030277 | 0.058824 | 0.027682 | 0.041522 | 0.041522 | 0.041522 | 0 | 0 | 0 | 0 | 0.037879 | 0.352653 | 2,243 | 87 | 115 | 25.781609 | 0.758264 | 0 | 0 | 0.153846 | 0 | 0 | 0.040895 | 0 | 0.019231 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.038462 | 0 | 0.173077 | 0.038462 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea6dc67cd6b94914955a48d62b2eb1133fcf5f9c | 9,962 | py | Python | shapenet/scripts/train.py | vuamitom/shapenet | 9eb3dadc91801756cb3460707c37146c8176643e | [
"BSD-2-Clause"
] | null | null | null | shapenet/scripts/train.py | vuamitom/shapenet | 9eb3dadc91801756cb3460707c37146c8176643e | [
"BSD-2-Clause"
] | null | null | null | shapenet/scripts/train.py | vuamitom/shapenet | 9eb3dadc91801756cb3460707c37146c8176643e | [
"BSD-2-Clause"
] | 1 | 2020-09-25T08:55:12.000Z | 2020-09-25T08:55:12.000Z | from ..dataset import DataSet
from ..networks import ShapeNet
from .preprocess import view_img
import numpy as np
import torch
from tqdm import trange
from tqdm.auto import tqdm
import math
import os
from random import randrange
import re
BATCH_SIZE = 1
N_COMPONENTS = 68
TRAIN_EPOCHS = 1000
DEBUG_SINGLE_IMG = None
PCA_FILE = 'unrot_train_pca.npz'
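# The PCA file is expected to contain a 'shapes' array; the first n_components + 1 rows are used (presumably the mean shape plus the leading components).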
def load_pca(pca_path, n_components):
return np.load(pca_path)['shapes'][:(n_components + 1)]
def load_dataset(path):
return DataSet(path, 1 if DEBUG_SINGLE_IMG is not None else None)
def create_nn(pca):
net = ShapeNet(pca)
input_device, output_device = None, None
# check device
if torch.cuda.is_available():
device_count = torch.cuda.device_count()
print('cuda available. number of gpus =', device_count)
if device_count > 1:
gpu_ids = [i for i in range(0, device_count)]
input_device = torch.device('cuda:%d' % gpu_ids[0])
net = torch.nn.DataParallel(net.to(input_device),
device_ids=gpu_ids,
output_device=gpu_ids[1]
)
output_device = torch.device('cuda:%d' % gpu_ids[1])
else:
input_device = torch.device('cuda:0')
net = net.to(input_device)
output_device = torch.device('cuda:0')
else:
print('gpu not available. train using cpu instead')
input_device = torch.device('cpu')
output_device = torch.device('cpu')
net = net.to(input_device)
return net, input_device, output_device
def create_optimizer(model, lr=0.0001):
# TODO: read more about mix-precision optimizer https://forums.fast.ai/t/mixed-precision-training/20720
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
return optimizer
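# One pass over the training data: forward pass, sum the criterion losses, backprop and optimizer step per batch.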
def train_single_epoch(model, optimizer, criteria, dataset, input_device, output_device):
batch_size = BATCH_SIZE
if DEBUG_SINGLE_IMG is None:
total_batch = math.ceil(dataset.set_size() / batch_size)
else:
total_batch = 50
total_loss = 0
for i in trange(0, total_batch):
# if DEBUG_SINGLE_IMG is not None:
# data, labels = dataset.data[DEBUG_SINGLE_IMG], dataset.labels[DEBUG_SINGLE_IMG]
# data = data.reshape(data.shape[0], 1, *data.shape[1:])
# else:
data, labels = dataset.next_batch(batch_size)
data = torch.from_numpy(data).to(input_device).to(torch.float)
labels = torch.from_numpy(labels).to(output_device).to(torch.float)
model.train()
with torch.enable_grad():
preds = model(data)
#cal loss
loss_vals = {}
train_loss = 0
for key, fn in criteria.items():
_loss_val = fn(preds, labels)
loss_vals[key] = _loss_val.detach()
train_loss += _loss_val
total_loss += _loss_val
optimizer.zero_grad()
train_loss.backward()
optimizer.step()
# print('avg. train loss %.2f' % (total_loss / total_batch))
# total_loss = 0
# test
# t = randrange(0, 100)
# img = dataset.data[t]
# img = img.reshape(*img.shape[1:])
# lmks = predict(model, img, input_device)
# view_img(img, lmks, dataset.labels[t])
def eval(model, val_dataset, criteria, metrics, input_device, output_device):
eval_batch_size = 20
total_batch = math.ceil(val_dataset.set_size()/ eval_batch_size)
results = np.ndarray((val_dataset.set_size(), *val_dataset.get_label(0).shape))
last_idx = 0
loss_vals = {k:0 for k, _ in criteria.items()}
metric_vals = {k:0 for k,_ in metrics.items()}
for i in trange(0, total_batch):
data, labels = val_dataset.next_batch(eval_batch_size)
data = torch.from_numpy(data).to(input_device).to(torch.float)
labels = torch.from_numpy(labels).to(output_device).to(torch.float)
model.eval()
with torch.no_grad():
preds = model(data)
total_loss = 0
for key, fn in criteria.items():
_loss_val = fn(preds, labels)
loss_vals[key] += _loss_val.detach()
for key, metric_fn in metrics.items():
metric_vals[key] += metric_fn(preds, labels).detach()
results[last_idx:(last_idx + len(preds)), :, :] = preds.cpu()
last_idx += len(preds)
metric_vals = {k:(v/total_batch) for k, v in metric_vals.items()}
loss_vals = {k:(v/total_batch) for k, v in loss_vals.items()}
return metric_vals, loss_vals, results
def load_pretrain_model(data_dir):
input_device = output_device = torch.device('cpu')
net = torch.jit.load(os.path.join(data_dir, 'pretrained_face.ptj'), map_location=input_device)
return net, None, input_device, output_device
def load_model(data_dir, lr):
model_dir = os.path.join(data_dir, 'model')
pca_path = os.path.join(data_dir, PCA_FILE)
n_components = N_COMPONENTS
# load PCA
pca = load_pca(pca_path, n_components)
# create network
net, input_device, output_device = create_nn(pca)
optimizer = create_optimizer(net, lr)
last_epoch = 0
checkpoints = [n for n in os.listdir(model_dir) if n.startswith('shapenet_epoch_')]
if len(checkpoints) > 0:
get_epoch = lambda x: int(re.search('\\d+', x).group(0))
checkpoints = sorted(checkpoints, key=get_epoch, reverse=True)
last_epoch_f = checkpoints[0]
print ('load saved state from ', last_epoch_f)
saved_state = torch.load(os.path.join(model_dir, last_epoch_f), map_location=input_device)
net.load_state_dict(saved_state['model'])
optimizer.load_state_dict(saved_state['optimizer'])
# for g in optimizer.param_groups:
# g['lr'] = lr
last_epoch = get_epoch(last_epoch_f)
return net, optimizer, input_device, output_device, last_epoch
def save_model(data_dir, model, optimizer, name):
model_dir = os.path.join(data_dir, 'model')
if not os.path.exists(model_dir):
os.mkdir(model_dir)
torch.save(dict(model=model.state_dict(),
optimizer=optimizer.state_dict()), os.path.join(model_dir, name))
def predict(model, img, input_device):
if len(img.shape) < 4:
data = np.array([img.reshape(1, *img.shape)])
else:
data = img
data = torch.from_numpy(data).to(input_device).to(torch.float)
model.eval()
with torch.no_grad():
preds = model(data)
return preds.cpu()[0]
def train(data_dir, train_data, val_data, lr, eval_only = False, num_epochs = TRAIN_EPOCHS):
print('start training. lr = ', lr)
net, optimizer, input_device, output_device, last_epoch = load_model(data_dir, lr)
# load data set
train_dataset = load_dataset(train_data) if not eval_only else None
val_dataset = load_dataset(val_data)
start_epoch = last_epoch
criteria = {"L1": torch.nn.L1Loss()}
metrics = {"MSE": torch.nn.MSELoss()}
if eval_only:
print('test on test set')
metric_vals, loss_vals, preds = eval(net, val_dataset, criteria, metrics, input_device, output_device)
print('val loss', loss_vals, ' metrics ', metric_vals)
else:
# train - just set the mode to 'train'
net.train()
save_freq = 1 if DEBUG_SINGLE_IMG is None else 100
# scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.5)
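# ReduceLROnPlateau halves the learning rate when the validation MSE stops improving for 5 consecutive epochs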
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
mode='min', threshold=1e-4, threshold_mode='rel',
verbose=True,
factor=0.5, patience=5, cooldown=0, min_lr=0, eps=1e-8)
for epoch in range(start_epoch, start_epoch +num_epochs):
# train a single epoch
train_single_epoch(net, optimizer, criteria, train_dataset, input_device, output_device)
# save model after epoch
if (epoch + 1) % save_freq == 0 or epoch == start_epoch +num_epochs:
save_model(data_dir, net, optimizer, 'shapenet_epoch_%d.pth'% epoch)
# # test
# img = train_dataset.data[0]
# img = img.reshape(*img.shape[1:])
# lmks = predict(net, img, input_device)
# view_img(img, lmks, train_dataset.labels[0])
# validate
# print('eval at end of epoch')
metric_vals, loss_vals, preds = eval(net, val_dataset if DEBUG_SINGLE_IMG is None else train_dataset, criteria, metrics, input_device, output_device)
tqdm.write('val loss = %.2f metric = %.2f ' % (loss_vals['L1'], metric_vals['MSE']))
# TODO: save best
scheduler.step(metrics=metric_vals['MSE'])
def run_train():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--datadir",
help="Path to dataset dir",
type=str)
parser.add_argument("--pcafile",
help="pca file name",
type=str)
parser.add_argument("--evalonly", action="store_true",
help="do not train. only test on validation set",
default=False)
parser.add_argument("--learnrate", type=float, default=0.0001,
help="Learning rate")
args = parser.parse_args()
data_dir = args.datadir
evalonly = args.evalonly
lr = args.learnrate
global PCA_FILE
if args.pcafile is not None:
PCA_FILE = args.pcafile
assert data_dir is not None
train_data = os.path.join(data_dir, 'labels_ibug_300W_train.npz')
val_data = os.path.join(data_dir, 'labels_ibug_300W_test.npz')
train(data_dir, train_data, val_data, lr, evalonly)
if __name__ == '__main__':
run_train()
| 40.331984 | 161 | 0.619755 | 1,337 | 9,962 | 4.387435 | 0.175767 | 0.048756 | 0.037675 | 0.050972 | 0.357484 | 0.264235 | 0.229969 | 0.177975 | 0.116604 | 0.083191 | 0 | 0.012764 | 0.268621 | 9,962 | 246 | 162 | 40.495935 | 0.792341 | 0.103192 | 0 | 0.166667 | 0 | 0 | 0.059438 | 0.00809 | 0 | 0 | 0 | 0.004065 | 0.005376 | 1 | 0.064516 | false | 0 | 0.064516 | 0.010753 | 0.172043 | 0.032258 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea6f6f1a33819b691f402357ca773b7d4d28d75e | 1,400 | py | Python | .github/workflows/remove_tracer_errors.py | wjsi/mars | a69fb19edfe748d4393b90ff2c4941a76c084596 | [
"Apache-2.0"
] | 2,413 | 2018-12-06T09:37:11.000Z | 2022-03-30T15:47:39.000Z | .github/workflows/remove_tracer_errors.py | wjsi/mars | a69fb19edfe748d4393b90ff2c4941a76c084596 | [
"Apache-2.0"
] | 1,335 | 2018-12-07T03:06:18.000Z | 2022-03-31T11:45:57.000Z | .github/workflows/remove_tracer_errors.py | wjsi/mars | a69fb19edfe748d4393b90ff2c4941a76c084596 | [
"Apache-2.0"
] | 329 | 2018-12-07T03:12:41.000Z | 2022-03-29T21:49:57.000Z | #!/usr/bin/env python
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Workaround for https://github.com/pytest-dev/pytest-cov/issues/406 :
we remove coverage files without cython tracers.
"""
import glob
import logging
import os
import sqlite3
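# This script is meant to run from the directory where pytest-cov drops its
# per-process ".coverage.*" data files (typically the repository root).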
logger = logging.getLogger(__name__)
def check_coverage_file(file_name):
try:
conn = sqlite3.connect(file_name)
tracers = list(conn.execute('SELECT * FROM tracer'))
if len(tracers) < 1:
raise ValueError('File containing no tracers')
except Exception as exc: # noqa: E722
logger.warning('Failed to resolve coverage file %s due to error %r',
file_name, exc)
os.unlink(file_name)
def main():
for cov_file in glob.glob('.coverage.*'):
check_coverage_file(cov_file)
if __name__ == '__main__':
main()
| 29.166667 | 76 | 0.702143 | 199 | 1,400 | 4.829146 | 0.61809 | 0.062435 | 0.027055 | 0.033299 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018919 | 0.207143 | 1,400 | 47 | 77 | 29.787234 | 0.846847 | 0.513571 | 0 | 0 | 0 | 0 | 0.174507 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.2 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea7026b93dd8456097d54f0545d2a9b126c0a459 | 4,695 | py | Python | stomatal_conductance_diff.py | ehultee/glacial-SPEI | 6f81db8a398fde626fc21b0c5e03ba97f131c0a0 | [
"MIT"
] | 4 | 2020-01-13T16:31:10.000Z | 2021-08-20T19:36:12.000Z | stomatal_conductance_diff.py | ehultee/glacial-SPEI | 6f81db8a398fde626fc21b0c5e03ba97f131c0a0 | [
"MIT"
] | null | null | null | stomatal_conductance_diff.py | ehultee/glacial-SPEI | 6f81db8a398fde626fc21b0c5e03ba97f131c0a0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""Quantify the effect of variable stomatal conductance
Created on Mon Jun 29 13:29:01 2020
@author: EHU
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# import sys
# sys.path.insert(0, 'Users/lizz/Documents/GitHub/glacial-SPEI')
import gSPEI as gSPEI
fpath_default = './data/SPEI_Files/'
fpath_conduct = './data/SPEI_Files/variable_stom_conduct/'
## Settings in filenames
integration_times = np.arange(3, 28, 4) # all SPEI integration times used
modelnames = ['CanESM2', 'CCSM4', 'CNRM-CM5', 'CSIRO-Mk3-6-0', 'GISS-E2-R', 'INMCM4', 'MIROC-ESM', 'NorESM1-M'] # all models used in comparison
scenarios = ['Rcp4p5', 'Rcp8p5'] # climate scenarios
## Basins in the order they are written
basin_names = ['INDUS','TARIM','BRAHMAPUTRA','ARAL SEA','COPPER','GANGES','YUKON','ALSEK','SUSITNA','BALKHASH','STIKINE','SANTA CRUZ',
'FRASER','BAKER','YANGTZE','SALWEEN','COLUMBIA','ISSYK-KUL','AMAZON','COLORADO','TAKU','MACKENZIE','NASS','THJORSA','JOEKULSA A F.',
'KUSKOKWIM','RHONE','SKEENA','OB','OELFUSA','MEKONG','DANUBE','NELSON RIVER','PO','KAMCHATKA','RHINE','GLOMA','HUANG HE','INDIGIRKA',
'LULE','RAPEL','SANTA','SKAGIT','KUBAN','TITICACA','NUSHAGAK','BIOBIO','IRRAWADDY','NEGRO','MAJES','CLUTHA','DAULE-VINCES',
'KALIXAELVEN','MAGDALENA','DRAMSELV','COLVILLE']
yrs = np.linspace(1900, 2101, num=2412)
## Read default and variable-conductance SPEI in to dicts
SPEI_by_model = {m: {} for m in modelnames} # create dictionary indexed by model name
for m in modelnames:
norunoff_f_m = fpath_default+'NRunoff_{}_{}_{}.txt'.format(integration_times[3], m, scenarios[0])
wrunoff_f_m = fpath_default+'WRunoff_{}_{}_{}.txt'.format(integration_times[3], m, scenarios[0])
SPEI_by_model[m]['NRunoff'] = np.loadtxt(norunoff_f_m)
SPEI_by_model[m]['WRunoff'] = np.loadtxt(wrunoff_f_m)
SPEI_by_model[m]['diff'] = SPEI_by_model[m]['WRunoff'] - SPEI_by_model[m]['NRunoff']
SPEI_by_model_C = {m: {} for m in modelnames} # create dictionary indexed by model name
for m in modelnames:
norunoff_f_m = fpath_conduct+'NRunoff_{}_{}_{}_Conduct.txt'.format(integration_times[3], m, scenarios[0])
wrunoff_f_m = fpath_conduct+'WRunoff_{}_{}_{}_Conduct.txt'.format(integration_times[3], m, scenarios[0])
SPEI_by_model_C[m]['NRunoff'] = np.loadtxt(norunoff_f_m)
SPEI_by_model_C[m]['WRunoff'] = np.loadtxt(wrunoff_f_m)
SPEI_by_model_C[m]['diff'] = SPEI_by_model_C[m]['WRunoff'] - SPEI_by_model_C[m]['NRunoff']
## Re-structure dictionary and create pandas DataFrames aggregated by basin
SPEI_by_basin = gSPEI.sort_models_to_basins(SPEI_by_model)
SPEI_by_basin_C = gSPEI.sort_models_to_basins(SPEI_by_model_C)
## Compute pairwise difference in glacial effect due to stomatal conductance
vcd = {b: [] for b in basin_names}
vcd_mean = []
vcd_perdiff = [] #compute percent change in glacial effect
for b in basin_names:
df = SPEI_by_basin[b]['diff']
df1 = SPEI_by_basin_C[b]['diff']
df_diff = pd.DataFrame.subtract(df1, df).mean()
perdiff = pd.Series.divide(df_diff, df.mean()) #per-model percent difference
vcd[b].extend(df_diff.values)  # per-model mean differences for this basin
vcd_mean.append(df_diff.mean()) #mean difference across models for each basin
vcd_perdiff.append(perdiff.mean())
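# At this point vcd_mean holds, per basin, the mean (across models) change in the glacial
# effect (SPEI_W - SPEI_N) caused by variable stomatal conductance, and vcd_perdiff holds
# the same change normalized by the original glacial effect.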
## Compare with pairwise difference in NRunoff - does stomatal conductance matter there?
vcd_NR = {b: [] for b in basin_names}
vcd_mean_NR = []
vcd_perdiff_NR = [] #compute percent change in glacial effect
for b in basin_names:
df = SPEI_by_basin[b]['NRunoff']
df1 = SPEI_by_basin_C[b]['NRunoff']
df_diff = pd.DataFrame.subtract(df1, df).mean()
perdiff = pd.Series.divide(df_diff, df.mean()) #per-model percent difference
vcd_NR[b].extend(df_diff.values)  # per-model mean differences for this basin
vcd_mean_NR.append(df_diff.mean()) #mean difference across models for each basin
vcd_perdiff_NR.append(perdiff.mean())
## Plot histograms of difference
plt.rc('xtick', labelsize=14)
plt.rc('ytick', labelsize=14)
plt.rc('axes', titlesize=18, labelsize=16)
fig, ax = plt.subplots(1)
ax.axvline(0, ls=':', lw=3, color='Grey')
ax.hist(vcd_perdiff, alpha=0.5)
ax.set(title='SPEI glacial effect (SPEI$_W$ - SPEI$_N$)',
xlabel='Normalized difference due to stomatal conductance', ylabel='Count',
ylim=(0,20), yticks=(0, 5, 10, 15, 20))
plt.tight_layout()
plt.show()
fig1, ax1 = plt.subplots(1)
ax1.axvline(0, ls=':', lw=3, color='Grey')
ax1.hist(vcd_perdiff_NR, alpha=0.5)
ax1.set(title='SPEI$_N$ series, no runoff',
xlabel='Normalized difference due to stomatal conductance', ylabel='Count',
ylim=(0,40), yticks=(0, 10, 20, 30, 40))
plt.tight_layout()
plt.show() | 46.029412 | 143 | 0.713312 | 727 | 4,695 | 4.419532 | 0.352132 | 0.037348 | 0.04793 | 0.026144 | 0.477124 | 0.446001 | 0.422969 | 0.408341 | 0.372238 | 0.36788 | 0 | 0.024739 | 0.121832 | 4,695 | 102 | 144 | 46.029412 | 0.754548 | 0.215122 | 0 | 0.194444 | 0 | 0 | 0.247739 | 0.026309 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.055556 | 0 | 0.055556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea708555e00d729ca30fb3ed510f82c3241dbe26 | 990 | py | Python | BlogProject/dashboard/urls.py | nayonacademy/testgit | eb1b11bab085a6b9e3d930d452fd2267fa64a8ac | [
"MIT"
] | 1 | 2019-04-27T13:35:09.000Z | 2019-04-27T13:35:09.000Z | BlogProject/dashboard/urls.py | nayonacademy/testgit | eb1b11bab085a6b9e3d930d452fd2267fa64a8ac | [
"MIT"
] | 5 | 2019-07-03T21:22:10.000Z | 2021-06-10T21:17:50.000Z | BlogProject/dashboard/urls.py | nayonacademy/testgit | eb1b11bab085a6b9e3d930d452fd2267fa64a8ac | [
"MIT"
] | 1 | 2019-09-01T17:49:39.000Z | 2019-09-01T17:49:39.000Z | from django.urls import path
from dashboard import views
urlpatterns = [
path('login', views.bloglogin, name="login"),
path('logout', views.bloglogout, name="logout"),
path('', views.dashboard, name="dashboard"),
path('posts', views.showAllpost, name='showallpost'),
path('category', views.category, name='category'),
path('category_status/<int:pk>', views.category_status, name='category_status'),
#path('category_update/<int:pk>',views.category_update,name='category_update'),
path('posts/<int:pk>', views.updatePost, name='updatePost'),
path('posts/delete/<int:pk>', views.postdelete, name='postdelete'),
path('category/delete/<int:pk>', views.category_delete, name='category_delete'),
path('category/edit/<int:pk>', views.category_edit, name='category_edit'),
path('posts/add', views.newPost, name='newpost'),
path('posts/update/<int:pk>', views.postupdate, name='postupdate'),
path('settings', views.settings, name='settings')
] | 52.105263 | 84 | 0.69798 | 122 | 990 | 5.581967 | 0.229508 | 0.051395 | 0.10279 | 0.105727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.109091 | 990 | 19 | 85 | 52.105263 | 0.772109 | 0.078788 | 0 | 0 | 0 | 0 | 0.322368 | 0.122807 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.117647 | 0 | 0.117647 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea7272fcc9127931c2bb7a5c8577988eb915bf2d | 2,779 | py | Python | wiki/routes/articles.py | sharebears/pulsar-wiki | 956d4a3a5dd256421c4e3df6227982c7f1a842b8 | [
"MIT"
] | null | null | null | wiki/routes/articles.py | sharebears/pulsar-wiki | 956d4a3a5dd256421c4e3df6227982c7f1a842b8 | [
"MIT"
] | null | null | null | wiki/routes/articles.py | sharebears/pulsar-wiki | 956d4a3a5dd256421c4e3df6227982c7f1a842b8 | [
"MIT"
] | null | null | null | import flask
from voluptuous import All, Any, Length, Range, Schema
from core import APIException
from core.utils import require_permission, validate_data
from wiki.models import (
WikiArticle,
WikiLanguage,
WikiRevision,
WikiTranslation,
)
from wiki.permissions import WikiPermissions
from . import bp
app = flask.current_app
VIEW_ARTICLE_SCHEMA = Schema({'language': All(str, Length(max=128))})
@bp.route('/wiki/articles/<int:id>', methods=['GET'])
@require_permission(WikiPermissions.VIEW)
@validate_data(VIEW_ARTICLE_SCHEMA)
def view_wiki_article(id: int, language: str):
if language:
return flask.jsonify(
WikiTranslation.from_attrs(
article_id=id,
language_id=WikiLanguage.from_language(language).id,
)
)
return flask.jsonify(
WikiArticle.from_pk(
pk=id,
_404=True,
include_dead=flask.g.user.has_permission(
WikiPermissions.VIEW_DELETED
),
)
)
CREATE_ARTICLE_SCHEMA = Schema(
{
'language': All(str, Length(max=128)),
'article_id': Any(int, None),
'title': All(str, Length(min=1, max=128)),
'contents': All(str, Length(max=1000000000)),
}
)
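# A request body accepted by the schema above might look like the following
# (values are purely illustrative):
#   {"title": "House rules", "contents": "Be nice.", "language": "en", "article_id": 7}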
@bp.route('/wiki/create', methods=['POST'])
@require_permission(WikiPermissions.CREATE)
@validate_data(CREATE_ARTICLE_SCHEMA)
def create_wiki_article(
title: str, contents: str, language: str = None, article_id: int = None
):
if language and WikiArticle.is_valid(article_id):
language_id = WikiLanguage.from_language(language, error=True)
wiki = WikiTranslation.new(
article_id=article_id,
title=title,
language_id=language_id,
contents=contents,
user_id=flask.g.user.id,
)
else:
wiki = WikiArticle.new(
title=title, contents=contents, user_id=flask.g.user.id
)
return flask.jsonify(wiki)
EDIT_ARTICLE_SCHEMA = Schema(
{
'title': All(str, Length(min=1, max=128)),
'language': All(str, Length(max=128)),
'contents': All(str, Length(max=1000000000)),
}
)
@bp.route('/wiki/modify/<int:id>', methods=['PUT'])
@require_permission(WikiPermissions.EDIT)
@validate_data(EDIT_ARTICLE_SCHEMA)
def edit_wiki_article(id: int, title: str, language: str, contents: str):
wiki = WikiArticle.from_pk(id, _404=True)
if not wiki:
raise APIException(f'WikiArticle {id} does not exist.')
language_id = WikiLanguage.from_language(language, error=True)
wiki = WikiRevision.new(
article_id=id,
title=title,
language_id=language_id,
editor_id=flask.g.user.id,
contents=contents,
)
return flask.jsonify(wiki)
| 27.79 | 75 | 0.645916 | 330 | 2,779 | 5.269697 | 0.230303 | 0.046578 | 0.034503 | 0.043128 | 0.320874 | 0.312823 | 0.301323 | 0.238068 | 0.172513 | 0.057504 | 0 | 0.020293 | 0.237496 | 2,779 | 99 | 76 | 28.070707 | 0.800378 | 0 | 0 | 0.211765 | 0 | 0 | 0.056855 | 0.015833 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035294 | false | 0 | 0.082353 | 0 | 0.164706 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea72ea4cb5120ff8eea0a68cb459ede6bd52335f | 1,428 | py | Python | unidef/languages/fix/fix_parser.py | qiujiangkun/unidef | 6d3ca31a6b1d498f38f483d4174f79f7fe920f65 | [
"MIT"
] | 4 | 2021-11-08T10:01:19.000Z | 2022-03-17T06:27:14.000Z | unidef/languages/fix/fix_parser.py | qiujiangkun/unidef | 6d3ca31a6b1d498f38f483d4174f79f7fe920f65 | [
"MIT"
] | null | null | null | unidef/languages/fix/fix_parser.py | qiujiangkun/unidef | 6d3ca31a6b1d498f38f483d4174f79f7fe920f65 | [
"MIT"
] | null | null | null | import json
import os
import sys
import unicodedata
import quickfix
from unidef.languages.common.type_model import *
from unidef.models.input_model import ExampleInput, InputDefinition
from unidef.parsers import Parser
from unidef.utils.loader import load_module
class FixParserImpl(Parser):
BASE_DIR = "quickfix"
def accept(self, fmt: InputDefinition) -> bool:
return (
isinstance(fmt, ExampleInput)
and fmt.format.lower().startswith("fix")
and load_module("quickfix")
)
def get_dictionary(self, version: str) -> "quickfix.DataDictionary":
if not os.path.exists(FixParserImpl.BASE_DIR):
os.system(
f"git clone https://github.com/quickfix/quickfix {FixParserImpl.BASE_DIR}"
)
return quickfix.DataDictionary(f"{FixParserImpl.BASE_DIR}/spec/{version}.xml")
def parse(self, name: str, fmt: ExampleInput) -> DyType:
content = fmt.text
dct = self.get_dictionary(fmt.format)
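# The example text is expected to be a pipe-delimited FIX message, e.g.
# (illustrative only): "8=FIX.4.2|35=D|55=ABC|38=100|"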
kvs = content.split("|")[:-1]
fields = []
for kv in kvs:
k, v = kv.split("=")
nm = dct.getFieldName(int(k), "")[0]
try:
ty = infer_type_from_example(json.loads(v))
except Exception:
ty = Types.String
fields.append(FieldType(field_name=nm, field_type=ty))
return StructType(name=name, fields=fields)
| 32.454545 | 90 | 0.62465 | 166 | 1,428 | 5.283133 | 0.524096 | 0.04561 | 0.068415 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001907 | 0.265406 | 1,428 | 43 | 91 | 33.209302 | 0.834128 | 0 | 0 | 0 | 0 | 0 | 0.110644 | 0.063025 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0 | 0.243243 | 0.027027 | 0.459459 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea7445774a76b295fdd194f73174b2b15e8d187c | 1,261 | py | Python | ironic/drivers/modules/drac/resource_uris.py | Tehsmash/ironic | a34c351639e960af92a3608fbc9249dfce5c6057 | [
"Apache-2.0"
] | null | null | null | ironic/drivers/modules/drac/resource_uris.py | Tehsmash/ironic | a34c351639e960af92a3608fbc9249dfce5c6057 | [
"Apache-2.0"
] | null | null | null | ironic/drivers/modules/drac/resource_uris.py | Tehsmash/ironic | a34c351639e960af92a3608fbc9249dfce5c6057 | [
"Apache-2.0"
] | null | null | null | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Resource URIs and helper functions for the classes implemented by the DRAC
WS-Man API.
"""
DCIM_ComputerSystem = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2'
'/DCIM_ComputerSystem')
DCIM_BootSourceSetting = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2/'
'DCIM_BootSourceSetting')
DCIM_BootConfigSetting = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2/'
'DCIM_BootConfigSetting')
DCIM_BIOSService = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2/'
'DCIM_BIOSService')
DCIM_LifecycleJob = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2/'
'DCIM_LifecycleJob')
| 38.212121 | 78 | 0.689136 | 171 | 1,261 | 5.023392 | 0.48538 | 0.069849 | 0.087311 | 0.104773 | 0.24447 | 0.24447 | 0.24447 | 0.24447 | 0.24447 | 0.24447 | 0 | 0.013889 | 0.200634 | 1,261 | 32 | 79 | 39.40625 | 0.838294 | 0.483743 | 0 | 0 | 0 | 0 | 0.547468 | 0.06962 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea750b3ff66d5229162942f5b58896a7afa175fc | 6,774 | py | Python | tests/test_reco_ranking.py | vishalbelsare/jurity | 8061d4cc3b1b13bafdb13e9dd2367c8ad941fa0e | [
"Apache-2.0"
] | 18 | 2021-02-01T23:51:00.000Z | 2022-01-19T15:41:32.000Z | tests/test_reco_ranking.py | vishalbelsare/jurity | 8061d4cc3b1b13bafdb13e9dd2367c8ad941fa0e | [
"Apache-2.0"
] | 11 | 2021-02-08T20:19:07.000Z | 2022-02-09T22:55:49.000Z | tests/test_reco_ranking.py | vishalbelsare/jurity | 8061d4cc3b1b13bafdb13e9dd2367c8ad941fa0e | [
"Apache-2.0"
] | 6 | 2021-04-27T17:17:53.000Z | 2021-09-02T22:50:58.000Z | # -*- coding: utf-8 -*-
# Copyright FMR LLC <opensource@fidelity.com>
# SPDX-License-Identifier: Apache-2.0
import unittest
import numpy as np
import pandas as pd
from jurity.recommenders import RankingRecoMetrics
from jurity.recommenders.ndcg import idcg
from jurity.utils import Constants
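# All metrics below consume "actual" and "predicted" DataFrames keyed by
# Constants.user_id / Constants.item_id, with a score column (here, 'click').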
class TestRankingRecommenders(unittest.TestCase):
def test_ndcg(self):
# First, test the IDCG value
idcg_val = idcg(3)
self.assertEqual(1. / np.log2(2) + 1. / np.log2(3) + 1. / np.log2(4), idcg_val)
# Then, test NDCG
# User 1 has items 1, 4, 2 relevant and was recommended items 1, 3, 2
# User 2 checks for no relevant items, shouldn't contribute to the metric
# User 3 checks for no recommendations, is 0
# User 4 has items 1, 2 relevant and was recommended items 3, 4; is 0
actual = pd.DataFrame({Constants.user_id: [1, 1, 1, 1, 3, 4, 4],
Constants.item_id: [1, 2, 3, 4, 3, 1, 2],
'click': [1, 1, 0, 1, 1, 1, 1]})
predicted = pd.DataFrame({Constants.user_id: [1, 1, 1, 2, 4, 4],
Constants.item_id: [1, 2, 3, 3, 3, 4],
'click': [0.8, 0.7, 0.75, 0.7, 0.6, 0.4]})
metric = RankingRecoMetrics.NDCG(click_column='click', k=3)
results = metric.get_score(actual, predicted, return_extended_results=True)
self.assertEqual(((1. / np.log2(2) + 1. / np.log2(4)) / idcg_val) / 3, results['ndcg'])
self.assertEqual(3, results['support'])
def test_precision(self):
# User 1 was recommended items 1, 3, 2 and has items 1, 4 relevant
# User 2 and 3 check for no relevant items
# User 4 checks for no recommendations
actual = pd.DataFrame({Constants.user_id: [1, 1, 1, 3, 4],
Constants.item_id: [1, 2, 4, 1, 3],
'click': [1, 0, 1, 0, 1]})
predicted = pd.DataFrame({Constants.user_id: [1, 1, 1, 2, 3],
Constants.item_id: [1, 2, 3, 3, 1],
'click': [0.8, 0.7, 0.75, 0.7, 0.5]})
metric = RankingRecoMetrics.Precision(click_column='click', k=2)
results = metric.get_score(actual, predicted, return_extended_results=True)
self.assertEqual(0.5, results['precision'])
self.assertEqual(1, results['support'])
precision_3 = RankingRecoMetrics.Precision(click_column='click', k=3)
result_3 = precision_3.get_score(actual, predicted)
self.assertEqual(1. / 3, result_3)
def test_precision_at_max_recs(self):
"""Tests Precision@k for the case when all users have exactly k recommendations.
When all users have exactly k recommendations,
there isn't an extra ``user_id`` index generated when sorting for the largest ``k`` scores.
"""
actual = pd.DataFrame({Constants.user_id: [0], Constants.item_id: [0], 'click': [True]})
predicted = pd.DataFrame(
{Constants.user_id: [0, 0, 0], Constants.item_id: [0, 1, 2], 'click': [0, -1, -2]})
self.assertEqual(1., RankingRecoMetrics.Precision('click', k=1).get_score(actual, predicted))
self.assertEqual(0.5, RankingRecoMetrics.Precision('click', k=2).get_score(actual, predicted))
self.assertEqual(1. / 3, RankingRecoMetrics.Precision('click', k=3).get_score(actual, predicted))
def test_map(self):
# User 1 got items 1,3,2,4 as recommendations. Items 1 and 4 are relevant.
# User 2 checks for no relevant items
# user 3 checks for no recommendations
actual = pd.DataFrame({Constants.user_id: [1, 1, 1, 3],
Constants.item_id: [1, 2, 4, 3],
'click': [1, 0, 1, 1]})
predicted = pd.DataFrame({Constants.user_id: [1, 1, 1, 1, 2],
Constants.item_id: [1, 2, 3, 4, 3],
'click': [0.8, 0.7, 0.75, 0.65, 0.7]})
metric = RankingRecoMetrics.MAP(click_column='click', k=2)
results = metric.get_score(actual, predicted, return_extended_results=True)
self.assertEqual(0.5, results['map'])
self.assertEqual(1, results['support'])
map_3 = RankingRecoMetrics.MAP(click_column='click', k=3).get_score(actual, predicted)
self.assertEqual(0.5, map_3)
map_4 = RankingRecoMetrics.MAP(click_column='click', k=4).get_score(actual, predicted)
self.assertEqual(0.75, map_4)
def test_recall(self):
# User 1 has items 1, 4, 2 relevant and was recommended items 1, 3, 2, so they should be included in the support
# User 2 checks for no relevant items, so they shouldn't be included in the support
# User 3 & 4 checks for no recommendations, but they should be included in the support
actual = pd.DataFrame({Constants.user_id: [1, 1, 1, 1, 3, 4],
Constants.item_id: [1, 2, 3, 4, 3, 1],
'click': [1, 1, 0, 1, 1, 1]})
predicted = pd.DataFrame({Constants.user_id: [1, 1, 1, 2],
Constants.item_id: [1, 2, 3, 3],
'click': [0.8, 0.7, 0.75, 0.7]})
metric = RankingRecoMetrics.Recall(click_column='click', k=2)
results = metric.get_score(actual, predicted, return_extended_results=True)
self.assertEqual(1. / 9, results['recall'])
self.assertEqual(3, results['support'])
recall_3 = RankingRecoMetrics.Recall(click_column='click', k=3).get_score(actual, predicted)
self.assertEqual(2. / 9, recall_3)
def test_recall_at_max_recs(self):
"""Tests Recall@k for the case when all users have exactly k recommendations.
When all users have exactly k recommendations, there isn't an extra ``user_id`` index generated when sorting
for the largest ``k`` scores.
"""
actual = pd.DataFrame({Constants.user_id: [0, 0],
Constants.item_id: [1, 2],
'click': [True, True]})
predicted = pd.DataFrame({Constants.user_id: [0, 0, 0, 0],
Constants.item_id: [0, 1, 2, 3],
'click': [0, -1, -2, -3]})
self.assertEqual(0., RankingRecoMetrics.Recall('click', k=1).get_score(actual, predicted))
self.assertEqual(0.5, RankingRecoMetrics.Recall('click', k=2).get_score(actual, predicted))
self.assertEqual(1., RankingRecoMetrics.Recall('click', k=3).get_score(actual, predicted))
self.assertEqual(1., RankingRecoMetrics.Recall('click', k=4).get_score(actual, predicted))
| 48.385714 | 120 | 0.584145 | 926 | 6,774 | 4.184665 | 0.12635 | 0.013935 | 0.054194 | 0.089032 | 0.762581 | 0.691613 | 0.601032 | 0.526452 | 0.509677 | 0.481032 | 0 | 0.061119 | 0.285061 | 6,774 | 139 | 121 | 48.733813 | 0.739005 | 0.206082 | 0 | 0.1 | 0 | 0 | 0.034906 | 0 | 0 | 0 | 0 | 0 | 0.25 | 1 | 0.075 | false | 0 | 0.075 | 0 | 0.1625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea7943645920f75c8961373d4218f344eba12a7e | 8,816 | py | Python | lidar_getdist.py | AdroitAnandAI/ADAS-Collision-Avoidance-System-on-Indian-Roads | 31c233a086b7b0cabcd527b51f98029cb9a58e5c | [
"CC0-1.0"
] | 2 | 2022-01-04T19:41:59.000Z | 2022-01-15T13:51:59.000Z | lidar_getdist.py | AdroitAnandAI/ADAS-Collision-Avoidance-System-on-Indian-Roads | 31c233a086b7b0cabcd527b51f98029cb9a58e5c | [
"CC0-1.0"
] | null | null | null | lidar_getdist.py | AdroitAnandAI/ADAS-Collision-Avoidance-System-on-Indian-Roads | 31c233a086b7b0cabcd527b51f98029cb9a58e5c | [
"CC0-1.0"
] | null | null | null | """
The getObjectDistance (angle_min, angle_max) function will get the
distance of LIDAR points between angle_min and angle_max. Then it takes
the median of those distances and returns it. This function is used as a
supporting method for the ADAS system after bounding box detection of
objects. The x_min and x_max of bounding boxes are used to compute the
angle_min and angle_max which is passed in to the getObjectDistance() in
this module to estimate the object distance.
The code is extensively modified from the base code provided by ADAFruit.
Adafruit invests time and resources providing this open source code.
Please support Adafruit and open source hardware by purchasing
products from Adafruit!
Written by Dave Astels for Adafruit Industries
Copyright (c) 2019 Adafruit Industries
Licensed under the MIT license.
All text above must be included in any redistribution.
"""
import os
from math import cos, sin, pi, floor
import math
import paho.mqtt.client as mqtt
import pygame
from adafruit_rplidar import RPLidar, RPLidarException
import numpy as np
# Screen width & height
W = 640
H = 480
SCAN_BYTE = b'\x20'
SCAN_TYPE = 129
import ledshim
import colorsys
import time
from sys import exit
def make_gaussian(fwhm):
x = np.arange(0, ledshim.NUM_PIXELS, 1, float)
y = x[:, np.newaxis]
x0, y0 = 3.5, (ledshim.NUM_PIXELS / 2) - 0.5
fwhm = fwhm
gauss = np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / fwhm ** 2)
return gauss
def flashLight():
print('inside flashlight')
for _ in range(10):
ledshim.set_clear_on_exit()
for z in list(range(1, 10)[::-1]) + list(range(1, 10)):
fwhm = 15.0 / z
gauss = make_gaussian(fwhm)
print('after make gaussian')
start = time.time()
y = 4
for x in range(ledshim.NUM_PIXELS):
h = 0.5
s = 1.0
v = gauss[x, y]
rgb = colorsys.hsv_to_rgb(h, s, v)
r, g, b = [int(255.0 * i) for i in rgb]
ledshim.set_pixel(x, r, g, b)
ledshim.show()
end = time.time()
t = end - start
if t < 0.04:
time.sleep(0.04 - t)
# Setup the RPLidar
PORT_NAME = '/dev/ttyUSB0'
lidar = RPLidar(None, PORT_NAME)
# used to scale data to fit on the screen
max_distance = 0
#pylint: disable=redefined-outer-name,global-statement
def process_data(data):
global max_distance
lcd.fill((0,0,0))
point = ( int(W / 2) , int(H / 2) )
pygame.draw.circle(lcd,pygame.Color(255, 255, 255),point,10 )
pygame.draw.circle(lcd,pygame.Color(100, 100, 100),point,100 , 1 )
pygame.draw.line( lcd,pygame.Color(100, 100, 100) , ( 0, int(H/2)),( W , int(H/2) ) )
pygame.draw.line( lcd,pygame.Color(100, 100, 100) , ( int(W/2),0),( int(W/2) , H ) )
for angle in range(360):
distance = data[angle]
if distance > 0: # ignore initially ungathered data points
max_distance = max([min([5000, distance]), max_distance])
radians = angle * pi / 180.0
x = distance * cos(radians)
y = distance * sin(radians)
point = ( int(W / 2) + int(x / max_distance * (W/2)), int(H/2) + int(y / max_distance * (H/2) ))
pygame.draw.circle(lcd,pygame.Color(255, 0, 0),point,2 )
pygame.display.update()
scan_data = [0]*360
def _process_scan(raw):
'''Processes input raw data and returns measurement data'''
new_scan = bool(raw[0] & 0b1)
inversed_new_scan = bool((raw[0] >> 1) & 0b1)
quality = raw[0] >> 2
if new_scan == inversed_new_scan:
raise RPLidarException('New scan flags mismatch')
check_bit = raw[1] & 0b1
if check_bit != 1:
raise RPLidarException('Check bit not equal to 1')
angle = ((raw[1] >> 1) + (raw[2] << 7)) / 64.
distance = (raw[3] + (raw[4] << 8)) / 4.
return new_scan, quality, angle, distance
def lidar_measurments(self, max_buf_meas=500):
lidar.set_pwm(800)
status, error_code = self.health
cmd = SCAN_BYTE
self._send_cmd(cmd)
dsize, is_single, dtype = self._read_descriptor()
if dsize != 5:
raise RPLidarException('Wrong info reply length')
if is_single:
raise RPLidarException('Not a multiple response mode')
if dtype != SCAN_TYPE:
raise RPLidarException('Wrong response data type')
while True:
raw = self._read_response(dsize)
self.log_bytes('debug', 'Received scan response: ', raw)
if max_buf_meas:
data_in_buf = self._serial_port.in_waiting
if data_in_buf > max_buf_meas*dsize:
self.log('warning',
'Too many measurments in the input buffer: %d/%d. '
'Clearing buffer...' %
(data_in_buf//dsize, max_buf_meas))
self._serial_port.read(data_in_buf//dsize*dsize)
yield _process_scan(raw)
def lidar_scans(self, max_buf_meas=500, min_len=5):
scan = []
iterator = lidar_measurments(lidar,max_buf_meas)
for new_scan, quality, angle, distance in iterator:
if new_scan:
if len(scan) > min_len:
yield scan
scan = []
if quality > 0 and distance > 0:
scan.append((quality, angle, distance))
def getObjectDistance(angle_min, angle_max):
minDist = 0
lidar = RPLidar(None, PORT_NAME)
try:
for scan in lidar_scans(lidar):
for (_, angle, distance) in scan:
# print("Angle = " + str(angle) + "distance == " + str(distance))
scan_data[min([359, floor(angle)])] = distance
# fetching all non zero distance values between subtended angles
allDists = [scan_data[i] for i in range(360)
if i >= angle_min and i <= angle_max and scan_data[i] > 0]
# if half the distance values are filled in then break
if (2 * len(allDists) > angle_max - angle_min):
minDist = np.median(allDists)
lidar.stop()
lidar.disconnect()
return minDist
except KeyboardInterrupt:
print('Stopping LIDAR Scan')
def roundtoTen(x):
return int(math.ceil(x/10.0)) * 10
def on_connect(client, userdata, flags, rc):
print("Connected with result code " + str(rc))
client.subscribe("object/getdistance")
def on_message(client, userdata, msg):
# print(msg.payload.decode())
word = msg.payload.decode()
# objAttributes contains label,
# theta min and max separated by |
objAttributes = word.split('|')
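# Expected payload layout (illustrative values): "person|10.0|35.0|1042"
# i.e. label | theta_min | theta_max | publish time in seconds (tm_min*60 + tm_sec)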
now = time.localtime()
if (now.tm_min * 60 + now.tm_sec - int(objAttributes[3]) >= 1):
return
theta1 = float(objAttributes[1])
theta2 = float(objAttributes[2])
dist = getObjectDistance(int(theta1) + 90 + 59, int(theta2) + 90 + 59)
# convert distance from mm to meters
dist = round(float (dist / 1000), 1)
# print('The distance to the object = ' + str(dist))
theta_mid = int((theta1 + theta2) / 2)
print(' Object is at an angle of ' + str(theta_mid))
# if near then announce an alert!
# Passing the hue value on MQTT. 0 = Red. 0.3 = Green
if (dist < 2.0):
# print('setting alert msg')
announceText = "ALERT ALERT "
client.publish("object/flashlight", "0.0")
else:
announceText = ""
client.publish("object/flashlight", "0.3")
announceText = announceText + str(objAttributes[0]) + ' at ' + str(dist) + ' meters. '
# theta_mid can vary from 0 to 62 degrees
if theta_mid > 40:
# print('Right Side')
os.system(
'espeak \"' + announceText + str (roundtoTen(abs(theta_mid - 31))) + ' degrees right\"')
elif theta_mid < 21:
# print('Left Side')
os.system(
'espeak \"' + announceText + str (roundtoTen(abs(31 - theta_mid))) + ' degrees left\"')
else:
# theta_mid will be > 20 and < 40, if here
if theta_mid < 30:
direction = ' Right '
else:
direction = ' Left '
# Alert to slide to opposite direction at theta + 30 degrees
os.system(
'espeak \"' + announceText + 'Slide ' + direction + str(abs(roundtoTen(abs(31 - theta_mid)) + 30)) + 'degrees \"')
client = mqtt.Client()
client.connect("localhost", 1883, 600)
client.on_connect = on_connect
client.on_message = on_message
try:
client.loop_forever()
# To catch SigINT
except KeyboardInterrupt:
client.disconnect()
| 32.29304 | 126 | 0.590177 | 1,182 | 8,816 | 4.298646 | 0.30203 | 0.015745 | 0.011809 | 0.007085 | 0.133045 | 0.054123 | 0.046448 | 0.046448 | 0.028341 | 0 | 0 | 0.04135 | 0.297754 | 8,816 | 272 | 127 | 32.411765 | 0.779357 | 0.199864 | 0 | 0.080925 | 0 | 0 | 0.075367 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057803 | false | 0 | 0.063584 | 0.00578 | 0.150289 | 0.028902 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea7a789bf54899bee965eb9f86c7971e0d351407 | 683 | py | Python | y_md5_check.py | wholetonegames/panda3d-ness-rpg | 8d81e8418c1bc20706b5b3f4c0631fe9bd76a65e | [
"MIT"
] | 1 | 2021-02-01T03:09:28.000Z | 2021-02-01T03:09:28.000Z | y_md5_check.py | wholetonegames/panda3d-ness-rpg | 8d81e8418c1bc20706b5b3f4c0631fe9bd76a65e | [
"MIT"
] | null | null | null | y_md5_check.py | wholetonegames/panda3d-ness-rpg | 8d81e8418c1bc20706b5b3f4c0631fe9bd76a65e | [
"MIT"
] | null | null | null | import hashlib
from panda3d.core import (VirtualFileSystem,
Multifile,
Filename)
def md5_sum(filename, blocksize=65536):
hash = hashlib.md5()
with open(filename, "rb") as f:
for block in iter(lambda: f.read(blocksize), b""):
hash.update(block)
return hash.hexdigest()
def md5_contents(filename):
m = Multifile()
m.openRead(filename)
payload = []
for i in range(m.getNumSubfiles()):
payload.append(m.get_subfile_name(i))
m.close()
return payload
if __name__ == "__main__":
m = 'assets.mf'
print(md5_sum(m))
for x in md5_contents(m):
print(x)
| 22.766667 | 58 | 0.58858 | 84 | 683 | 4.619048 | 0.559524 | 0.030928 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022774 | 0.292826 | 683 | 29 | 59 | 23.551724 | 0.780538 | 0 | 0 | 0 | 0 | 0 | 0.027818 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.086957 | 0 | 0.26087 | 0.086957 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea7aef1361e4141fb4b0e7ffddc4e2eb42d9db89 | 3,142 | py | Python | contrib/philipsPM2830.py | coburnw/pm2800-ivi | a45d06392f042d0fad444c0ad8eae6aaf457c2c6 | [
"MIT"
] | null | null | null | contrib/philipsPM2830.py | coburnw/pm2800-ivi | a45d06392f042d0fad444c0ad8eae6aaf457c2c6 | [
"MIT"
] | 2 | 2017-05-13T20:42:37.000Z | 2017-05-18T16:23:12.000Z | contrib/philipsPM2830.py | coburnw/pm2800-ivi | a45d06392f042d0fad444c0ad8eae6aaf457c2c6 | [
"MIT"
] | null | null | null | """
Python Interchangeable Virtual Instrument Library
philipsPM2830.py
Copyright (c) 2017 Coburn Wightman
Derived from rigolDP800.py
Copyright (c) 2013-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .philipsPM2800 import *
class philipsPM2830(philipsPM2800):
"Philips/Fluke Linear PM2830 series IVI DC power supply driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', '')
super(philipsPM2830, self).__init__(*args, **kwargs)
# PM283x series comes in singles and doubles only
self._output_count = 2
# Available output modules for the PM283x series
self._output_spec = [
{
'range': {
'P8V': (8.0, 15.0)
},
'ovp_max': 10.0,
'ocp_max': 15.1,
'voltage_max': 8.0,
'current_max': 15.0
},
{
'range': {
'P60V': (60.0, 2.0)
},
'ovp_max': 62.0,
'ocp_max': 2.1,
'voltage_max': 60.0,
'current_max': 2.0
},
{
'range': {
'P120V': (120.0, 1.0)
},
'ovp_max': 122.0,
'ocp_max': 1.1,
'voltage_max': 120.0,
'current_max': 1.0
}
]
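# Each entry above describes one output module: its range name mapped to a
# (max voltage, max current) pair, plus the OVP/OCP trip limits.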
#self._memory_size = 10
self._identity_description = "Philips/Fluke Linear PM2830 series IVI DC power supply driver"
self._identity_identifier = ""
self._identity_revision = ""
self._identity_vendor = ""
self._identity_instrument_manufacturer = "Philips NV"
self._identity_instrument_model = ""
self._identity_instrument_firmware_revision = ""
self._identity_specification_major_version = 3
self._identity_specification_minor_version = 0
self._identity_supported_instrument_models = ['PM2831', 'PM2832']
self._init_outputs()
| 33.784946 | 105 | 0.608211 | 368 | 3,142 | 5.019022 | 0.491848 | 0.06497 | 0.01137 | 0.017325 | 0.030319 | 0.030319 | 0 | 0 | 0 | 0 | 0 | 0.054318 | 0.314449 | 3,142 | 92 | 106 | 34.152174 | 0.803157 | 0.436983 | 0 | 0.06383 | 0 | 0 | 0.163826 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021277 | false | 0 | 0.021277 | 0 | 0.06383 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea7cdf26d83245cbf22e50ca7c85be1f39449377 | 2,397 | py | Python | bot.py | Sean-C-Casey/DATA552-Project | 3732bef4262b23b99eed9953fe1f254ba73af3b0 | [
"MIT"
] | null | null | null | bot.py | Sean-C-Casey/DATA552-Project | 3732bef4262b23b99eed9953fe1f254ba73af3b0 | [
"MIT"
] | null | null | null | bot.py | Sean-C-Casey/DATA552-Project | 3732bef4262b23b99eed9953fe1f254ba73af3b0 | [
"MIT"
] | null | null | null | import logging
import discord
from discord.ext import commands, tasks
from settings import BOT_TOKEN, LOGGER_NAME, LOG_FILE, UPDATE_INTERVAL
from reddit import RedditConnection
class NotificationCog(commands.Cog):
def __init__(self, bot: commands.Bot) -> None:
self.bot = bot
self.reddit: RedditConnection
self.logger = logging.getLogger(LOGGER_NAME)
# RedditConnection must be awaited, which cannot be done in the constructor, so it is created in on_ready
@commands.Cog.listener()
async def on_ready(self):
self.reddit = await RedditConnection.get_connection()
self.monitor_news.start()
@commands.command(name="ping")
async def test(self, ctx: commands.Context) -> None:
log_msg = f"Pinged by user {ctx.author}"
self.logger.info(log_msg)
print(log_msg)
await ctx.send("Hello!")
@commands.command(name="get-news")
async def get_news(self, ctx: commands.Context) -> None:
results = await self.reddit.get_top_n(3)
result = results[0]
msg = f"{result.url}\n\nReddit discussion: https://www.reddit.com{result.permalink}"
await ctx.send(msg)
@tasks.loop(hours=UPDATE_INTERVAL)
async def monitor_news(self) -> None:
server: discord.Guild = self.bot.get_guild(952745969391386625)
if server is None:
self.logger.error("Could not find notification server")
return
channel: discord.TextChannel = discord.utils.get(server.channels, name="dev")
results = await self.reddit.get_top_n(3)
result = results[0]
msg = f"{result.url}\n\nReddit discussion: https://www.reddit.com{result.permalink}"
await channel.send(msg)
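# This loop fires every UPDATE_INTERVAL hours (see settings.py) and posts the
# current top article to the hard-coded server's "dev" channel.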
@monitor_news.before_loop
async def before_check(self):
await self.bot.wait_until_ready()
self.logger.info("Beginning news monitoring loop")
if __name__ == "__main__" and BOT_TOKEN is not None:
logger = logging.getLogger(LOGGER_NAME)
logger.setLevel(logging.INFO)
log_handler = logging.FileHandler(
filename=LOG_FILE,
encoding="utf-8",
mode="w"
)
log_handler.setFormatter(
logging.Formatter('%(asctime)s | %(name)s | %(levelname)s: %(message)s')
)
logger.addHandler(log_handler)
bot = commands.Bot("!")
cog = NotificationCog(bot)
bot.add_cog(cog)
bot.run(BOT_TOKEN) | 34.73913 | 92 | 0.66166 | 306 | 2,397 | 5.039216 | 0.388889 | 0.02594 | 0.018158 | 0.036316 | 0.220493 | 0.145266 | 0.145266 | 0.145266 | 0.145266 | 0.145266 | 0 | 0.012352 | 0.223196 | 2,397 | 69 | 93 | 34.73913 | 0.815789 | 0.030038 | 0 | 0.105263 | 0 | 0.035088 | 0.142857 | 0.018933 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017544 | false | 0 | 0.087719 | 0 | 0.122807 | 0.035088 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea811c96e15046967f303b3383a008dda7865f80 | 8,402 | py | Python | morse-stf/unittest/conv2d_test.py | alipay/Antchain-MPC | f6916465e1da5722ca7efadc4eeaca13ec229707 | [
"Apache-2.0"
] | 33 | 2021-11-23T09:04:03.000Z | 2022-03-14T07:56:31.000Z | morse-stf/unittest/conv2d_test.py | qizhi-zhang/Antchain-MPC | f551170f68b0baff328e6594484e9832230fe719 | [
"Apache-2.0"
] | null | null | null | morse-stf/unittest/conv2d_test.py | qizhi-zhang/Antchain-MPC | f551170f68b0baff328e6594484e9832230fe719 | [
"Apache-2.0"
] | 6 | 2021-11-25T12:38:41.000Z | 2022-02-23T03:29:51.000Z | #!/usr/bin/env python
# coding=utf-8
import tensorflow as tf
import numpy as np
# Load the custom-op shared libraries
model = tf.load_op_library('/Users/jiaofuzhang/morse-stf/cops/_stf_int64conv2D_macos.so')
p_model = tf.load_op_library('/Users/jiaofuzhang/morse-stf/cops/_stf_int64pooling_macos.so')
# ST_model = tf.load_op_library('/Users/jiaofuzhang/morse-stf/cops/_std_pooling_macos.so')
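# NOTE: the shared-library paths above are machine-specific; point them at the
# compiled custom-op .so files on your own machine before running these tests.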
from tensorflow.python.util.compat import collections_abc
def conv2d_forward():
"""
Test the conv2d forward pass of the custom int64 op against tf.nn.conv2d.
:return:
"""
# Build int64 input and filter tensors
input = tf.constant(value=[i for i in range(28 * 28)], shape=[1, 28, 28, 1], dtype=tf.int64)
filter = tf.constant(value=[i % 11 for i in range(5 * 5 * 32)], shape=[5, 5, 1, 32], dtype=tf.int64)
filter2 = tf.constant(value=[i % 17 for i in range(5 * 5 * 32 * 64)], shape=[5, 5, 32, 64], dtype=tf.int64)
# Cast the data to float to produce reference results
f_input = tf.cast(input, dtype=tf.float64)
f_filter = tf.cast(filter, dtype=tf.float64)
f_filter2 = tf.cast(filter2, dtype=tf.float64)
print(filter)
# Run the custom int64 ops
op = model.int64_conv2d(input, filter, strides=[1, 1, 1, 1], padding="SAME")
op = tf.nn.max_pool(op, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
op = model.int64_conv2d(op, filter2, strides=[1, 1, 1, 1], padding="SAME")
op = tf.nn.max_pool(op, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
# print(op)
# Reference: standard float conv2d / max-pool ops
fop = tf.nn.conv2d(f_input, f_filter, strides=[1, 1, 1, 1], padding='SAME')
fop = tf.nn.max_pool(fop, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
fop = tf.nn.conv2d(fop, f_filter2, strides=[1, 1, 1, 1], padding='SAME')
fop = tf.nn.max_pool(fop, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# print(fop)
# Check whether the two outputs match
fop_c = tf.cast(fop, dtype=tf.int64)
op = op.numpy()
fop_c = fop_c.numpy()
if (fop_c == op).all():
print('conv2d forward success')
else:
print("conv2d forward fail")
def conv2d_backprop_input():
# Convolution kernel
kernel = tf.constant(
[
[[[3]], [[4]]],
[[[5]], [[6]]]
]
, tf.float32
)
# Gradient of some function with respect to sigma
partial_sigma = tf.constant(
[
[
[[-1], [1], [3]],
[[2], [-2], [-4]],
[[-3], [4], [1]]
]
]
, tf.float32
)
# Backprop of the gradient with respect to the input
partial_x = tf.compat.v1.nn.conv2d_backprop_input((1, 3, 3, 1), kernel, partial_sigma, [1, 1, 1, 1], 'SAME')
# print(partial_x)
# Repeat the computation with the int64 custom op
int64_kernel = tf.cast(kernel, dtype=tf.int64)
int64_partial_sigma = tf.cast(partial_sigma, dtype=tf.int64)
op_back_input = model.int64_conv2d_backprop_input(input_sizes=(1, 3, 3, 1),
filter=int64_kernel,
out_backprop=int64_partial_sigma,  # pass the upstream gradient (was missing)
strides=[1, 1, 1, 1],
padding='SAME')
# print(op_back_input)
# Check whether the two outputs match
partial_x_to_INT64 = tf.cast(partial_x, dtype=tf.int64)
# Convert to numpy
op_back_input = op_back_input.numpy()
partial_x_to_INT64 = partial_x_to_INT64.numpy()
if (op_back_input == partial_x_to_INT64).all():
print("Backprop Input success!")
else:
print("Backprop Input fail!")
def conv2d_backprop_filter():
# Input tensor
x = tf.constant(
[
[
[[1], [2], [3]],
[[4], [5], [6]],
[[7], [8], [9]]
]
]
, tf.float32
)
# Gradient of some function with respect to sigma
partial_sigma = tf.constant(
[
[
[[-1], [-2], [1]],
[[-3], [-4], [2]],
[[-2], [1], [3]]
]
]
, tf.float32
)
# Backprop of the gradient with respect to the filter
partial_sigma_k = tf.compat.v1.nn.conv2d_backprop_filter(x, (2, 2, 1, 1), partial_sigma, [1, 1, 1, 1], 'SAME')
# print(partial_sigma_k)
# Cast to int64
INT64_x = tf.cast(x, dtype=tf.int64)
INT64_partial_sigma = tf.cast(partial_sigma, dtype=tf.int64)
# Run the custom backprop-filter op
op_back_filter = model.int64_conv2d_backprop_filter(INT64_x, (2, 2, 1, 1), INT64_partial_sigma, [1, 1, 1, 1],
'SAME')
# print(op_back_filter)
# Check whether the results match
partial_sigma_k_Int64 = tf.cast(partial_sigma_k, dtype=tf.int64)
# Convert to numpy
partial_sigma_k_Int64 = partial_sigma_k_Int64.numpy()
op_back_filter = op_back_filter.numpy()
if (op_back_filter == partial_sigma_k_Int64).all():
print("Backprop filter success!")
else:
print("Backprop filtert fail!")
def _get_sequence(value, n, channel_index, name):
"""Formats a value input for gen_nn_ops."""
if value is None:
value = [1]
elif not isinstance(value, collections_abc.Sized):
value = [value]
current_n = len(value)
if current_n == n + 2:
return value
elif current_n == 1:
value = list((value[0],) * n)
elif current_n == n:
value = list(value)
else:
raise ValueError("{} should be of length 1, {} or {} but was {}".format(
name, n, n + 2, current_n))
if channel_index == 1:
return [1, 1] + value
else:
return [1] + value + [1]
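# _get_sequence appears to mirror the helper TensorFlow uses internally to expand
# ksize/strides arguments into a full, data-format-aware list.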
def average_pooling_op():
# Build an int64 input tensor
input = tf.constant(value=[-i for i in range(4 * 4)], shape=[1, 4, 4, 1], dtype=tf.int64)
# print(input)
# Cast to float for the reference result
f_input = tf.cast(input, dtype=tf.float64)
print(f_input.shape)
channel_index = 3
ksize = _get_sequence(2, 2, channel_index, "ksize")
strides = _get_sequence(2, 2, channel_index, "strides")
# Reference result using the standard op
out = tf.nn.avg_pool2d(f_input, ksize=2, strides=2, padding="VALID")
print(f_input)
# float_out = ST_model.float64_avg_pool(f_input, ksize=ksize, strides=strides, padding="VALID")
# exit()
conv_out = tf.nn.conv2d(f_input, filters=np.ones([2, 2, 1, 1])/2/2, strides=strides, padding="VALID")
print("比较conv2d和avgpool")
# print(out)
# print(conv_out)
# print(conv_out)
# Run the custom op
n_out = p_model.int64_avg_pool(input, ksize=ksize, strides=strides, padding="VALID")
print(n_out)
print(conv_out)
print("比较梯度")
# Test the gradient
n_grad = p_model.int64_avg_pool_grad(orig_input_shape=[1, 4, 4, 1], grad=n_out, ksize=ksize, strides=strides,
padding="VALID", data_format="NHWC")
print(n_grad)
conv_grad = tf.compat.v1.nn.conv2d_backprop_input(
input_sizes=(1, 4, 4, 1),
filter=np.ones([2, 2, 1, 1])/2/2,
out_backprop=conv_out,
strides=strides,
padding="VALID"
)
print(tf.cast(conv_grad, dtype=tf.int64))
# print(n_grad.shape)
# print(n_grad)
# print(out)
# print(n_out)
# exit()
# Check whether the results match
out = tf.cast(out, dtype=tf.int64)
out = out.numpy()
n_out = n_out.numpy()
# print(out)
# print(n_out)
if (n_out == out).all():
print("hi")
else:
print("cry")
def cal_sum_pooling():
strides = [1, 1, 1, 1]
ksize = [1, 2, 2, 1]
x = tf.random.uniform(shape=[1, 3, 3, 1], minval=0, maxval=1 << 62, dtype='int64')
print(x)
y = p_model.int64_avg_pool(x, ksize=ksize, strides=strides, padding="VALID")
print(y)
exit()
plosspy1 = tf.random.uniform(shape=[1, 2, 2, 1], minval=0, maxval=1 << 62, dtype='int64')
plosspy2 = tf.random.uniform(shape=[1, 2, 2, 1], minval=0, maxval=1 << 62, dtype='int64')
plosspy = plosspy2+plosspy1
plosspx = p_model.sum_pool_grad(orig_input_shape=[1, 3, 3, 1], grad=plosspy,
ksize=ksize, strides=strides, padding="VALID", data_format="NHWC")
plosspx1 = p_model.sum_pool_grad(orig_input_shape=[1, 3, 3, 1], grad=plosspy1,
ksize=ksize, strides=strides, padding="VALID", data_format="NHWC")
plosspx2 = p_model.sum_pool_grad(orig_input_shape=[1, 3, 3, 1], grad=plosspy2,
ksize=ksize, strides=strides, padding="VALID", data_format="NHWC")
y = plosspx1+plosspx2
print(plosspx)
print(y)
# print(plosspx/4)
if __name__ == "__main__":
# conv2d_forward()
# conv2d_backprop_input()
# conv2d_backprop_filter()
# average_pooling_op()
cal_sum_pooling()
| 32.565891 | 114 | 0.569626 | 1,164 | 8,402 | 3.919244 | 0.151203 | 0.014467 | 0.011837 | 0.009645 | 0.460324 | 0.396317 | 0.364095 | 0.311048 | 0.268523 | 0.188733 | 0 | 0.063076 | 0.277315 | 8,402 | 257 | 115 | 32.692607 | 0.688241 | 0.108665 | 0 | 0.136905 | 0 | 0 | 0.062492 | 0.016062 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.017857 | 0 | 0.071429 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea815721844ae83bf39a44a67475aa7016bff91d | 3,749 | py | Python | core/captcha.py | hvnobug/12306_ticket | 111f77adf4da0cfa3101413e282258914973e0d6 | [
"MIT"
] | 6 | 2021-01-07T09:47:26.000Z | 2022-02-21T17:32:04.000Z | core/captcha.py | hvnobug/12306_ticket | 111f77adf4da0cfa3101413e282258914973e0d6 | [
"MIT"
] | 2 | 2021-01-07T09:51:52.000Z | 2021-01-12T05:44:19.000Z | core/captcha.py | hvnobug/12306_ticket | 111f77adf4da0cfa3101413e282258914973e0d6 | [
"MIT"
] | 2 | 2021-01-06T08:38:48.000Z | 2021-01-06T09:58:26.000Z | import sys
import time
import requests
from selenium.common.exceptions import WebDriverException
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
import config
from core import browser
from utils import console
class Captcha:
def _get_captcha(self):
time.sleep(0.5)
# Fetch the captcha image
self._code_img_ele = browser.find_element_by_id('J-loginImg')
base64_img = self._code_img_ele.get_attribute('src')
self._code_img = base64_img[len('data:image/jpg;base64,'):]
def code_captcha(self):
# Fetch the captcha image
self._get_captcha()
# Ask the solver for click coordinates
points = self._parse_captcha()
# Click the returned coordinates
self._verify_captcha(points)
time.sleep(0.5)
def _verify_captcha(self, points):
# Iterate over the point list and click each (x, y) offset on the captcha image with an action chain
for i in range(len(points) // 2):
ActionChains(browser).move_to_element_with_offset(
self._code_img_ele,
float(points[i * 2]),
float(points[i * 2 + 1])
).click().perform()
time.sleep(1)
def _parse_captcha(self):
# A self-hosted captcha-solving server can also be used here
code_url = f'{config.code_server["scheme"]}://{config.code_server["host"]}{config.code_server["path"]}'
data = {"imageFile": self._code_img}
count = 0
while count < 10:
count += 1
try:
resp = requests.post(code_url, data=data)
if resp.status_code == 200:
resp_json = resp.json()
if resp_json and resp_json.get("code") == 0:
return code_xy(resp_json.get("data")).split(',')
except Exception as e:
print(e)
console.print(f'[red]Captcha solving failed[/red] - [{count}]')
sys.exit(-1)
def code_xy(select):
"""
Convert the offsets returned by the captcha server into real on-page coordinates.
"""
post = []
offsets_x = 0  # left offset of the chosen answer, measured by clicking the centers of the 8 sub-images in a browser
offsets_y = 0  # top offset of the chosen answer
for offset in select:
if offset == '1':
offsets_y = 77
offsets_x = 40
elif offset == '2':
offsets_y = 77
offsets_x = 112
elif offset == '3':
offsets_y = 77
offsets_x = 184
elif offset == '4':
offsets_y = 77
offsets_x = 256
elif offset == '5':
offsets_y = 149
offsets_x = 40
elif offset == '6':
offsets_y = 149
offsets_x = 112
elif offset == '7':
offsets_y = 149
offsets_x = 184
elif offset == '8':
offsets_y = 149
offsets_x = 256
else:
pass
post.append(offsets_x)
post.append(offsets_y)
rand_code = str(post).replace(']', '').replace('[', '').replace("'", '').replace(' ', '')
console.print(f":thumbs_up: 验证码识别成功: [{rand_code}]")
return rand_code
# Slider captcha
def slider_captcha():
try_time = 1
div = browser.find_el_if_exist('nc_1_n1z', by=By.ID)
while div and div.is_displayed():
if try_time > 10:
console.print('Slider captcha verification failed . . .', style='bold red')
sys.exit(-1)
# Action chain
action = ActionChains(browser)
# Click and hold the slider element
action.click_and_hold(div)
# Drag the slider
for i in range(5):
# perform() executes the action chain immediately
# move_by_offset(x, y): x is horizontal, y is vertical
try:
action.move_by_offset(350, 0).perform()  # single large rightward drag
except WebDriverException:
time.sleep(0.5)
time.sleep(0.5)
action.release()
try_time += 1
div = browser.find_el_if_exist('nc_1_n1z', by=By.ID)
| 29.992 | 111 | 0.541744 | 443 | 3,749 | 4.374718 | 0.329571 | 0.04128 | 0.02838 | 0.022704 | 0.159959 | 0.044376 | 0.044376 | 0.044376 | 0.044376 | 0.044376 | 0 | 0.039543 | 0.345692 | 3,749 | 124 | 112 | 30.233871 | 0.75051 | 0.064817 | 0 | 0.265306 | 0 | 0 | 0.073073 | 0.031933 | 0 | 0 | 0 | 0 | 0 | 1 | 0.061224 | false | 0.010204 | 0.091837 | 0 | 0.183673 | 0.040816 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea84f86b6a675c94029f8a4da5d63ad472188324 | 4,588 | py | Python | mujoco/toy_env.py | mrbermell/seed_rl | 9562e178fb8c16d2551d9e5d59594a7f908655dd | [
"Apache-2.0"
] | 733 | 2019-10-14T11:38:22.000Z | 2022-03-24T14:55:50.000Z | mujoco/toy_env.py | mrbermell/seed_rl | 9562e178fb8c16d2551d9e5d59594a7f908655dd | [
"Apache-2.0"
] | 76 | 2019-10-30T14:18:17.000Z | 2021-12-10T11:52:15.000Z | mujoco/toy_env.py | mrbermell/seed_rl | 9562e178fb8c16d2551d9e5d59594a7f908655dd | [
"Apache-2.0"
] | 141 | 2019-10-14T11:38:25.000Z | 2022-02-27T10:36:56.000Z | # coding=utf-8
# Copyright 2019 The SEED Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Toy environments for sanity testing algorithms."""
import collections
import gym
import numpy as np
import tensorflow as tf
class ToyEnv(gym.Env):
"""Environment in which we need to output observations."""
def __init__(self, horizon=3, n_actions=3):
"""Initialize environment.
Args:
horizon: number of timesteps the observations have to be remembered.
n_actions: dimensionality of actions.
"""
self.horizon = horizon
self.observation_space = gym.spaces.Box(-np.inf, np.inf, [n_actions+1])
self.action_space = gym.spaces.Box(-1, 1, [n_actions])
def _get_obs(self):
self._obs = np.random.uniform(
-1, 1, size=self.observation_space.shape[0] - 1).astype(np.float32)
return np.concatenate([self._obs, [0.]], axis=0).astype(np.float32)
def step(self, action):
assert self.action_space.contains(np.clip(action, -1, 1))
self.t += 1
reward = -float(sum((action-self._obs)**2))
return self._get_obs(), reward, self.t >= self.horizon, None
def reset(self):
self.t = 0
return self._get_obs()
def render(self):
pass
class ToyMemoryEnv(gym.Env):
"""Environment in which we need to output observations from previous steps."""
def __init__(self, horizon=3, n_actions=3):
"""Initialize environment.
Args:
horizon: number of timesteps we need to retain observations in memory.
n_actions: dimensionality of actions.
"""
self.horizon = horizon
self.n_actions = n_actions
self.observation_space = gym.spaces.Box(-np.inf, np.inf, [n_actions+1])
self.action_space = gym.spaces.Box(-1, 1, [n_actions])
def _get_obs(self):
if self.t < self.horizon:
return np.concatenate([self.memory[self.t], [0.]],
axis=0).astype(np.float32)
else:
return np.zeros(self.n_actions+1, dtype=np.float32)
def step(self, action):
assert self.action_space.contains(action)
if self.t == 2*self.horizon:
return np.zeros(self.n_actions+1), 0., True, None
if self.t < self.horizon:
reward = float(0.)
else:
reward = -float(sum((action-self.memory[self.t - self.horizon])**2))
self.t += 1
return self._get_obs(), reward, False, None
def reset(self):
self.memory = np.random.uniform(
-1, 1, size=(self.horizon, self.n_actions)).astype(np.float32)
self.t = 0
return self._get_obs()
def render(self):
pass
class BitFlippingEnv(gym.GoalEnv):
"""Goal-based environment in which actions correspond to switching bits.
Based on https://arxiv.org/pdf/1707.01495.pdf.
"""
def __init__(self, n_bits=10, horizon=20):
self._n_bits = n_bits
self._horizon = horizon
self.observation_space = gym.spaces.Dict(
achieved_goal=gym.spaces.Box(low=0, high=1, shape=[n_bits]),
desired_goal=gym.spaces.Box(low=0, high=1, shape=[n_bits]),
observation=gym.spaces.Box(low=0, high=1, shape=[horizon + 1]))
self.action_space = gym.spaces.Discrete(n_bits + 1)
def reset(self):
self.state = np.random.randint(2, size=self._n_bits).astype(np.float32)
self.goal = np.random.randint(2, size=self._n_bits).astype(np.float32)
self.t = 0
return self._get_obs()
def _get_obs(self):
obs = {'achieved_goal': self.state.copy(),
'desired_goal': self.goal.copy(),
'observation': tf.one_hot(self.t, self._horizon + 1).numpy()}
assert self.observation_space.contains(obs)
return collections.OrderedDict(sorted(obs.items()))
def step(self, action):
assert self.action_space.contains(action)
if action != self._n_bits:
self.state[action] = 1 - self.state[action]
self.t += 1
reward = self.compute_reward(self.state, self.goal)
return self._get_obs(), reward, self.t >= self._horizon, {}
def compute_reward(self, achieved_goal, desired_goal, info=None):
return tf.clip_by_value(tf.reduce_sum(
-tf.cast(achieved_goal != desired_goal, tf.float32), axis=-1), -1, 0)
| 33.246377 | 80 | 0.677637 | 682 | 4,588 | 4.431085 | 0.258065 | 0.023163 | 0.027796 | 0.031767 | 0.486764 | 0.439775 | 0.420582 | 0.38683 | 0.362674 | 0.303772 | 0 | 0.023681 | 0.190061 | 4,588 | 137 | 81 | 33.489051 | 0.789559 | 0.249564 | 0 | 0.439024 | 0 | 0 | 0.010711 | 0 | 0 | 0 | 0 | 0 | 0.04878 | 1 | 0.182927 | false | 0.02439 | 0.04878 | 0.012195 | 0.414634 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea8a75ef222f061a5abf08802cb3bf6ecd4b58ea | 20,325 | py | Python | dataset_generator.py | Xtuden-com/si-score | d9460c772a6b8fd4871d9ed95bb77633334f3802 | [
"Apache-2.0"
] | 17 | 2020-09-29T15:25:17.000Z | 2022-03-29T10:27:56.000Z | dataset_generator.py | Xtuden-com/si-score | d9460c772a6b8fd4871d9ed95bb77633334f3802 | [
"Apache-2.0"
] | 1 | 2022-01-16T14:14:16.000Z | 2022-01-16T14:48:35.000Z | dataset_generator.py | isabella232/si-score | abd52d68c4d1aacf2f1bb7d32ed0ff18b0781529 | [
"Apache-2.0"
] | 3 | 2020-11-16T12:51:21.000Z | 2022-01-16T11:50:34.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Class to generate synthetic dataset.
This is used in `generator_script.py`.
Editing that script is probably the easiest way to generate
a synthetic dataset.
Example of how to use this class:
```
import dataset_generator as synthetic
import tensorflow.io.gfile as gfile
from os import path
config = {
'coord': [(0.0, 0.0), (0.5, 0.0), (0.0, 0.5), (0.5, 0.5)],
'area': [0.2],
'rotation': [0],
'bg_resolution': [(500, 500)],
}
dataset_name = 'test'
dataset_dir = './test/'
new_dataset_dir = path.join(dataset_dir, dataset_name, '')
if not gfile.exists(new_dataset_dir):
gfile.makedirs(new_dataset_dir)
dataset = synthetic.Dataset(
config=config,
new_dataset_dir=new_dataset_dir,
num_bgs_per_fg_instance=2)
dataset.generate_dataset()
```
"""
import csv
import functools
import io
import itertools
from multiprocessing import pool
import operator
from os import path
import label_str_to_imagenet_classes as label_dict
import numpy as np
import PIL
from PIL import Image
import tensorflow.io.gfile as gfile
ROOT_DIR = '.'
DEFAULT_FG_DIR = path.join(ROOT_DIR, 'foregrounds', '')
DEFAULT_BG_DIR = path.join(ROOT_DIR, 'backgrounds', '')
def load_image(fname):
with open(fname, mode='rb') as f:
s = f.read()
image = Image.open(io.BytesIO(s))
image.load()
return image
def validate_config(config):
"""Checks config file has all required fields.
Raises an error if this is not the case.
Args:
config: dict.
Returns:
config: dict. (unchanged)
"""
if 'area' not in config.keys():
raise ValueError('Must specify area.')
if 'coord' not in config.keys():
raise ValueError('Must specify coordinates `coord`.')
if 'rotation' not in config.keys():
raise ValueError('Must specify rotation angle `rotation`.')
if 'bg_resolution' not in config.keys():
raise ValueError('Must specify bg_resolution.')
bg_res = config['bg_resolution']
if not isinstance(bg_res, list) or not isinstance(bg_res[0], tuple) or len(
bg_res[0]) != 2:
raise TypeError('bg_resolution should be a list of tuples (width, height).')
return config
def resize_fg(fg, bg, fg_target_size):
"""Resizes foregrounds to `fg_target_size`% of the background area."""
# Resize foreground to have area = fg_size**2 * background_area.
fg_copy = fg.copy()
fg_area = fg.height * fg.width
bg_area = bg.width * bg.height
fg_area_ratio = fg_area / bg_area
resize_factor = np.sqrt(fg_target_size / fg_area_ratio)
fg_copy = fg_copy.resize(
(int(fg.width * resize_factor), int(fg.height * resize_factor)),
PIL.Image.BILINEAR)
return fg_copy
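# Illustrative sketch (not part of the original module): shrinking a 100x100
# foreground against a 200x200 background with fg_target_size=0.2 yields a
# roughly 89x89 foreground, since 89*89 is about 0.2 * 200*200. The helper name
# below is hypothetical, added only to demonstrate resize_fg.
def _example_resize_fg():
  fg = Image.new('RGBA', (100, 100))
  bg = Image.new('RGB', (200, 200))
  return resize_fg(fg, bg, 0.2).size  # -> (89, 89)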
def paste_fg_on_bg(fg, bg, x_coord, y_coord):
"""Pastes foreground on background at offset (x_coord, y_coord).
  x_coord, y_coord are integer pixel offsets of the foreground's top-left corner.
Args:
fg: foreground image of type PIL image. Examples of PIL image types include
PIL.PngImagePlugin.PngImageFile and PIL.JpegImagePlugin.JpegImageFile.
bg: background image of type PIL image.
    x_coord: int. x offset in pixels from the left edge at which to paste the
      foreground.
    y_coord: int. y offset in pixels from the top edge at which to paste the
      foreground.
Returns:
Background: PIL image (e.g. type PIL.JpegImagePlugin.JpegImageFile).
"""
bg_copy = bg.copy()
bg_copy.paste(fg, box=(x_coord, y_coord), mask=fg)
return bg_copy
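# Illustrative sketch (not part of the original module): paste a small opaque
# RGBA square onto a plain background at a fixed integer pixel offset, the same
# way `_generate_and_write_image` below calls this helper. The demo name and
# colours are assumptions made for this example.
def _example_paste_fg_on_bg():
  fg = Image.new('RGBA', (50, 50), (255, 0, 0, 255))
  bg = Image.new('RGB', (200, 200), (255, 255, 255))
  return paste_fg_on_bg(fg, bg, 75, 75)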
def resize_bg(bg, tgt_width, tgt_height):
"""Resizes bg to width = tgt_width, height = tgt_height."""
return bg.resize((tgt_width, tgt_height), PIL.Image.BILINEAR)
def crop_image_to_square(img):
"""Crops image to the largest square that fits inside img.
Crops from the top left corner.
Args:
img: image of type PIL image, e.g. PIL.JpegImagePlugin.JpegImageFile.
Returns:
Square image of same type as input image.
"""
side_length = min(img.height, img.width)
return img.crop((0, 0, side_length, side_length))
def calc_top_left_coordinates(fg, bg, x_coord, y_coord):
"""Returns coordinates of top left corner of object.
Input coordinates are coordinates of centre of object scaled in the range
[0, 1].
Args:
fg: PIL image. Foreground image.
bg: PIL image. Background image.
x_coord: central x-coordinate of foreground object scaled between 0 and 1.
0 = leftmost coordinate of image, 1 = rightmost coordinate of image.
y_coord: central y-coordinate of foreground object scaled between 0 and 1.
0 = topmost coordinate of image, 1 = bottommost coordinate of image.
"""
x_coord = int(x_coord * bg.width)
y_coord = int(y_coord * bg.height)
# x_coord, y_coord should be at the centre of the object.
x_coord_start = int(x_coord - fg.width*0.5)
y_coord_start = int(y_coord - fg.height*0.5)
return x_coord_start, y_coord_start
def calc_pct_inside_image(fg, bg, x_coord_start, y_coord_start):
"""Calculate the percentage of the object that is inside the image.
This calculation is based on the bounding box of the object
as opposed to object pixels.
Args:
fg: PIL image. Foreground image.
bg: PIL image. Background image.
x_coord_start: leftmost x-coordinate of foreground object.
y_coord_start: topmost y-coordinate of foreground object.
Returns:
Float between 0.0 and 1.0 inclusive, indicating the percentage of the
object that is in the image.
"""
x_coord_end = x_coord_start + fg.width
y_coord_end = y_coord_start + fg.height
x_obj_start = max(x_coord_start, 0)
x_obj_end = min(x_coord_end, bg.width)
y_obj_start = max(y_coord_start, 0)
y_obj_end = min(y_coord_end, bg.height)
object_area = fg.width * fg.height
area_inside_image = (x_obj_end - x_obj_start) * (y_obj_end - y_obj_start)
pct_inside_image = area_inside_image / object_area
return pct_inside_image
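# Illustrative sketch (not part of the original module): a worked example of the
# bounding-box overlap computed above. A 100x100 foreground whose top-left
# corner sits at (-50, 0) on a 200x200 background has half of its bounding box
# outside the image, so the function returns 0.5. The helper name is
# hypothetical.
def _example_calc_pct_inside_image():
  fg = Image.new('RGBA', (100, 100))
  bg = Image.new('RGB', (200, 200))
  return calc_pct_inside_image(fg, bg, -50, 0)  # -> 0.5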
def generate_instance_tuples(num_per_class_list):
"""Generate list of tuples [(class_index, instance_index)...]."""
num_classes = len(num_per_class_list)
class_and_instance_indices = []
for i in range(num_classes):
num_instances = num_per_class_list[i]
class_and_instance_indices.extend([(i, j) for j in range(num_instances)])
return class_and_instance_indices
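# Illustrative sketch (not part of the original module): for a hypothetical
# dataset with two instances of class 0 and one instance of class 1, the helper
# above enumerates every (class_index, instance_index) pair.
def _example_generate_instance_tuples():
  pairs = generate_instance_tuples([2, 1])
  assert pairs == [(0, 0), (0, 1), (1, 0)]
  return pairs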
def rotate_image(img, rotation_angle):
"""Rotate image by rotation_angle counterclockwise."""
return img.rotate(
rotation_angle, resample=PIL.Image.BICUBIC, expand=True)
def write_backgrounds_csv(new_dataset_dir, backgrounds_dir):
"""Write CSV mapping background ints to bg filenames."""
bg_filenames = gfile.glob(path.join(backgrounds_dir, '*'))
bg_filenames = [fname.split('/')[-1] for fname in bg_filenames]
csv_filepath = path.join(new_dataset_dir, 'backgrounds.csv')
with open(csv_filepath, 'w') as f:
writer = csv.writer(f)
writer.writerow(['int', 'label'])
for i, fname in enumerate(bg_filenames):
writer.writerow([i, fname])
class Dataset:
"""Generates 2.5D synthetic data and saves it to `new_dataset_dir`.
Use the `Dataset.generate_dataset()` method to generate the dataset and
save it to `new_dataset_dir`.
"""
def __init__(self,
config,
new_dataset_dir,
foregrounds_dir=DEFAULT_FG_DIR,
backgrounds_dir=DEFAULT_BG_DIR,
num_bgs_per_fg_instance=2,
min_pct_inside_image=0.95):
"""Initialise dataset.
Args:
config: ConfigDict.
        Sample config: {'coord': [(0.5, 0.5)],
                        'area': [0.5],
                        'rotation': [0],
                        'bg_resolution': [(1000, 1000)]}
new_dataset_dir: string, directory to save images in.
foregrounds_dir: string, path to foregrounds directory.
directory has form `foregrounds_dir/{class_name}/{image.jpg}`.
backgrounds_dir: string, path to backgrounds directory.
directory has form `backgrounds_dir/{bg_name}.jpg`.
num_bgs_per_fg_instance: int, number of background images sampled to be
combined with each foreground object instance. Max value = number of
backgrounds provided.
min_pct_inside_image: float in [0, 1], minimum percentage of object that
needs to be inside the image. If a generated image
        does not meet this criterion, it is not included.
"""
self.config = validate_config(config)
self.new_dataset_dir = new_dataset_dir
self.num_bgs_per_fg_instance = num_bgs_per_fg_instance
self.min_pct_inside_image = min_pct_inside_image
self.bg_sizes = self.config['bg_resolution'] # width, height
self.multiple_background_resolutions = False
if len(self.bg_sizes) > 1:
self.multiple_background_resolutions = True
self.metadata_filepath = path.join(new_dataset_dir, 'metadata.csv')
self.metadata_header = [
'image_id', 'x_coord', 'y_coord', 'area', 'rotation',
'foreground_class', 'foreground_instance', 'background',
'bg_resolution_width', 'bg_resolution_height', 'pct_inside_image'
]
self._thread_pool = pool.ThreadPool(100)
self._load_foregrounds(foregrounds_dir)
self._load_backgrounds(backgrounds_dir)
self._make_root_class_dirs()
write_backgrounds_csv(self.new_dataset_dir, backgrounds_dir)
def _generate_image_metadata(self):
"""Generates image metadata as cartesian product of attributes.
Uses self.config and sets metadata as self.image_metadata.
"""
fgs_bgs = self._generate_fg_bg_instance_tuples()
config_lists = [self.config['coord'],
self.config['area'],
self.config['rotation'],
fgs_bgs,
self.config['bg_resolution']
]
image_metadata = itertools.product(*config_lists)
self.num_images = functools.reduce(operator.mul, map(len, config_lists), 1)
self.image_metadata = []
for i, row in enumerate(image_metadata):
temp_dict = {'id': i,
'x_coord': row[0][0],
'y_coord': row[0][1],
'area': row[1],
'rotation': row[2],
'fg_class': row[3][0][0],
'fg_instance': row[3][0][1],
'bg_instance': row[3][1],
'bg_resolution': row[4]}
self.image_metadata.append(temp_dict)
def _preprocess_background(self, bg):
"""Crops background to a square and resizes background if applicable.
Only resizes background if exactly one background resolution is provided
in the config.
Args:
bg: background of type PIL image, e.g. PIL.JpegImagePlugin.JpegImageFile.
Returns:
Resized background of type PIL.Image.Image.
"""
bg = crop_image_to_square(bg)
# If only one bg size is given [(width, height)]
if not self.multiple_background_resolutions:
if bg.width != self.bg_sizes[0][0] or bg.height != self.bg_sizes[0][1]:
bg = bg.resize((self.bg_sizes[0][0], self.bg_sizes[0][1]),
PIL.Image.BILINEAR)
return bg
def _load_foregrounds(self, foregrounds_dir):
"""Loads foregrounds from a directory.
Args:
foregrounds_dir: path to directory containing foregrounds.
Directory of the form `foregrounds_dir`/$OBJECT_CLASS/$FILE_NAME.
Produces:
self.fg_classes: a list of names of foreground object classes, e.g.
['ambulance', 'bagel', ...]
self.num_fgs_per_class: a dict of the form {foreground_obj_class_name:
num_fgs_in_that_class}
self.fgs: a list of the form [fg0, fg1, ...] where the foregrounds are
`PIL.PngImagePlugin.PngImageFile`s.
self.fgs_dict: a dict of the form {fg_class_name: [img0, img1, ...]} where
the images are `PIL.PngImagePlugin.PngImageFile`s.
"""
if not gfile.exists(foregrounds_dir):
raise ValueError(
f'Foregrounds directory {foregrounds_dir} does not exist.')
fg_fnames = gfile.glob(path.join(foregrounds_dir, '*/*'))
fg_labels = [x.split('/')[-2] for x in fg_fnames] # e.g. 'car', 'cow'
self.fg_classes = sorted(list(set(fg_labels)))
self.num_fgs_per_class = {
fg_class: len(gfile.glob(path.join(foregrounds_dir, fg_class, '*')))
for fg_class in self.fg_classes
}
self.num_fgs_per_class_list = [
self.num_fgs_per_class[fg_class] for fg_class in self.fg_classes
]
self.fgs = self._thread_pool.map(load_image, fg_fnames)
self.fgs_dict = {fg_class: [] for fg_class in self.fg_classes}
for i, label in enumerate(fg_labels):
self.fgs_dict[label].append(self.fgs[i])
print('Foregrounds loaded.')
def _load_backgrounds(self, backgrounds_dir):
"""Loads backgrounds from a directory.
Args:
      backgrounds_dir: path to directory containing backgrounds.
        Dir of the form `backgrounds_dir`/$BACKGROUND_TYPE/$FILE_NAME.
Produces:
self.bgs: a list of the form [bg0, bg1, ...] where the backgrounds
are `PIL.Image.Image`s.
self.num_bgs: int, number of backgrounds.
"""
if not gfile.exists(backgrounds_dir):
raise ValueError(
f'Backgrounds directory {backgrounds_dir} does not exist.')
bg_fnames = gfile.glob(path.join(backgrounds_dir, '*'))
self.bgs = self._thread_pool.map(load_image, bg_fnames)
self.bgs = self._thread_pool.map(self._preprocess_background, self.bgs)
self.num_bgs = len(self.bgs)
print('Backgrounds loaded.')
def _make_root_class_dirs(self):
"""Make dataset root dir and subdir for each class."""
if not gfile.exists(self.new_dataset_dir):
gfile.makedirs(self.new_dataset_dir)
for class_name in self.fg_classes:
class_dir = path.join(self.new_dataset_dir, class_name, '')
if not gfile.exists(class_dir):
gfile.mkdir(class_dir)
def _generate_and_write_images_and_metadata(self, batch_size=128):
"""Generate and save images and write metadata for a dataset."""
metadata_batch = []
for i, single_image_metadata in enumerate(self.image_metadata):
metadata_batch.append(single_image_metadata)
if (i + 1) % batch_size == 0:
self._generate_and_write_images_and_metadata_batch(metadata_batch)
metadata_batch = []
self._generate_and_write_images_and_metadata_batch(metadata_batch)
def _generate_and_write_images_and_metadata_batch(self, metadata_batch):
"""Generate and save images and write metadata for a batch of images."""
metadata_batch = self._thread_pool.map(self._generate_and_write_image,
metadata_batch)
self._write_batch_metadata(metadata_batch)
def _generate_fg_bg_instance_tuples(self):
"""Generate tuples with fg and bg instances to describe imgs to generate.
Each tuple is ((fg_class_int, fg_instance_int), bg_instance_int).
Returns:
List of such tuples.
"""
fg_tuples = generate_instance_tuples(self.num_fgs_per_class_list)
fg_bg_tuples = []
for fg_tuple in fg_tuples:
bgs = np.random.choice(
self.num_bgs, self.num_bgs_per_fg_instance, replace=False)
fg_bg_tuples.extend([(fg_tuple, bg) for bg in bgs])
return fg_bg_tuples
def generate_dataset(self, batch_size=128):
"""Generate and write synthetic dataset and associated metadata."""
self._generate_image_metadata()
self._write_metadata_header()
self._write_foreground_classes_csv()
self._write_foreground_classes_imagenet_ints_csv()
self._generate_and_write_images_and_metadata(batch_size=batch_size)
print('Dataset generated.')
def _generate_and_write_image(self, image_metadata):
"""Generate image from metadata and write to dataset directory."""
fg_class, fg_instance = image_metadata['fg_class'], image_metadata[
'fg_instance']
bg_num = image_metadata['bg_instance']
x_coord, y_coord = image_metadata['x_coord'], image_metadata['y_coord']
fg_tgt_area = image_metadata['area']
rotation_angle = image_metadata['rotation']
bg_resolution = image_metadata['bg_resolution']
fg = self.fgs_dict[self.fg_classes[fg_class]][fg_instance]
bg = self.bgs[bg_num]
if self.multiple_background_resolutions:
bg = resize_bg(bg, bg_resolution[0], bg_resolution[1])
fg = resize_fg(fg, bg, fg_tgt_area) # fg_target_size, uses background size
fg = rotate_image(fg, rotation_angle)
x_coord_start, y_coord_start = calc_top_left_coordinates(
fg, bg, x_coord, y_coord)
pct_inside_image = calc_pct_inside_image(fg, bg, x_coord_start,
y_coord_start)
if pct_inside_image < self.min_pct_inside_image:
return None
pct_inside_image = round(pct_inside_image, 4)
image = paste_fg_on_bg(fg, bg, x_coord_start, y_coord_start)
# write image to directory
image_id = image_metadata['id']
label = self.fg_classes[fg_class]
file_path = '{}/{}/{}.jpg'.format(self.new_dataset_dir, label, image_id)
with open(file_path, 'wb') as f:
try:
image.save(f)
except TypeError:
print('Failed to generate image num {}: fg: {} {}, bg: {}'.format(
image_id, fg_class, fg_instance, bg_num))
print('Problem is likely that one of the foreground or background '
'images has an incompatible format. Loading and saving them as '
'JPG images using PIL may solve this problem.')
image_metadata.update(
{'pct_inside_image': pct_inside_image})
return image_metadata
def _write_metadata_header(self):
"""Writes metadata header to self.metadata_filepath."""
with open(self.metadata_filepath, 'w') as f:
writer = csv.writer(f)
writer.writerow(self.metadata_header)
def _write_batch_metadata(self, batch_metadata):
"""Appends batch of metadata to self.metadata_filepath.
Args:
batch_metadata: list of lists. Each list is of the form
[(x_coord, y_coord), area, rotation_angle, ((foreground_class_int),
foreground_instance_int), background_instance_int),
(bg_width, bg_height)],
or is `None` if pct_inside_image < min_pct_inside_image.
"""
with open(self.metadata_filepath, 'a') as f:
writer = csv.writer(f)
for row in batch_metadata:
# `row` is None if pct_inside_image is not large enough
if row is not None:
csv_row = [row['id']] # image ID, int
csv_row.extend([row['x_coord'], row['y_coord']])
csv_row.extend([row['area'], row['rotation']]) # area, rotation
csv_row.extend(
[row['fg_class'], row['fg_instance'], row['bg_instance']])
bg_resolution = row['bg_resolution']
csv_row.extend([bg_resolution[0], bg_resolution[1]])
csv_row.extend([row['pct_inside_image']])
writer.writerow(csv_row)
def _write_foreground_classes_csv(self):
"""Write CSV that lists foreground class names and integer indices.
Indices are those used in metadata.
"""
csv_filepath = path.join(self.new_dataset_dir,
'foreground_classes_metadata_indices.csv')
with open(csv_filepath, 'w') as f:
writer = csv.writer(f)
writer.writerow(['int', 'label'])
for i, label in enumerate(self.fg_classes):
writer.writerow([i, label])
def _write_foreground_classes_imagenet_ints_csv(self):
"""Write CSV that lists foreground class names and integer indices.
Indices are those used in metadata.
"""
csv_filepath = path.join(self.new_dataset_dir, 'foreground_classes.csv')
with open(csv_filepath, 'w') as f:
writer = csv.writer(f)
writer.writerow(['int', 'label'])
for label in self.fg_classes:
index = label_dict.label_str_to_imagenet_classes[label]
writer.writerow([index, label])
| 35.909894 | 80 | 0.688708 | 2,919 | 20,325 | 4.543679 | 0.132922 | 0.013572 | 0.020584 | 0.009048 | 0.293976 | 0.222122 | 0.158109 | 0.125236 | 0.098997 | 0.090402 | 0 | 0.008648 | 0.209151 | 20,325 | 565 | 81 | 35.973451 | 0.816474 | 0.360738 | 0 | 0.0681 | 0 | 0 | 0.094948 | 0.004892 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09319 | false | 0 | 0.043011 | 0 | 0.189964 | 0.017921 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea8bb38f6967b2f47dba7a59f05c5a33fb7946c4 | 3,033 | py | Python | Driver/ScriptDriver.py | isikhar/xia2 | b81a98ab5debfa10f0fceb413ee7701f44d21470 | [
"BSD-3-Clause"
] | null | null | null | Driver/ScriptDriver.py | isikhar/xia2 | b81a98ab5debfa10f0fceb413ee7701f44d21470 | [
"BSD-3-Clause"
] | null | null | null | Driver/ScriptDriver.py | isikhar/xia2 | b81a98ab5debfa10f0fceb413ee7701f44d21470 | [
"BSD-3-Clause"
] | null | null | null | import os
import subprocess
from xia2.Driver.DefaultDriver import DefaultDriver
from xia2.Driver.DriverHelper import script_writer
class ScriptDriver(DefaultDriver):
def __init__(self):
super(ScriptDriver, self).__init__()
self._script_command_line = []
self._script_standard_input = []
self._script_name = self._name
self._script_status = 0
# this is opened by the close() method and read by output
# from self._script_name.xout
self._output_file = None
def reset(self):
DefaultDriver.reset(self)
self._script_command_line = []
self._script_standard_input = []
self._script_status = 0
# this is opened by the close() method and read by output
# from self._script_name.xout
self._output_file = None
def set_name(self, name):
"""Set the name to something sensible."""
self._script_name = name
def start(self):
"""This is pretty meaningless in terms of running things through
scripts..."""
for c in self._command_line:
self._script_command_line.append(c)
def check(self):
"""NULL overloading of the default check method."""
return True
def _input(self, record):
self._script_standard_input.append(record)
def _output(self):
return self._output_file.readline()
def _status(self):
return self._script_status
def close(self):
"""This is where most of the work will be done - in here is
where the script itself gets written and run, and the output
file channel opened when the process has finished..."""
script_writer(
self._working_directory,
self._script_name,
self._executable,
self._script_command_line,
self._working_environment,
self._script_standard_input,
)
if os.name == "posix":
pipe = subprocess.Popen(
["bash", "%s.sh" % self._script_name], cwd=self._working_directory
)
else:
pipe = subprocess.Popen(
["%s.bat" % self._script_name], cwd=self._working_directory, shell=True
)
self._script_status = pipe.wait()
# at this stage I should read the .xstatus file to determine if the
# process has indeed finished - though it should have done...
try:
xstatus_file = os.path.join(
self._working_directory, "%s.xstatus" % self._script_name
)
self._script_status = int(open(xstatus_file, "r").read())
except Exception:
# this could happen on windows if the program in question
# is a batch file...
self._script_status = 0
self._output_file = open(
os.path.join(self._working_directory, "%s.xout" % self._script_name), "r"
)
def kill(self):
"""This is meaningless..."""
pass
| 28.083333 | 87 | 0.603033 | 361 | 3,033 | 4.806094 | 0.33795 | 0.132565 | 0.072622 | 0.048415 | 0.268012 | 0.253602 | 0.253602 | 0.175216 | 0.175216 | 0.175216 | 0 | 0.002384 | 0.308605 | 3,033 | 107 | 88 | 28.345794 | 0.824988 | 0.236729 | 0 | 0.180328 | 0 | 0 | 0.017272 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.163934 | false | 0.016393 | 0.065574 | 0.032787 | 0.295082 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea8ef424f4f7a85c4d9fe1a9796285adae6fe144 | 24,490 | py | Python | card_box.py | thierry7100/BoxGenerator | 0142f67508887d725f63b194212ccb86e390768c | [
"MIT"
] | 1 | 2019-11-27T08:39:19.000Z | 2019-11-27T08:39:19.000Z | card_box.py | thierry7100/BoxGenerator | 0142f67508887d725f63b194212ccb86e390768c | [
"MIT"
] | null | null | null | card_box.py | thierry7100/BoxGenerator | 0142f67508887d725f63b194212ccb86e390768c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf8
# We will use the inkex module with the predefined Effect base class.
import inkex
# The simplestyle module provides functions for style parsing.
import simplestyle
import math
objStyle = simplestyle.formatStyle(
{'stroke': '#000000',
'stroke-width': 0.1,
'fill': 'none'
})
class inkcape_path:
def __init__(self, Offset, group, Label=None):
self.offsetX = Offset[0]
self.offsetY = Offset[1]
self.Path = ''
self.group = group
self.Label = Label
def MoveTo(self, x, y):
#Return string 'M X Y' where X and Y are updated values from parameters
self.Path += ' M ' + str(round(x-self.offsetX, 3)) + ',' + str(round(y-self.offsetY, 3))
def LineTo(self, x, y):
#Return string 'L X Y' where X and Y are updated values from parameters
self.Path += ' L ' + str(round(x-self.offsetX, 3)) + ',' + str(round(y-self.offsetY, 3))
def LineToRel(self, x, y):
#Return string 'L X Y' where X and Y are updated values from parameters
self.Path += ' l ' + str(round(x, 3)) + ',' + str(round(y, 3))
def LineToHRel(self, x):
#Return string 'h X' where X are updated values from parameters
self.Path += ' h ' + str(round(x, 3))
def LineToVRel(self, y):
#Return string 'v Y' where X and Y are updated values from parameters
self.Path += ' v ' + str(round(y, 3))
def Line(self, x1, y1, x2, y2):
#Return string M X1 Y1 L X2 Y2
self.Path += ' L ' + str(round(x1-self.offsetX, 3)) + ',' + str(round(y1-self.offsetY, 3)) + ' L ' + str(round(x2-self.offsetX, 3)) + ',' + str(round(y2-self.offsetY, 3))
def LineRel(self, x1, y1, x2, y2):
#Return string M X1 Y1 L X2 Y2
self.Path += ' l ' + str(round(x1, 3)) + ',' + str(round(y1, 3)) + ' L ' + str(round(x2, 3)) + ',' + str(round(y2, 3))
def Bezier(self, xc1, yc1, xc2, yc2, x, y):
#Return string C XC1 YC1 XC2 YC2 X Y
self.Path += ' C ' + str(round(xc1-self.offsetX, 3)) + ',' + str(round(yc1-self.offsetY, 3)) + ' ' + str(round(xc2-self.offsetX, 3)) + ',' + str(round(yc2-self.offsetY, 3))+ ' ' + str(round(x-self.offsetX, 3)) + ',' + str(round(y-self.offsetY, 3))
def BezierRel(self, xc1, yc1, xc2, yc2, x, y):
#Return string c XC1 YC1 XC2 YC2 X Y
self.Path += ' c ' + str(round(xc1, 3)) + ',' + str(round(yc1, 3)) + ' ' + str(round(xc2, 3)) + ',' + str(round(yc2, 3))+ ' ' + str(round(x, 3)) + ',' + str(round(y, 3))
def Close(self):
self.Path += ' z'
def GenPath(self):
if self.Label:
line_attribs = {'style': objStyle, 'id' : self.Label, 'd': self.Path}
else:
line_attribs = {'style': objStyle, 'd': self.Path}
inkex.etree.SubElement(self.group, inkex.addNS('path', 'svg'), line_attribs)
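# Illustrative sketch (not part of the original extension): build the SVG path
# string for a 10 x 5 mm rectangle. Passing group=None is fine as long as
# GenPath() is never called, because the group is only used when the SVG
# element is emitted. The helper name is an assumption made for this example.
def _example_rectangle_path():
    path = inkcape_path((0, 0), None)
    path.MoveTo(0, 0)
    path.LineToHRel(10)
    path.LineToVRel(5)
    path.LineToHRel(-10)
    path.Close()
    return path.Path  # e.g. ' M 0,0 h 10 v 5 h -10 z'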
class CardBox(inkex.Effect):
"""
    Creates a new layer with the drawings for a parametrically generated box.
"""
def __init__(self):
inkex.Effect.__init__(self)
self.knownUnits = ['in', 'pt', 'px', 'mm', 'cm', 'm', 'km', 'pc', 'yd', 'ft']
self.OptionParser.add_option('--unit', action = 'store',
type = 'string', dest = 'unit', default = 'mm',
help = 'Unit, should be one of ')
self.OptionParser.add_option('--thickness', action = 'store',
type = 'float', dest = 'thickness', default = '3.0',
help = 'Material thickness')
self.OptionParser.add_option('--n_slot', action = 'store',
type = 'int', dest = 'n_slot', default = '2',
help = 'Number of slots for cards')
self.OptionParser.add_option('--z', action = 'store',
type = 'float', dest = 'z', default = '40.0',
help = "inner height")
self.OptionParser.add_option('--y_card', action = 'store',
type = 'float', dest = 'y_card', default = '89.0',
help = "Cards' height")
self.OptionParser.add_option('--x_card', action = 'store',
type = 'float', dest = 'x_card', default = '58.0',
help = "Cards' width")
self.OptionParser.add_option('--burn', action = 'store',
type = 'float', dest = 'burn', default = '0.1',
help = 'laser burn size')
# Debug Output file
self.fDebug = None
try:
inkex.Effect.unittouu # unitouu has moved since Inkscape 0.91
except AttributeError:
try:
def unittouu(self, unit):
return inkex.unittouu(unit)
except AttributeError:
pass
def DebugMsg(self, s):
if self.fDebug:
self.fDebug.write(s)
def drawHNotch(self, path, notch_length, notch_height, burn, startinternal, delta_first=0):
'''
        Draw a horizontal single notch with length notch_length and height notch_height
given startinternal, we know if the starting point is "inside" (startinternal==1) or outside
to move backwards or forward, change sign of sizenotch
'''
if startinternal != 1:
startinternal = -1
if notch_length < 0:
startinternal = startinternal * -1
path.LineToHRel(delta_first+notch_length - burn * startinternal)
path.LineToVRel(notch_height)
path.LineToHRel(notch_length + burn * startinternal)
path.LineToVRel(-notch_height)
def drawVNotch(self, path, notch_length, notch_height, burn, startinternal, delta_first=0):
'''
Draw a vertical single notch with size notch_length and height notch_height
given startinternal, we know if the starting point is "inside" (startinternal==1) or outside
to move backwards or forward, change sign of sizenotch
'''
if startinternal != 1:
startinternal = -1
if notch_length < 0:
startinternal = startinternal * -1
path.LineToVRel(delta_first+notch_length - burn * startinternal)
path.LineToHRel(notch_height)
path.LineToVRel(notch_length + burn * startinternal)
path.LineToHRel(-notch_height)
def drawLineHNotches(self, path, nb_notch, notch_length, notch_height, burn, startinternal):
if notch_length > 0:
sign = startinternal
else:
sign = -startinternal
self.drawHNotch(path, notch_length, notch_height, burn, startinternal, sign*burn/2)
for i in range(1, nb_notch):
self.drawHNotch(path, notch_length, notch_height, burn, startinternal, 0)
def drawLineVNotches(self, path, nb_notch, notch_length, notch_height, burn, startinternal):
if notch_length > 0:
sign = startinternal
else:
sign = -startinternal
self.drawVNotch(path, notch_length, notch_height, burn, startinternal, sign*burn/2)
for i in range(1, nb_notch):
self.drawVNotch(path, notch_length, notch_height, burn, startinternal, 0)
def drawHole(self, path, x0, y0, dx, dy, burn):
'''
Add a rectangle starting at x0,y0 and with a length dx and width dy to the current path
burn is the burn factor, so actual coordinates are modified by burn/2
'''
path.MoveTo(x0+burn/2, y0+burn/2)
path.LineToVRel(dy-burn)
path.LineToHRel(dx-burn)
path.LineToVRel(-dy+burn)
path.LineToHRel(-dx+burn)
def gen_top(self, length, top_notch_size, nb_top_notch, width, thickness, burn, xOffset, yOffset, parent):
'''
Generate top element. This is a rectangle with notches on one x edge
'''
path = inkcape_path((xOffset, yOffset), parent, 'TOP')
path.MoveTo(0,-thickness)
#first line with notches
self.drawLineHNotches(path, nb_top_notch, top_notch_size, thickness, burn, -1)
path.LineTo(length, -thickness)
#then the three others edges of the rectangle
path.LineTo(length, width)
path.LineToHRel(-length)
path.LineTo(0, -thickness)
path.Close()
path.GenPath()
def gen_bottom(self, n_slots, x_card, length, top_notch_size_x, nb_top_notch_x, width, top_notch_size_y, nb_top_notch_y, thickness, burn, xOffset, yOffset, parent):
'''
Generate bottom element. This is a rectangle with notches on all edges
We also need to draw holes in this element to accept the "walls" between card slots
'''
path = inkcape_path((xOffset, yOffset), parent, 'BOTTOM')
path.MoveTo(0,0)
#first H line with notches
self.drawLineHNotches(path, nb_top_notch_x, top_notch_size_x, -thickness, burn, 1)
path.LineTo(length, 0)
#then the second edge of the rectangle
self.drawLineVNotches(path, nb_top_notch_y, top_notch_size_y, thickness, burn, 1)
path.LineTo(length, width)
#then third edge
self.drawLineHNotches(path, nb_top_notch_x, -top_notch_size_x, thickness, burn, 1)
path.LineTo(0, width)
#and the last one
self.drawLineVNotches(path, nb_top_notch_y, -top_notch_size_y, -thickness, burn, 1)
path.LineTo(0, 0)
#now the holes used to fix the walls
for i in range(1, n_slots):
#For each wall, draw holes corresponding at each notch_y
for j in range(nb_top_notch_y):
self.drawHole(path, i*(x_card+thickness), j*2*top_notch_size_y + top_notch_size_y, thickness, top_notch_size_y, burn)
path.Close()
path.GenPath()
def gen_front(self, n_slots, x_card, length, top_notch_size_x, nb_top_notch_x, zbox, edge_notch_size, nb_edge_notch, thickness, burn, xOffset, yOffset, parent):
'''
        box front, this is a rectangle with notches on 3 edges (not on top) and with holes for the walls
'''
path = inkcape_path((xOffset, yOffset), parent, 'FRONT')
path.MoveTo(-thickness,0)
#first H line without notches
path.LineTo(length+thickness, 0)
#Second line (V)
path.LineTo(length+thickness, 3*thickness)
self.drawLineVNotches(path, nb_edge_notch, edge_notch_size, -thickness, burn, -1)
path.LineTo(length+thickness, zbox+2*thickness)
#Third line (H) with notches
path.LineTo(length, zbox+2*thickness)
self.drawLineHNotches(path, nb_top_notch_x, -top_notch_size_x, -thickness, burn, -1)
path.LineTo(-thickness, zbox+2*thickness)
#and last one
path.LineTo(-thickness, zbox-thickness)
self.drawLineVNotches(path, nb_edge_notch, -edge_notch_size, thickness, burn, -1)
path.LineTo(-thickness, 0)
#now the holes used to fix the walls
for i in range(1, n_slots):
#For each wall, draw holes corresponding at each edge_notch
for j in range(nb_edge_notch):
self.drawHole(path, i*(x_card+thickness), j*2*edge_notch_size + edge_notch_size+2*thickness, thickness, edge_notch_size, burn)
path.Close()
path.GenPath()
def gen_back(self, n_slots, x_card, length, top_notch_size_x, nb_top_notch_x, zbox, edge_notch_size, nb_edge_notch, thickness, burn, xOffset, yOffset, parent):
'''
        box back, this is a rectangle with notches on 3 edges (not on top) and with holes for the walls
Last edge has a cut able to accept top side
'''
path = inkcape_path((xOffset, yOffset), parent, 'BACK')
path.MoveTo(-thickness, thickness)
#first H line without notches
path.LineToHRel(thickness)
path.LineToVRel(thickness)
path.LineToHRel(length)
path.LineToVRel(-thickness)
path.LineTo(length+thickness, thickness)
#Second line (V)
path.LineTo(length+thickness, 3*thickness)
self.drawLineVNotches(path, nb_edge_notch, edge_notch_size, -thickness, burn, -1)
path.LineTo(length+thickness, zbox+2*thickness)
#Third line (H) with notches
path.LineTo(length, zbox+2*thickness)
self.drawLineHNotches(path, nb_top_notch_x, -top_notch_size_x, -thickness, burn, -1)
path.LineTo(-thickness, zbox+2*thickness)
#and last one
path.LineTo(-thickness, zbox-thickness)
self.drawLineVNotches(path, nb_edge_notch, -edge_notch_size, thickness, burn, -1)
path.LineTo(-thickness, thickness)
#now the holes used to fix the walls
for i in range(1, n_slots):
#For each wall, draw holes corresponding at each edge_notch
for j in range(nb_edge_notch):
self.drawHole(path, i*(x_card+thickness), j*2*edge_notch_size + edge_notch_size+2*thickness, thickness, edge_notch_size, burn)
path.Close()
path.GenPath()
def gen_side(self, id_path, y_card, top_notch_size_y, nb_top_notch_y, zbox, edge_notch_size, nb_edge_notch, thickness, burn, xOffset, yOffset, parent):
'''
        box side, this is a rectangle with notches on all edges
'''
path = inkcape_path((xOffset, yOffset), parent, id_path)
path.MoveTo(0,-thickness)
#first H line with notches
self.drawLineHNotches(path, nb_top_notch_y, top_notch_size_y, thickness, burn, -1)
path.LineTo(y_card, -thickness)
#Second line (V)
path.LineTo(y_card, 2*thickness)
self.drawLineVNotches(path, nb_edge_notch, edge_notch_size, thickness, burn, 1)
path.LineTo(y_card, zbox+thickness)
#Third line (H)
self.drawLineHNotches(path, nb_top_notch_y, -top_notch_size_y, -thickness, burn, -1)
path.LineTo(0, zbox+thickness)
#and last one
path.LineTo(0, zbox-2*thickness)
self.drawLineVNotches(path, nb_edge_notch, -edge_notch_size, -thickness, burn, 1)
path.LineTo(0, 0)
path.Close()
path.GenPath()
def gen_internal_wall(self, index, y_card, top_notch_size_y, nb_top_notch_y, zbox, edge_notch_size, nb_edge_notch, thickness, burn, xOffset, yOffset, parent):
'''
        box internal wall, this is a rectangle with notches on 3 edges and an opening
'''
opening_size = 20
if y_card < 30:
opening_size = y_card * 0.7
elif y_card > 100:
opening_size = y_card * 0.2
z_opening = opening_size / 2
if z_opening > (zbox - thickness)*0.75:
z_opening = (zbox - thickness)*0.75
path = inkcape_path((xOffset, yOffset), parent, 'WALL_'+str(index+1))
path.MoveTo(0,0)
#first H line up to opening
path.LineToHRel((y_card - opening_size)/2)
#Then draw opening
path.LineToVRel(zbox-z_opening-thickness)
#Then the curve
path.Bezier((y_card - opening_size)/2, zbox-z_opening-thickness+z_opening*0.551916, y_card/2-opening_size*0.275958, zbox-thickness, y_card/2, zbox-thickness)
path.Bezier( y_card/2+opening_size*0.275958, zbox-thickness, (y_card+opening_size)/2, zbox-z_opening*(1-0.551916)-thickness, (y_card+opening_size)/2, zbox-z_opening-thickness)
path.LineTo((y_card+opening_size)/2, 0)
path.LineTo(y_card, 0)
#Second line (V)
self.drawLineVNotches(path, nb_edge_notch, edge_notch_size, thickness, burn, 1)
path.LineTo(y_card, zbox)
#Third line (H)
self.drawLineHNotches(path, nb_top_notch_y, -top_notch_size_y, thickness, burn, 1)
path.LineTo(0, zbox)
#and last one
path.LineTo(0, zbox-3*thickness)
self.drawLineVNotches(path, nb_edge_notch, -edge_notch_size, -thickness, burn, 1)
path.LineTo(0, 0)
path.Close()
path.GenPath()
def gen_internal_side(self, id_path, y_card, zbox, thickness, xOffset, yOffset, parent):
'''
        internal box side, this is a rectangle with no notches but an opening in the top center
'''
opening_size = 20
if y_card < 30:
opening_size = y_card * 0.7
elif y_card > 100:
opening_size = y_card * 0.2
z_opening = opening_size / 2
if z_opening > (zbox - thickness)*0.75:
z_opening = (zbox - thickness)*0.75
path = inkcape_path((xOffset, yOffset), parent, id_path)
path.MoveTo(0,0)
#first H line up to opening
path.LineToHRel((y_card - opening_size)/2)
#Then draw opening
path.LineToVRel(zbox-z_opening-thickness)
#Then the curve
path.Bezier((y_card - opening_size)/2, zbox-z_opening-thickness+z_opening*0.551916, y_card/2-opening_size*0.275958, zbox-thickness, y_card/2, zbox-thickness)
path.Bezier( y_card/2+opening_size*0.275958, zbox-thickness, (y_card+opening_size)/2, zbox-z_opening*(1-0.551916)-thickness, (y_card+opening_size)/2, zbox-z_opening-thickness)
path.LineTo((y_card+opening_size)/2, 0)
path.LineTo(y_card, 0)
#Second line (V)
path.LineTo(y_card, zbox)
#Third line (H)
path.LineTo(0, zbox)
#and last one
path.LineTo(0, 0)
path.Close()
path.GenPath()
def gen_hinge(self, id_path, delta_l, size, top_notch_size, nb_top_notch, thickness, burn, xOffset, yOffset, parent):
'''
        side hinge, this is a line of notches
'''
path = inkcape_path((xOffset, yOffset), parent, id_path)
path.MoveTo(-delta_l,0)
#first H line without notches
path.LineTo(size+delta_l, 0)
#Second line (V)
path.LineTo(size+delta_l, thickness)
if delta_l > 0:
path.LineToHRel(-delta_l)
#Third line (H)
self.drawLineHNotches(path, nb_top_notch, -top_notch_size, thickness, burn, 1)
path.LineTo(-delta_l, thickness)
#and last one
path.LineTo(-delta_l, 0)
path.Close()
path.GenPath()
def effect(self):
"""
Draws a card box box, based on provided parameters
"""
# input sanity check
error = False
if self.options.x_card < 20:
inkex.errormsg('Error: card size should be at least 20 x 20 mm')
error = True
if self.options.y_card < 20:
inkex.errormsg('Error: card size should be at least 20 x 20 mm')
error = True
if self.options.thickness < 1 or self.options.thickness > 10:
inkex.errormsg('Error: thickness should be at least 1mm and less than 10mm')
error = True
if error:
exit()
# convert units
unit = self.options.unit
n_slots = self.options.n_slot
zbox = self.unittouu(str(self.options.z) + unit)
x_card = self.unittouu(str(self.options.x_card) + unit)
y_card = self.unittouu(str(self.options.y_card) + unit)
thickness = self.unittouu(str(self.options.thickness) + unit)
burn = self.unittouu(str(self.options.burn) + unit)
svg = self.document.getroot()
docWidth = self.unittouu(svg.get('width'))
docHeigh = self.unittouu(svg.attrib['height'])
layer = inkex.etree.SubElement(svg, 'g')
layer.set(inkex.addNS('label', 'inkscape'), 'Card Box')
layer.set(inkex.addNS('groupmode', 'inkscape'), 'layer')
group = inkex.etree.SubElement(layer, 'g')
try:
self.fDebug = open( 'DebugCardBox.txt', 'w')
except IOError:
pass
self.DebugMsg("Start processing\n")
#compute total internal length
internal_length = x_card * n_slots + (n_slots-1)*thickness + 2*thickness
#compute top notch size_x
if internal_length < 10:
inkex.errormsg('Error: each edge should be at least 10mm long')
elif internal_length < 20: # Only one notch
nb_top_notch_x = 1
top_notch_size_x = internal_length / 3
elif internal_length < 80:
nb_top_notch_x = int((internal_length-5) / 8)
top_notch_size_x = internal_length/(2*nb_top_notch_x+1)
elif internal_length < 150:
nb_top_notch_x = int((internal_length-5) / 12)
top_notch_size_x = internal_length/(2*nb_top_notch_x+1)
else:
nb_top_notch_x = int((internal_length-5) / 15)
top_notch_size_x = internal_length/(2*nb_top_notch_x+1)
#compute top notch size_y
if y_card < 10:
inkex.errormsg('Error: each edge should be at least 10mm long')
elif y_card < 20: # Only one notch
nb_top_notch_y = 1
top_notch_size_y = y_card / 3
elif y_card < 80:
nb_top_notch_y = int((y_card-5) / 8)
top_notch_size_y = y_card/(2*nb_top_notch_y+1)
elif y_card < 150:
nb_top_notch_y = int((y_card-5) / 12)
top_notch_size_y = y_card/(2*nb_top_notch_y+1)
else:
nb_top_notch_y = int((y_card-5) / 15)
top_notch_size_y = y_card/(2*nb_top_notch_y+1)
#compute edge notch length, height and number
if zbox < 10+4*thickness:
inkex.errormsg('Error: box height too small, should be at least 4*thickness+10')
elif zbox-4*thickness < 20: # Only one notch
nb_edge_notch = 1
edge_notch_size = (zbox-4*thickness) / 3
elif zbox-4*thickness < 40:
nb_edge_notch = int((zbox-4*thickness-5) / 8)
edge_notch_size = (zbox-4*thickness)/(2*nb_edge_notch+1)
elif zbox-4*thickness < 80:
nb_edge_notch = int((zbox-4*thickness-5) / 12)
edge_notch_size = (zbox-4*thickness)/(2*nb_edge_notch+1)
else:
nb_edge_notch = int((zbox-4*thickness-5) / 15)
edge_notch_size = (zbox-4*thickness)/(2*nb_edge_notch+1)
#generate top
self.gen_top(internal_length, top_notch_size_x, nb_top_notch_x, y_card, thickness, burn, 0, 0, group)
        #generate bottom, drawing is placed to the right of the previous one with a 2 mm gap
self.gen_bottom(n_slots, x_card, internal_length, top_notch_size_x, nb_top_notch_x, y_card, top_notch_size_y, nb_top_notch_y, thickness, burn, -internal_length -thickness - 2, 0, group)
#Generate front side, drawing is below the top
self.gen_front(n_slots, x_card, internal_length, top_notch_size_x, nb_top_notch_x, zbox, edge_notch_size, nb_edge_notch, thickness, burn, 0, -y_card - 2, group)
#Generate back side
self.gen_back(n_slots, x_card, internal_length, top_notch_size_x, nb_top_notch_x, zbox, edge_notch_size, nb_edge_notch, thickness, burn, -internal_length - 2*thickness - 5, -y_card-thickness - 5, group)
#generate left and right side
self.gen_side('LEFT', y_card, top_notch_size_y, nb_top_notch_y, zbox, edge_notch_size, nb_edge_notch, thickness, burn, 0, - zbox -y_card-4*thickness - 7, group)
self.gen_side('RIGHT', y_card, top_notch_size_y, nb_top_notch_y, zbox, edge_notch_size, nb_edge_notch, thickness, burn, -y_card-2*thickness - 5, -zbox-y_card-4*thickness - 7, group)
#Then internal LEFT and internal RIGHT, which are close to left and right edges
self.gen_internal_side('LEFT_INTERNAL', y_card, zbox-thickness, thickness, 0, - 2*zbox - y_card-5*thickness - 9, group)
self.gen_internal_side('RIGHT_INTERNAL', y_card, zbox-thickness, thickness, -y_card-2*thickness - 5, - 2*zbox - y_card-5*thickness - 9, group)
#Then internal walls
for i in range(n_slots-1):
self.gen_internal_wall(i, y_card, top_notch_size_y, nb_top_notch_y, zbox-thickness, edge_notch_size, nb_edge_notch, thickness, burn, i*(-y_card-2*thickness-5), - 3*zbox -y_card-4*thickness - 11, group)
#then Side hinges
self.gen_hinge('LEFT_HINGE', 0, y_card, top_notch_size_y, nb_top_notch_y, thickness, burn, 0, - 4*zbox -y_card-4*thickness - 13, group)
self.gen_hinge('RIGHT_HINGE', 0, y_card, top_notch_size_y, nb_top_notch_y, thickness, burn, -y_card-2*thickness - 5, - 4*zbox -y_card-4*thickness - 13, group)
#and at last back_hinge
self.gen_hinge('BACK_HINGE', thickness, internal_length, top_notch_size_x, nb_top_notch_x, thickness, burn, -internal_length - 2*thickness - 5, -y_card-thickness - 2, group)
#Close Debug file if open
if self.fDebug:
self.fDebug.close()
# Create effect instance and apply it.
effect = CardBox()
effect.affect()
| 45.184502 | 255 | 0.623969 | 3,451 | 24,490 | 4.242538 | 0.096204 | 0.048084 | 0.030736 | 0.019534 | 0.704119 | 0.663479 | 0.610887 | 0.59108 | 0.528243 | 0.511577 | 0 | 0.026642 | 0.262801 | 24,490 | 541 | 256 | 45.268022 | 0.784314 | 0.14949 | 0 | 0.387464 | 0 | 0 | 0.044207 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.082621 | false | 0.005698 | 0.008547 | 0.002849 | 0.099715 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea90a057893ca2593717408133204f920c37d55f | 1,825 | py | Python | examples/debugging/basic_ila.py | zyp/luna | 063b1d9b92e7202cb6b286b6862169d261b9d508 | [
"BSD-3-Clause"
] | 2 | 2021-07-23T20:07:31.000Z | 2021-09-28T03:13:43.000Z | examples/debugging/basic_ila.py | hxkrrzq/luna | e56a3eef6a9fa138755512bec1252725601425c1 | [
"BSD-3-Clause"
] | null | null | null | examples/debugging/basic_ila.py | hxkrrzq/luna | e56a3eef6a9fa138755512bec1252725601425c1 | [
"BSD-3-Clause"
] | 2 | 2021-06-26T06:06:52.000Z | 2022-01-19T22:36:19.000Z | #!/usr/bin/env python3
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
import sys
from nmigen import *
from apollo_fpga import create_ila_frontend
from luna import top_level_cli
from luna.gateware.platform import NullPin
from luna.gateware.utils.cdc import synchronize
from luna.gateware.debug.ila import SyncSerialILA
class ILAExample(Elaboratable):
""" Gateware module that demonstrates use of the internal ILA. """
def __init__(self):
self.counter = Signal(28)
self.ila = SyncSerialILA(signals=[self.counter], sample_depth=32)
def emit_analysis_vcd(self, filename='-'):
frontend = create_ila_frontend(self.ila)
frontend.emit_vcd(filename)
def elaborate(self, platform):
m = Module()
m.submodules += self.ila
# Clock divider / counter.
m.d.sync += self.counter.eq(self.counter + 1)
# Set our ILA to trigger each time the counter is at a random value.
# This shows off our example a bit better than counting at zero.
m.d.comb += self.ila.trigger.eq(self.counter == 7)
# Grab our I/O connectors.
leds = [platform.request_optional("led", i, default=NullPin(), dir="o") for i in range(0, 6)]
spi_bus = synchronize(m, platform.request('debug_spi'))
# Attach the LEDs and User I/O to the MSBs of our counter.
m.d.comb += Cat(leds).eq(self.counter[-7:-1])
# Connect our ILA up to our board's aux SPI.
m.d.comb += self.ila.spi.connect(spi_bus)
# Return our elaborated module.
return m
if __name__ == "__main__":
example = top_level_cli(ILAExample)
example.emit_analysis_vcd()
| 30.416667 | 104 | 0.648767 | 254 | 1,825 | 4.53937 | 0.507874 | 0.057242 | 0.041631 | 0.017346 | 0.02255 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011662 | 0.248219 | 1,825 | 59 | 105 | 30.932203 | 0.828717 | 0.287123 | 0 | 0 | 0 | 0 | 0.017174 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.259259 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea9128c8928a1f082ea50190c6c87ae717408dd4 | 5,439 | py | Python | Python/agecalculate.py | studentorganizationofind/hacktoberfest2021 | 2587bfd3a1cecd4ae190f23411994dc9f27d4f0d | [
"CC0-1.0"
] | 6 | 2022-01-01T17:12:32.000Z | 2022-02-22T07:42:57.000Z | Python/agecalculate.py | studentorganizationofind/hacktoberfest2021 | 2587bfd3a1cecd4ae190f23411994dc9f27d4f0d | [
"CC0-1.0"
] | 3 | 2021-12-10T12:11:04.000Z | 2022-03-18T15:45:56.000Z | Python/agecalculate.py | studentorganizationofind/hacktoberfest2021 | 2587bfd3a1cecd4ae190f23411994dc9f27d4f0d | [
"CC0-1.0"
] | 4 | 2022-01-18T05:19:50.000Z | 2022-02-15T22:56:30.000Z | # import all functions from the tkinter
from tkinter import *
# import messagebox class from tkinter
from tkinter import messagebox
# Function for clearing the
# contents of all text entry boxes
def clearAll() :
# deleting the content from the entry box
dayField.delete(0, END)
monthField.delete(0, END)
yearField.delete(0, END)
givenDayField.delete(0, END)
givenMonthField.delete(0, END)
givenYearField.delete(0, END)
rsltDayField.delete(0, END)
rsltMonthField.delete(0, END)
rsltYearField.delete(0, END)
# function for checking error
def checkError() :
# if any of the entry field is empty
# then show an error message and clear
# all the entries
if (dayField.get() == "" or monthField.get() == ""
or yearField.get() == "" or givenDayField.get() == ""
or givenMonthField.get() == "" or givenYearField.get() == "") :
# show the error message
messagebox.showerror("Input Error")
# clearAll function calling
clearAll()
return -1
# function to calculate Age
def calculateAge() :
# check for error
value = checkError()
# if error is occur then return
if value == -1 :
return
else :
# take a value from the respective entry boxes
# get method returns current text as string
birth_day = int(dayField.get())
birth_month = int(monthField.get())
birth_year = int(yearField.get())
given_day = int(givenDayField.get())
given_month = int(givenMonthField.get())
given_year = int(givenYearField.get())
        # if the birth day is greater than the given day, borrow from the
        # given month: decrement it and add the number of days in the birth
        # month to the given day, so the subtraction gives the remaining days
month =[31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
if (birth_day > given_day):
given_month = given_month - 1
given_day = given_day + month[birth_month-1]
        # if the birth month exceeds the given month, borrow from the year:
        # decrement the given year and add 12 to the given month so that
        # the subtraction gives the difference
if (birth_month > given_month):
given_year = given_year - 1
given_month = given_month + 12
# calculate day, month, year
        calculated_day = given_day - birth_day
        calculated_month = given_month - birth_month
        calculated_year = given_year - birth_year
# calculated day, month, year write back
# to the respective entry boxes
# insert method inserting the
# value in the text entry box.
rsltDayField.insert(10, str(calculated_day))
rsltMonthField.insert(10, str(calculated_month))
rsltYearField.insert(10, str(calculated_year))
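# Illustrative sketch (not part of the original script): the same
# borrow-and-subtract logic as calculateAge(), written as a pure function so it
# can be checked without the Tkinter widgets. The function name is an
# assumption made for this example.
def age_between(birth_day, birth_month, birth_year,
                given_day, given_month, given_year):
    month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    if birth_day > given_day:
        given_month = given_month - 1
        given_day = given_day + month[birth_month - 1]
    if birth_month > given_month:
        given_year = given_year - 1
        given_month = given_month + 12
    return (given_day - birth_day,
            given_month - birth_month,
            given_year - birth_year)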
# Driver Code
if __name__ == "__main__" :
# Create a GUI window
gui = Tk()
# Set the background colour of GUI window
gui.configure(background = "light green")
# set the name of tkinter GUI window
gui.title("Age Calculator")
# Set the configuration of GUI window
gui.geometry("525x260")
# Create a Date Of Birth : label
dob = Label(gui, text = "Date Of Birth", bg = "blue")
# Create a Given Date : label
givenDate = Label(gui, text = "Given Date", bg = "blue")
# Create a Day : label
day = Label(gui, text = "Day", bg = "light green")
# Create a Month : label
month = Label(gui, text = "Month", bg = "light green")
# Create a Year : label
year = Label(gui, text = "Year", bg = "light green")
# Create a Given Day : label
givenDay = Label(gui, text = "Given Day", bg = "light green")
# Create a Given Month : label
givenMonth = Label(gui, text = "Given Month", bg = "light green")
# Create a Given Year : label
givenYear = Label(gui, text = "Given Year", bg = "light green")
# Create a Years : label
rsltYear = Label(gui, text = "Years", bg = "light green")
# Create a Months : label
rsltMonth = Label(gui, text = "Months", bg = "light green")
# Create a Days : label
rsltDay = Label(gui, text = "Days", bg = "light green")
# Create a Resultant Age Button and attached to calculateAge function
resultantAge = Button(gui, text = "Resultant Age", fg = "Black", bg = "Red", command = calculateAge)
# Create a Clear All Button and attached to clearAll function
clearAllEntry = Button(gui, text = "Clear All", fg = "Black", bg = "Red", command = clearAll)
# Create a text entry box for filling or typing the information.
dayField = Entry(gui)
monthField = Entry(gui)
yearField = Entry(gui)
givenDayField = Entry(gui)
givenMonthField = Entry(gui)
givenYearField = Entry(gui)
rsltYearField = Entry(gui)
rsltMonthField = Entry(gui)
rsltDayField = Entry(gui)
# grid method is used for placing
# the widgets at respective positions
# in table like structure .
dob.grid(row = 0, column = 1)
day.grid(row = 1, column = 0)
dayField.grid(row = 1, column = 1)
month.grid(row = 2, column = 0)
monthField.grid(row = 2, column = 1)
year.grid(row = 3, column = 0)
yearField.grid(row = 3, column = 1)
givenDate.grid(row = 0, column = 4)
givenDay.grid(row = 1, column = 3)
givenDayField.grid(row = 1, column = 4)
givenMonth.grid(row = 2, column = 3)
givenMonthField.grid(row = 2, column = 4)
givenYear.grid(row = 3, column = 3)
givenYearField.grid(row = 3, column = 4)
resultantAge.grid(row = 4, column = 2)
rsltYear.grid(row = 5, column = 2)
rsltYearField.grid(row = 6, column = 2)
rsltMonth.grid(row = 7, column = 2)
rsltMonthField.grid(row = 8, column = 2)
rsltDay.grid(row = 9, column = 2)
rsltDayField.grid(row = 10, column = 2)
clearAllEntry.grid(row = 12, column = 2)
# Start the GUI
gui.mainloop()
| 26.531707 | 101 | 0.68303 | 780 | 5,439 | 4.711538 | 0.219231 | 0.041905 | 0.035918 | 0.044082 | 0.085442 | 0.041633 | 0 | 0 | 0 | 0 | 0 | 0.023562 | 0.204082 | 5,439 | 204 | 102 | 26.661765 | 0.825364 | 0.305571 | 0 | 0 | 0 | 0 | 0.074094 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0 | 0.021505 | 0 | 0.075269 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea91c58f01740c0a61641449936c64966fc4d04e | 888 | py | Python | tensorpack/tfutils/unit_tests.py | dan-anghel/tensorpack | 86fcffbc167e2b703b9abd17d41388311c90fe7c | [
"Apache-2.0"
] | 1 | 2021-09-25T15:36:07.000Z | 2021-09-25T15:36:07.000Z | tensorpack/tfutils/unit_tests.py | misads/tensorpack | 35599506a7fc31a619d6f2be3844c2bd8dd9c1b1 | [
"Apache-2.0"
] | 1 | 2022-02-10T06:36:29.000Z | 2022-02-10T06:36:29.000Z | tensorpack/tfutils/unit_tests.py | misads/tensorpack | 35599506a7fc31a619d6f2be3844c2bd8dd9c1b1 | [
"Apache-2.0"
] | 1 | 2020-04-21T08:37:38.000Z | 2020-04-21T08:37:38.000Z | #-*- coding: utf-8 -*-
import six
import unittest
import tensorflow as tf
from ..utils import logger
from .scope_utils import under_name_scope
class ScopeUtilsTest(unittest.TestCase):
@under_name_scope(name_scope='s')
def _f(self, check=True):
if check:
assert tf.get_default_graph().get_name_scope().endswith('s')
return True
def test_under_name_scope(self):
self.assertTrue(self._f())
with self.assertRaises(AssertionError):
self._f() # name conflict
@unittest.skipIf(six.PY2, "assertLogs not supported in Python 2")
def test_under_name_scope_warning(self):
x = tf.placeholder(tf.float32, [3])
tf.nn.relu(x, name='s')
with self.assertLogs(logger=logger._logger, level='WARNING'):
self._f(check=False, name_scope='s')
if __name__ == '__main__':
unittest.main()
| 26.117647 | 72 | 0.661036 | 118 | 888 | 4.711864 | 0.466102 | 0.113309 | 0.100719 | 0.057554 | 0.07554 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008621 | 0.216216 | 888 | 33 | 73 | 26.909091 | 0.79023 | 0.039414 | 0 | 0 | 0 | 0 | 0.06463 | 0 | 0 | 0 | 0 | 0 | 0.217391 | 1 | 0.130435 | false | 0 | 0.217391 | 0 | 0.434783 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea92a762fcdc0ef6f68202c15d6298c57886531f | 2,697 | py | Python | mapper.py | litleleprikon/bachelor_paper | 31712e94bdae7ae26ce4629700784e5391249d0c | [
"MIT"
] | null | null | null | mapper.py | litleleprikon/bachelor_paper | 31712e94bdae7ae26ce4629700784e5391249d0c | [
"MIT"
] | null | null | null | mapper.py | litleleprikon/bachelor_paper | 31712e94bdae7ae26ce4629700784e5391249d0c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from sys import stdin, stderr
import re
import numpy as np
from pymongo import MongoClient
__author__ = 'litleleprikon'
HASHTAG_RE = re.compile(r'#[-a-z0-9+&@#/%?=~_()|!:,.;]+', re.IGNORECASE)
FIRST_CAP_RE = re.compile('(.)([A-Z][a-z]+)')
ALL_CAP_RE = re.compile('([a-z0-9])([A-Z])')
PROFILE_RE = re.compile(r'@[-a-z0-9+&@#/%?=~_()|!:,.;]+', re.IGNORECASE)
LINK_RE = re.compile(r'(\(.*?)?\b((?:https?)://[-a-z0-9+&@#/%?=~_()|!:,.;]*[-a-z0-9+&@#/%=~_()|])',
re.IGNORECASE)
SPLIT_RE = re.compile(r" |(?<! |[',\\.:!()@/<>])(?=[',\\.:!()@/<>])|(?<=[',\\.:!()@/<>])(?![',\\.:!()@/<>])",
re.IGNORECASE)
# Texas, USA I'm bot rude I'm outspoken
# Example Sundar Pichai: Why @Google can afford to be patient http://for.tn/1HRW3Wb
# Example That’s a wrap. Catch up with everything you missed during today’s keynote: http://g.co/go/io2015blog #io15
# Example a spot-on addition: http://pamelaclark.tumblr.com/post/89366678584/36 … RT @gvanrossum: "When a woman tells you something is sexist, believe her." http://pamelaclark.tumblr.com/post/871137111
# Example Play for Families will make it easier for #sdf parents to find games, books, movies & more for their kids. http://goo.gl/dYSakn #io15
def convert(tags):
for tag in tags:
s1 = FIRST_CAP_RE.sub(r'\1 \2', tag[1:])
yield ALL_CAP_RE.sub(r'\1 \2', s1).lower()
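# Illustrative sketch (not part of the original script): convert() splits
# camel-cased hashtags into lower-case, space-separated words, e.g.
# '#HelloWorld' -> 'hello world' while '#io15' stays 'io15'. The helper name
# below is an assumption made for this example.
def _example_convert():
    return list(convert(['#HelloWorld', '#io15']))  # ['hello world', 'io15']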
def main():
client = MongoClient()
dictionary = client.sentiment.dictionary
for line in stdin:
try:
key, value = line.split('\t', maxsplit=1)
hashtags = HASHTAG_RE.findall(value)
if len(hashtags):
hashtags = convert(hashtags)
for tag in hashtags:
value = HASHTAG_RE.sub(tag, value, count=1)
value = PROFILE_RE.sub('', value)
value = LINK_RE.sub('', value)
value = map(str.lower, SPLIT_RE.split(value))
words = [i for i in value if i != '' and i != ' ']
words_list = dictionary.find({'word': {'$in': words}})
            happy_values, sad_values = [], []
for word in words_list:
happy_values.append(word['happy'])
sad_values.append(word['sad'])
happy_log_value = np.sum(happy_values)
sad_log_value = np.sum(sad_values)
prob_happy = np.reciprocal(np.exp(sad_log_value - happy_log_value) + 1)
result = np.round(prob_happy)
print('{0}\t{1}'.format(key, result))
except Exception as ex:
stderr.write('\n[ERROR]\n{}\n{}[/ERROR]\n'.format(ex, line))
if __name__ == '__main__':
main()
| 37.985915 | 201 | 0.567297 | 367 | 2,697 | 4.038147 | 0.455041 | 0.016194 | 0.044534 | 0.032389 | 0.121457 | 0.052632 | 0.037787 | 0.037787 | 0.037787 | 0 | 0 | 0.027145 | 0.235076 | 2,697 | 70 | 202 | 38.528571 | 0.689772 | 0.229514 | 0 | 0.043478 | 0 | 0.021739 | 0.160542 | 0.113636 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.086957 | 0 | 0.130435 | 0.021739 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea94bfabd103a08959e89b9caf33b75d013788ef | 12,367 | py | Python | roles/lib_openshift/src/test/unit/test_oc_route.py | ferjosem362/openshift | 41ee91326a9f533396bc876d399d4e7c50c9ea38 | [
"Apache-2.0"
] | null | null | null | roles/lib_openshift/src/test/unit/test_oc_route.py | ferjosem362/openshift | 41ee91326a9f533396bc876d399d4e7c50c9ea38 | [
"Apache-2.0"
] | null | null | null | roles/lib_openshift/src/test/unit/test_oc_route.py | ferjosem362/openshift | 41ee91326a9f533396bc876d399d4e7c50c9ea38 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2
'''
Unit tests for oc route
'''
# To run:
# ./test_oc_route.py
#
# .
# Ran 1 test in 0.002s
#
# OK
import os
import six
import sys
import unittest
import mock
# Removing invalid variable names for tests so that I can
# keep them brief
# pylint: disable=invalid-name,no-name-in-module
# Disable import-error b/c our libraries aren't loaded in jenkins
# pylint: disable=import-error,wrong-import-position
# place class in our python path
module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
sys.path.insert(0, module_path)
from oc_route import OCRoute, locate_oc_binary # noqa: E402
class OCRouteTest(unittest.TestCase):
'''
Test class for OCRoute
'''
def setUp(self):
''' setup method will create a file and set to known configuration '''
pass
@mock.patch('oc_route.locate_oc_binary')
@mock.patch('oc_route.Utils.create_tmpfile_copy')
@mock.patch('oc_route.OCRoute._run')
def test_list_route(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary):
''' Testing getting a route '''
# Arrange
# run_ansible input parameters
params = {
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'state': 'list',
'debug': False,
'name': 'test',
'namespace': 'default',
'tls_termination': 'passthrough',
'dest_cacert_path': None,
'cacert_path': None,
'cert_path': None,
'key_path': None,
'dest_cacert_content': None,
'cacert_content': None,
'cert_content': None,
'key_content': None,
'service_name': 'testservice',
'host': 'test.openshift.com',
'wildcard_policy': None,
'weight': None,
'port': None
}
route_result = '''{
"kind": "Route",
"apiVersion": "v1",
"metadata": {
"name": "test",
"namespace": "default",
"selfLink": "/oapi/v1/namespaces/default/routes/test",
"uid": "1b127c67-ecd9-11e6-96eb-0e0d9bdacd26",
"resourceVersion": "439182",
"creationTimestamp": "2017-02-07T01:59:48Z"
},
"spec": {
"host": "test.example",
"to": {
"kind": "Service",
"name": "test",
"weight": 100
},
"port": {
"targetPort": 8443
},
"tls": {
"termination": "passthrough"
},
"wildcardPolicy": "None"
},
"status": {
"ingress": [
{
"host": "test.example",
"routerName": "router",
"conditions": [
{
"type": "Admitted",
"status": "True",
"lastTransitionTime": "2017-02-07T01:59:48Z"
}
],
"wildcardPolicy": "None"
}
]
}
}'''
# Return values of our mocked function call. These get returned once per call.
mock_cmd.side_effect = [
# First call to mock
(0, route_result, ''),
]
mock_oc_binary.side_effect = [
'oc'
]
mock_tmpfile_copy.side_effect = [
'/tmp/mock.kubeconfig',
]
# Act
results = OCRoute.run_ansible(params, False)
# Assert
self.assertFalse(results['changed'])
self.assertEqual(results['state'], 'list')
self.assertEqual(results['results'][0]['metadata']['name'], 'test')
# Making sure our mock was called as we expected
mock_cmd.assert_has_calls([
mock.call(['oc', '-n', 'default', 'get', 'route', 'test', '-o', 'json'], None),
])
@mock.patch('oc_route.locate_oc_binary')
@mock.patch('oc_route.Utils.create_tmpfile_copy')
@mock.patch('oc_route.Yedit._write')
@mock.patch('oc_route.OCRoute._run')
def test_create_route(self, mock_cmd, mock_write, mock_tmpfile_copy, mock_oc_binary):
''' Testing getting a route '''
# Arrange
# run_ansible input parameters
params = {
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'state': 'present',
'debug': False,
'name': 'test',
'namespace': 'default',
'tls_termination': 'edge',
'dest_cacert_path': None,
'cacert_path': None,
'cert_path': None,
'key_path': None,
'dest_cacert_content': None,
'cacert_content': 'testing',
'cert_content': 'testing',
'key_content': 'testing',
'service_name': 'testservice',
'host': 'test.openshift.com',
'wildcard_policy': None,
'weight': None,
'port': None
}
route_result = '''{
"apiVersion": "v1",
"kind": "Route",
"metadata": {
"creationTimestamp": "2017-02-07T20:55:10Z",
"name": "test",
"namespace": "default",
"resourceVersion": "517745",
"selfLink": "/oapi/v1/namespaces/default/routes/test",
"uid": "b6f25898-ed77-11e6-9755-0e737db1e63a"
},
"spec": {
"host": "test.openshift.com",
"tls": {
"caCertificate": "testing",
"certificate": "testing",
"key": "testing",
"termination": "edge"
},
"to": {
"kind": "Service",
"name": "testservice",
"weight": 100
},
"wildcardPolicy": "None"
},
"status": {
"ingress": [
{
"conditions": [
{
"lastTransitionTime": "2017-02-07T20:55:10Z",
"status": "True",
"type": "Admitted"
}
],
"host": "test.openshift.com",
"routerName": "router",
"wildcardPolicy": "None"
}
]
}
}'''
test_route = '''\
kind: Route
spec:
tls:
caCertificate: testing
termination: edge
certificate: testing
key: testing
to:
kind: Service
name: testservice
weight: 100
host: test.openshift.com
wildcardPolicy: None
apiVersion: v1
metadata:
namespace: default
name: test
'''
# Return values of our mocked function call. These get returned once per call.
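# Sequence: the first two 'get' calls report the route as missing, the
# 'create' call succeeds, and the final 'get' returns the created route.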
mock_cmd.side_effect = [
# First call to mock
(1, '', 'Error from server: routes "test" not found'),
(1, '', 'Error from server: routes "test" not found'),
(0, 'route "test" created', ''),
(0, route_result, ''),
]
mock_oc_binary.side_effect = [
'oc'
]
mock_tmpfile_copy.side_effect = [
'/tmp/mock.kubeconfig',
]
mock_write.assert_has_calls = [
# First call to mock
mock.call('/tmp/test', test_route)
]
# Act
results = OCRoute.run_ansible(params, False)
# Assert
self.assertTrue(results['changed'])
self.assertEqual(results['state'], 'present')
self.assertEqual(results['results']['results'][0]['metadata']['name'], 'test')
# Making sure our mock was called as we expected
mock_cmd.assert_has_calls([
mock.call(['oc', '-n', 'default', 'get', 'route', 'test', '-o', 'json'], None),
mock.call(['oc', '-n', 'default', 'create', '-f', mock.ANY], None),
mock.call(['oc', '-n', 'default', 'get', 'route', 'test', '-o', 'json'], None),
])
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists):
''' Testing binary lookup fallback '''
mock_env_get.side_effect = lambda _v, _d: ''
mock_path_exists.side_effect = lambda _: False
self.assertEqual(locate_oc_binary(), 'oc')
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists):
''' Testing binary lookup in path '''
oc_bin = '/usr/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_path_exists.side_effect = lambda f: f == oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists):
''' Testing binary lookup in /usr/local/bin '''
oc_bin = '/usr/local/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_path_exists.side_effect = lambda f: f == oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists):
''' Testing binary lookup in ~/bin '''
oc_bin = os.path.expanduser('~/bin/oc')
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_path_exists.side_effect = lambda f: f == oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup fallback '''
mock_env_get.side_effect = lambda _v, _d: ''
mock_shutil_which.side_effect = lambda _f, path=None: None
self.assertEqual(locate_oc_binary(), 'oc')
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup in path '''
oc_bin = '/usr/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup in /usr/local/bin '''
oc_bin = '/usr/local/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup in ~/bin '''
oc_bin = os.path.expanduser('~/bin/oc')
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
def tearDown(self):
'''TearDown method'''
pass
if __name__ == "__main__":
unittest.main()
| 31.873711 | 105 | 0.519447 | 1,300 | 12,367 | 4.723846 | 0.183077 | 0.033708 | 0.026054 | 0.022146 | 0.703957 | 0.687673 | 0.667155 | 0.655105 | 0.605927 | 0.574336 | 0 | 0.020529 | 0.346163 | 12,367 | 387 | 106 | 31.956072 | 0.738931 | 0.098973 | 0 | 0.586081 | 0 | 0 | 0.455387 | 0.053191 | 0 | 0 | 0 | 0 | 0.062271 | 1 | 0.043956 | false | 0.014652 | 0.021978 | 0 | 0.069597 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea9516490ce67f7072507e6c62c6b4853a7e844b | 1,789 | py | Python | spn_codes/models.py | zhangyuygss/WSL | c622f606c4b6557b45cec6068713ff05cdb8962a | [
"BSD-3-Clause"
] | 2 | 2018-01-10T08:29:38.000Z | 2018-06-15T09:07:25.000Z | spn_codes/models.py | zhangyuygss/WSL | c622f606c4b6557b45cec6068713ff05cdb8962a | [
"BSD-3-Clause"
] | null | null | null | spn_codes/models.py | zhangyuygss/WSL | c622f606c4b6557b45cec6068713ff05cdb8962a | [
"BSD-3-Clause"
] | 2 | 2018-05-28T07:50:20.000Z | 2018-08-01T15:33:59.000Z | import spn_codes.spatialpooling as spatialpooling
import torch.nn as nn
import torch
import torchvision.models as models
from spn.modules import SoftProposal
class SPNetWSL(nn.Module):
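# Weakly supervised model: VGG16 convolutional backbone, an extra 3x3 conv producing
# num_maps feature maps, a SoftProposal layer, spatial sum pooling, and a
# dropout + linear classifier on top.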
def __init__(self, num_classes=20, num_maps=1024):
super(SPNetWSL, self).__init__()
model = models.vgg16(pretrained=True)
num_features = model.features[28].out_channels
self.features = nn.Sequential(*list(model.features.children())[:-1])
# self.spatial_pooling = pooling
self.addconv = nn.Conv2d(num_features, num_maps, kernel_size=3,
stride=1, padding=1, groups=2, bias=True)
self.maps = nn.ReLU()
self.sp = SoftProposal()
self.sum = spatialpooling.SpatialSumOverMap()
# classification layer
self.classifier = nn.Sequential(
nn.Dropout(0.5),
nn.Linear(num_maps, num_classes)
)
# image normalization
self.image_normalization_mean = [0.485, 0.456, 0.406]
self.image_normalization_std = [0.229, 0.224, 0.225]
def forward(self, x):
x = self.features(x)
x = self.addconv(x)
x = self.maps(x)
sp = self.sp(x)
x = self.sum(sp)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def get_att_map(self, x):
x = self.features(x)
x = self.addconv(x)
x = self.maps(x)
sp = self.sp(x)
x = self.sum(sp)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x, sp
def get_config_optim(self, lr):
return [{'params': self.features.parameters(), 'lr': lr},
# 'self.spatial_pooling' is never created (its assignment in __init__ is commented
# out), so expose the added conv layer's parameters here to avoid an AttributeError.
{'params': self.addconv.parameters()},
{'params': self.classifier.parameters()}]
| 32.527273 | 76 | 0.584684 | 232 | 1,789 | 4.387931 | 0.349138 | 0.019646 | 0.047151 | 0.019646 | 0.19057 | 0.19057 | 0.19057 | 0.19057 | 0.19057 | 0.19057 | 0 | 0.036107 | 0.28787 | 1,789 | 54 | 77 | 33.12963 | 0.762951 | 0.039687 | 0 | 0.318182 | 0 | 0 | 0.011669 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.113636 | 0.022727 | 0.295455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea99007023e0c0b5ecb6b6d69b009da0fd06b755 | 1,461 | py | Python | adabinipostmotion.py | Adabini/AdabiniPostMotion | b968c2a6f6ef181b07d31ba38fd19ef7fb3c8cca | [
"MIT"
] | null | null | null | adabinipostmotion.py | Adabini/AdabiniPostMotion | b968c2a6f6ef181b07d31ba38fd19ef7fb3c8cca | [
"MIT"
] | null | null | null | adabinipostmotion.py | Adabini/AdabiniPostMotion | b968c2a6f6ef181b07d31ba38fd19ef7fb3c8cca | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import mediapipe as mp
import cv2
import os, glob
import sys
# Utils
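# Return True when MediaPipe Hands finds at least one hand in the image at `path`.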
def detect_hand(path):
image = cv2.imread(path, cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
mp_hands = mp.solutions.hands
hands = mp_hands.Hands(min_detection_confidence=0.5, min_tracking_confidence=0.5)
results = hands.process(image)
if results.multi_hand_landmarks:
hands.close()
return True
else:
hands.close()
return False
def split_video(path, output_path):
os.system(f'ffmpeg -i {path} -vf fps=1 {output_path}output%06d.png')
def merge_video(path, frame_path, framerate):
os.system(f'''ffmpeg -framerate {framerate} -i {frame_path}output%06d.png {path}''')
def rename_files(path):
index = 0
# Sort by name so frames keep their temporal order (os.scandir order is arbitrary).
for file in sorted(os.scandir(path), key=lambda entry: entry.name):
index += 1
decimals = 6 - len(str(index))
name = f'{path}output'
for i in range(decimals):
name += '0'
name += str(index)
name += '.png'
os.rename(file.path, name)
def clear_dir(dir):
for file in os.scandir(dir):
os.remove(file.path)
# Main
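# Pipeline: split the input video into 1-fps frames, drop every frame in which a
# hand is detected, renumber the remaining frames, then re-encode them at 2 fps.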
def main(input_path, output_path):
split_video(input_path, 'frames/')
for file in os.scandir('frames/'):
if detect_hand(file.path):
os.remove(file.path)
rename_files('frames/')
merge_video(output_path, 'frames/', 2)
clear_dir('frames/')
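# Command-line entry point; expects the input and output video paths as arguments,
# e.g. (hypothetical file names): python adabinipostmotion.py input.mp4 output.mp4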
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2])
| 22.828125 | 87 | 0.647502 | 214 | 1,461 | 4.252336 | 0.35514 | 0.054945 | 0.02967 | 0.036264 | 0.059341 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019214 | 0.21629 | 1,461 | 63 | 88 | 23.190476 | 0.775546 | 0.021218 | 0 | 0.090909 | 0 | 0 | 0.131965 | 0.038856 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0.090909 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea993ef9973be622bc853c77e3b1b3daba9871cd | 21,609 | py | Python | pysph/sph/wc/gtvf.py | ckesanapalli/pysph | ce0be549543c533200a4c6d6336dd2d772bc0891 | [
"BSD-3-Clause"
] | 3 | 2019-09-07T13:27:19.000Z | 2019-09-07T13:27:46.000Z | pysph/sph/wc/gtvf.py | ckesanapalli/pysph | ce0be549543c533200a4c6d6336dd2d772bc0891 | [
"BSD-3-Clause"
] | null | null | null | pysph/sph/wc/gtvf.py | ckesanapalli/pysph | ce0be549543c533200a4c6d6336dd2d772bc0891 | [
"BSD-3-Clause"
] | 3 | 2022-03-04T16:07:29.000Z | 2022-03-16T19:28:32.000Z | """
Generalized Transport Velocity Formulation
##########################################
Some notes on the paper,
- In the viscosity term of equation (17) a factor of '2' is missing.
- A negative sign is missing from equation (22) i.e, either put a negative
sign in equation (22) or at the integrator step equation(25).
- The Solid Mechanics Equations are not tested.
References
-----------
.. [ZhangHuAdams2017] Chi Zhang, Xiangyu Y. Hu, Nikolaus A. Adams
"A generalized transport-velocity formulation for smoothed particle
hydrodynamics", Journal of Computational Physics 237 (2017),
pp. 216--232.
"""
from compyle.api import declare
from pysph.sph.equation import Equation
from pysph.base.utils import get_particle_array
from pysph.sph.integrator import Integrator
from pysph.sph.integrator_step import IntegratorStep
from pysph.sph.equation import Group, MultiStageEquations
from pysph.sph.scheme import Scheme
from pysph.sph.wc.linalg import mat_vec_mult, mat_mult
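# Build a ParticleArray with the extra GTVF properties: transport velocities
# (uhat/vhat/what) and their accelerations, background pressure p0, and the
# per-particle 3x3 tensors gradvhat/sigma/asigma stored with stride 9.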
def get_particle_array_gtvf(constants=None, **props):
gtvf_props = [
'uhat', 'vhat', 'what', 'rho0', 'rhodiv', 'p0', 'auhat', 'avhat',
'awhat', 'arho', 'arho0'
]
pa = get_particle_array(
constants=constants, additional_props=gtvf_props, **props
)
pa.add_property('gradvhat', stride=9)
pa.add_property('sigma', stride=9)
pa.add_property('asigma', stride=9)
pa.set_output_arrays([
'x', 'y', 'z', 'u', 'v', 'w', 'rho', 'p', 'h', 'm', 'au', 'av', 'aw',
'pid', 'gid', 'tag'
])
return pa
class GTVFIntegrator(Integrator):
def one_timestep(self, t, dt):
self.stage1()
self.do_post_stage(dt, 1)
self.compute_accelerations(0, update_nnps=False)
self.stage2()
# We update domain here alone as positions only change here.
self.update_domain()
self.do_post_stage(dt, 2)
self.compute_accelerations(1)
self.stage3()
self.do_post_stage(dt, 3)
class GTVFStep(IntegratorStep):
def stage1(self, d_idx, d_u, d_v, d_w, d_au, d_av, d_aw, d_uhat, d_vhat,
d_what, d_auhat, d_avhat, d_awhat, dt):
dtb2 = 0.5*dt
d_u[d_idx] += dtb2*d_au[d_idx]
d_v[d_idx] += dtb2*d_av[d_idx]
d_w[d_idx] += dtb2*d_aw[d_idx]
d_uhat[d_idx] = d_u[d_idx] + dtb2*d_auhat[d_idx]
d_vhat[d_idx] = d_v[d_idx] + dtb2*d_avhat[d_idx]
d_what[d_idx] = d_w[d_idx] + dtb2*d_awhat[d_idx]
def stage2(self, d_idx, d_uhat, d_vhat, d_what, d_x, d_y, d_z, d_rho,
d_arho, d_sigma, d_asigma, dt):
d_rho[d_idx] += dt*d_arho[d_idx]
i = declare('int')
for i in range(9):
d_sigma[d_idx*9 + i] += dt * d_asigma[d_idx*9 + i]
d_x[d_idx] += dt*d_uhat[d_idx]
d_y[d_idx] += dt*d_vhat[d_idx]
d_z[d_idx] += dt*d_what[d_idx]
def stage3(self, d_idx, d_u, d_v, d_w, d_au, d_av, d_aw, dt):
dtb2 = 0.5*dt
d_u[d_idx] += dtb2*d_au[d_idx]
d_v[d_idx] += dtb2*d_av[d_idx]
d_w[d_idx] += dtb2*d_aw[d_idx]
class ContinuityEquationGTVF(Equation):
r"""**Evolution of density**
From [ZhangHuAdams2017], equation (12),
.. math::
\frac{\tilde{d} \rho_i}{dt} = \rho_i \sum_j \frac{m_j}{\rho_j}
\nabla W_{ij} \cdot \tilde{\boldsymbol{v}}_{ij}
"""
def initialize(self, d_arho, d_idx):
d_arho[d_idx] = 0.0
def loop(self, d_idx, s_idx, s_m, d_rho, s_rho, d_uhat, d_vhat, d_what,
s_uhat, s_vhat, s_what, d_arho, DWIJ):
uhatij = d_uhat[d_idx] - s_uhat[s_idx]
vhatij = d_vhat[d_idx] - s_vhat[s_idx]
whatij = d_what[d_idx] - s_what[s_idx]
udotdij = DWIJ[0]*uhatij + DWIJ[1]*vhatij + DWIJ[2]*whatij
fac = d_rho[d_idx] * s_m[s_idx] / s_rho[s_idx]
d_arho[d_idx] += fac * udotdij
class CorrectDensity(Equation):
r"""**Density correction**
From [ZhangHuAdams2017], equation (13),
.. math::
\rho_i = \frac{\sum_j m_j W_{ij}}
{\min(1, \sum_j \frac{m_j}{\rho_j^{*}} W_{ij})}
where,
.. math::
\rho_j^{*} = \text{density before this correction is applied.}
"""
def initialize(self, d_idx, d_rho, d_rho0, d_rhodiv):
d_rho0[d_idx] = d_rho[d_idx]
d_rho[d_idx] = 0.0
d_rhodiv[d_idx] = 0.0
def loop(self, d_idx, s_idx, d_rho, d_rhodiv, s_m, WIJ, s_rho0):
d_rho[d_idx] += s_m[s_idx]*WIJ
d_rhodiv[d_idx] += s_m[s_idx]*WIJ/s_rho0[s_idx]
def post_loop(self, d_idx, d_rho, d_rhodiv):
d_rho[d_idx] = d_rho[d_idx] / min(1, d_rhodiv[d_idx])
class MomentumEquationPressureGradient(Equation):
r"""**Momentum Equation**
From [ZhangHuAdams2017], equation (17),
.. math::
\frac{\tilde{d} \boldsymbol{v}_i}{dt} = - \sum_j m_j \nabla W_{ij}
\cdot \left[\left(\frac{p_i}{\rho_i^2} + \frac{p_j}{\rho_j^2}
\right)\textbf{I} - \left(\frac{\boldsymbol{A_i}}{\rho_i^2} +
\frac{\boldsymbol{A_j}}{\rho_j^2} \right)\right] + \sum_j
\frac{\eta_{ij}\boldsymbol{v}_{ij}}{\rho_i \rho_j r_{ij}}
\nabla W_{ij} \cdot \boldsymbol{x}_{ij}
where,
.. math::
\boldsymbol{A_{i/j}} = \rho_{i/j} \boldsymbol{v}_{i/j} \otimes
(\tilde{\boldsymbol{v}}_{i/j} - \boldsymbol{v}_{i/j})
.. math::
\eta_{ij} = \frac{2\eta_i \eta_j}{\eta_i + \eta_j}
.. math::
\eta_{i/j} = \rho_{i/j} \nu
for solids, replace :math:`\boldsymbol{A}_{i/j}` with
:math:`\boldsymbol{\sigma}'_{i/j}`.
The rate of change of transport velocity is given by,
.. math::
(\frac{d\boldsymbol{v}_i}{dt})_c = -p_i^0 \sum_j \frac{m_j}
{\rho_i^2} \nabla \tilde{W}_{ij}
where,
.. math::
\tilde{W}_{ij} = W(\boldsymbol{x}_{ij},\, 0.5\, h_{ij})
.. math::
p_i^0 = \min(10|p_i|, p_{ref})
Notes:
A negative sign in :math:`(\frac{d\boldsymbol{v}_i}{dt})_c` is
missing in the paper [ZhangHuAdams2017].
"""
def __init__(self, dest, sources, pref, gx=0.0, gy=0.0, gz=0.0):
r"""
Parameters
----------
pref : float
reference pressure
gx : float
body force per unit mass along the x-axis
gy : float
body force per unit mass along the y-axis
gz : float
body force per unit mass along the z-axis
"""
self.pref = pref
self.gx = gx
self.gy = gy
self.gz = gz
super(MomentumEquationPressureGradient, self).__init__(dest, sources)
def initialize(self, d_idx, d_au, d_av, d_aw, d_auhat, d_avhat, d_awhat,
d_p0, d_p):
d_au[d_idx] = self.gx
d_av[d_idx] = self.gy
d_aw[d_idx] = self.gz
d_auhat[d_idx] = 0.0
d_avhat[d_idx] = 0.0
d_awhat[d_idx] = 0.0
d_p0[d_idx] = min(10*abs(d_p[d_idx]), self.pref)
def loop(self, d_rho, s_rho, d_idx, s_idx, d_p, s_p, s_m, d_au, d_av,
d_aw, DWIJ, d_p0, d_auhat, d_avhat, d_awhat, XIJ, RIJ, SPH_KERNEL,
HIJ):
rhoi2 = d_rho[d_idx] * d_rho[d_idx]
rhoj2 = s_rho[s_idx] * s_rho[s_idx]
pij = d_p[d_idx]/rhoi2 + s_p[s_idx]/rhoj2
tmp = -s_m[s_idx] * pij
d_au[d_idx] += tmp * DWIJ[0]
d_av[d_idx] += tmp * DWIJ[1]
d_aw[d_idx] += tmp * DWIJ[2]
tmp = -d_p0[d_idx] * s_m[s_idx]/rhoi2
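# Background-pressure (p0) contribution to the transport-velocity acceleration;
# the kernel gradient below is evaluated with half the smoothing length, i.e. the
# modified kernel \tilde{W}_{ij} of the docstring.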
dwijhat = declare('matrix(3)')
SPH_KERNEL.gradient(XIJ, RIJ, 0.5*HIJ, dwijhat)
d_auhat[d_idx] += tmp * dwijhat[0]
d_avhat[d_idx] += tmp * dwijhat[1]
d_awhat[d_idx] += tmp * dwijhat[2]
class MomentumEquationViscosity(Equation):
r"""**Momentum equation Artificial stress for solids**
See the class MomentumEquationPressureGradient for details.
Notes:
A factor of '2' is missing in the viscosity equation given by
[ZhangHuAdams2017].
"""
def __init__(self, dest, sources, nu):
r"""
Parameters
----------
nu : float
viscosity of the fluid.
"""
self.nu = nu
super(MomentumEquationViscosity, self).__init__(dest, sources)
def loop(self, d_idx, s_idx, d_rho, s_rho, s_m, d_au,
d_av, d_aw, VIJ, R2IJ, EPS, DWIJ, XIJ):
etai = self.nu * d_rho[d_idx]
etaj = self.nu * s_rho[s_idx]
etaij = 4 * (etai * etaj)/(etai + etaj)
xdotdij = DWIJ[0]*XIJ[0] + DWIJ[1]*XIJ[1] + DWIJ[2]*XIJ[2]
tmp = s_m[s_idx]/(d_rho[d_idx] * s_rho[s_idx])
fac = tmp * etaij * xdotdij/(R2IJ + EPS)
d_au[d_idx] += fac * VIJ[0]
d_av[d_idx] += fac * VIJ[1]
d_aw[d_idx] += fac * VIJ[2]
class MomentumEquationArtificialStress(Equation):
r"""**Momentum equation Artificial stress for solids**
See the class MomentumEquationPressureGradient for details.
"""
def __init__(self, dest, sources, dim):
r"""
Parameters
----------
dim : int
Dimensionality of the problem.
"""
self.dim = dim
super(MomentumEquationArtificialStress, self).__init__(dest, sources)
def _get_helpers_(self):
return [mat_vec_mult]
def loop(self, d_idx, s_idx, d_rho, s_rho, d_u, d_v, d_w, d_uhat, d_vhat,
d_what, s_u, s_v, s_w, s_uhat, s_vhat, s_what, d_au, d_av, d_aw,
s_m, DWIJ):
rhoi = d_rho[d_idx]
rhoj = s_rho[s_idx]
i, j = declare('int', 2)
ui, uj, uidif, ujdif, res = declare('matrix(3)', 5)
Aij = declare('matrix(9)')
for i in range(3):
res[i] = 0.0
for j in range(3):
Aij[3*i + j] = 0.0
ui[0] = d_u[d_idx]
ui[1] = d_v[d_idx]
ui[2] = d_w[d_idx]
uj[0] = s_u[s_idx]
uj[1] = s_v[s_idx]
uj[2] = s_w[s_idx]
uidif[0] = d_uhat[d_idx] - d_u[d_idx]
uidif[1] = d_vhat[d_idx] - d_v[d_idx]
uidif[2] = d_what[d_idx] - d_w[d_idx]
ujdif[0] = s_uhat[s_idx] - s_u[s_idx]
ujdif[1] = s_vhat[s_idx] - s_v[s_idx]
ujdif[2] = s_what[s_idx] - s_w[s_idx]
for i in range(3):
for j in range(3):
Aij[3*i + j] = (ui[i]*uidif[j] / rhoi + uj[i]*ujdif[j] / rhoj)
mat_vec_mult(Aij, DWIJ, 3, res)
d_au[d_idx] += s_m[s_idx] * res[0]
d_av[d_idx] += s_m[s_idx] * res[1]
d_aw[d_idx] += s_m[s_idx] * res[2]
class VelocityGradient(Equation):
r"""**Gradient of velocity vector**
.. math::
(\nabla \otimes \tilde{\boldsymbol{v}})_i = \sum_j \frac{m_j}
{\rho_j} \tilde{\boldsymbol{v}}_{ij} \otimes \nabla W_{ij}
"""
def __init__(self, dest, sources, dim):
r"""
Parameters
----------
dim : int
Dimensionality of the problem.
"""
self.dim = dim
super(VelocityGradient, self).__init__(dest, sources)
def initialize(self, d_idx, d_gradvhat):
for i in range(9):
d_gradvhat[9*d_idx + i] = 0.0
def loop(self, s_idx, d_idx, s_m, d_uhat, d_vhat, d_what, s_uhat, s_vhat,
s_what, s_rho, d_gradvhat, DWIJ):
i, j = declare('int', 2)
uhatij = declare('matrix(3)')
Vj = s_m[s_idx]/s_rho[s_idx]
uhatij[0] = d_uhat[d_idx] - s_uhat[s_idx]
uhatij[1] = d_vhat[d_idx] - s_vhat[s_idx]
uhatij[2] = d_what[d_idx] - s_what[s_idx]
for i in range(3):
for j in range(3):
d_gradvhat[d_idx*9 + 3*i + j] += Vj * uhatij[i] * DWIJ[j]
class DeviatoricStressRate(Equation):
r"""**Stress rate for solids**
From [ZhangHuAdams2017], equation (5),
.. math::
\frac{d \boldsymbol{\sigma}'}{dt} = 2 G (\boldsymbol{\epsilon}
- \frac{1}{3} \text{Tr}(\boldsymbol{\epsilon})\textbf{I}) +
\boldsymbol{\sigma}' \cdot \boldsymbol{\Omega}^{T} +
\boldsymbol{\Omega} \cdot \boldsymbol{\sigma}'
where,
.. math::
\boldsymbol{\Omega_{i/j}} = \frac{1}{2}
\left(\nabla \otimes \boldsymbol{v}_{i/j} -
(\nabla \otimes \boldsymbol{v}_{i/j})^{T}\right)
.. math::
\boldsymbol{\epsilon_{i/j}} = \frac{1}{2}
\left(\nabla \otimes \boldsymbol{v}_{i/j} +
(\nabla \otimes \boldsymbol{v}_{i/j})^{T}\right)
see the class VelocityGradient for :math:`\nabla \otimes \boldsymbol{v}_i`
"""
def __init__(self, dest, sources, dim, G):
r"""
Parameters
----------
dim : int
Dimensionality of the problem.
G : float
value of shear modulus
"""
self.G = G
self.dim = dim
super(DeviatoricStressRate, self).__init__(dest, sources)
def _get_helpers_(self):
return [mat_vec_mult, mat_mult]
def initialize(self, d_idx, d_sigma, d_asigma, d_gradvhat):
i, j, ind = declare('int', 3)
eps, omega, omegaT, sigmai, dvi = declare('matrix(9)', 5)
G = self.G
for i in range(9):
sigmai[i] = d_sigma[d_idx*9 + i]
dvi[i] = d_gradvhat[d_idx*9 + i]
d_asigma[d_idx*9 + i] = 0.0
eps_trace = 0.0
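# Decompose the velocity gradient into the strain-rate tensor eps (symmetric part)
# and the spin tensor omega (antisymmetric part).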
for i in range(3):
for j in range(3):
eps[3*i + j] = 0.5*(dvi[3*i + j] + dvi[3*j + i])
omega[3*i + j] = 0.5*(dvi[3*i + j] - dvi[3*j + i])
if i == j:
eps_trace += eps[3*i + j]
for i in range(3):
for j in range(3):
omegaT[3*j + i] = omega[3*i + j]
smo, oms = declare('matrix(9)', 2)
mat_mult(sigmai, omegaT, 3, smo)
mat_mult(omega, sigmai, 3, oms)
for i in range(3):
for j in range(3):
ind = 3*i + j
d_asigma[d_idx*9 + ind] = 2*G * eps[ind] + smo[ind] + oms[ind]
if i == j:
d_asigma[d_idx*9 + ind] += -2*G * eps_trace/3.0
class MomentumEquationArtificialStressSolid(Equation):
r"""**Momentum equation Artificial stress for solids**
See the class MomentumEquationPressureGradient for details.
"""
def __init__(self, dest, sources, dim):
r"""
Parameters
----------
dim : int
Dimensionality of the problem.
"""
self.dim = dim
super(MomentumEquationArtificialStressSolid, self).__init__(dest,
sources)
def _get_helpers_(self):
return [mat_vec_mult]
def loop(self, d_idx, s_idx, d_sigma, s_sigma, d_au, d_av, d_aw, s_m,
DWIJ):
i = declare('int')
sigmaij = declare('matrix(9)')
res = declare('matrix(3)')
for i in range(9):
sigmaij[i] = d_sigma[d_idx*9 + i] + s_sigma[s_idx*9 + i]
mat_vec_mult(sigmaij, DWIJ, 3, res)
d_au[d_idx] += s_m[s_idx] * res[0]
d_av[d_idx] += s_m[s_idx] * res[1]
d_aw[d_idx] += s_m[s_idx] * res[2]
class GTVFScheme(Scheme):
def __init__(self, fluids, solids, dim, rho0, c0, nu, h0, pref,
gx=0.0, gy=0.0, gz=0.0, b=1.0, alpha=0.0):
r"""Parameters
----------
fluids: list
List of names of fluid particle arrays.
solids: list
List of names of solid particle arrays.
dim: int
Dimensionality of the problem.
rho0: float
Reference density.
c0: float
Reference speed of sound.
nu: float
Real viscosity of the fluid.
h0: float
Reference smoothing length.
pref: float
reference pressure for rate of change of transport velocity.
gx: float
Body force acceleration components in x direction.
gy: float
Body force acceleration components in y direction.
gz: float
Body force acceleration components in z direction.
b: float
constant for the equation of state.
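alpha: float
Artificial viscosity parameter; 0.0 (the default) disables the artificial
viscosity term.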
"""
self.fluids = fluids
self.solids = solids
self.dim = dim
self.rho0 = rho0
self.c0 = c0
self.nu = nu
self.h0 = h0
self.pref = pref
self.gx = gx
self.gy = gy
self.gz = gz
self.b = b
self.alpha = alpha
self.solver = None
def configure_solver(self, kernel=None, integrator_cls=None,
extra_steppers=None, **kw):
"""Configure the solver to be generated.
Parameters
----------
kernel : Kernel instance.
Kernel to use, if none is passed a default one is used.
integrator_cls : pysph.sph.integrator.Integrator
Integrator class to use, use sensible default if none is
passed.
extra_steppers : dict
Additional integration stepper instances as a dict.
**kw : extra arguments
Any additional keyword args are passed to the solver instance.
"""
from pysph.base.kernels import WendlandQuintic
if kernel is None:
kernel = WendlandQuintic(dim=self.dim)
steppers = {}
if extra_steppers is not None:
steppers.update(extra_steppers)
step_cls = GTVFStep
for fluid in self.fluids:
if fluid not in steppers:
steppers[fluid] = step_cls()
if integrator_cls is not None:
cls = integrator_cls
print("Warning: GTVF Integrator is not being used.")
else:
cls = GTVFIntegrator
integrator = cls(**steppers)
from pysph.solver.solver import Solver
self.solver = Solver(
dim=self.dim, integrator=integrator, kernel=kernel, **kw
)
def get_equations(self):
from pysph.sph.wc.transport_velocity import (
StateEquation, SetWallVelocity, SolidWallPressureBC,
VolumeSummation, SolidWallNoSlipBC,
MomentumEquationArtificialViscosity, ContinuitySolid
)
all = self.fluids + self.solids
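# Stage 1: (optional) dummy wall velocities plus the continuity equation.
# Stage 2: density correction, equation of state, solid-wall pressure BC and
# the momentum equations (pressure gradient, viscosity, artificial stress).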
stage1 = []
if self.solids:
eq0 = []
for solid in self.solids:
eq0.append(SetWallVelocity(dest=solid, sources=self.fluids))
stage1.append(Group(equations=eq0, real=False))
eq1 = []
for fluid in self.fluids:
eq1.append(ContinuityEquationGTVF(dest=fluid, sources=self.fluids))
if self.solids:
eq1.append(
ContinuitySolid(dest=fluid, sources=self.solids)
)
stage1.append(Group(equations=eq1, real=False))
eq2, stage2 = [], []
for fluid in self.fluids:
eq2.append(CorrectDensity(dest=fluid, sources=all))
stage2.append(Group(equations=eq2, real=False))
eq3 = []
for fluid in self.fluids:
eq3.append(
StateEquation(dest=fluid, sources=None, p0=self.pref,
rho0=self.rho0, b=1.0)
)
stage2.append(Group(equations=eq3, real=False))
g2_s = []
for solid in self.solids:
g2_s.append(VolumeSummation(dest=solid, sources=all))
g2_s.append(SolidWallPressureBC(
dest=solid, sources=self.fluids, b=1.0, rho0=self.rho0,
p0=self.pref, gx=self.gx, gy=self.gy, gz=self.gz
))
if g2_s:
stage2.append(Group(equations=g2_s, real=False))
eq4 = []
for fluid in self.fluids:
eq4.append(
MomentumEquationPressureGradient(
dest=fluid, sources=all, pref=self.pref,
gx=self.gx, gy=self.gy, gz=self.gz
))
if self.alpha > 0.0:
eq4.append(
MomentumEquationArtificialViscosity(
dest=fluid, sources=all, c0=self.c0,
alpha=self.alpha
))
if self.nu > 0.0:
eq4.append(
MomentumEquationViscosity(
dest=fluid, sources=all, nu=self.nu
))
if self.solids:
eq4.append(
SolidWallNoSlipBC(
dest=fluid, sources=self.solids, nu=self.nu
))
eq4.append(
MomentumEquationArtificialStress(
dest=fluid, sources=self.fluids, dim=self.dim
))
stage2.append(Group(equations=eq4, real=True))
return MultiStageEquations([stage1, stage2])
def setup_properties(self, particles, clean=True):
particle_arrays = dict([(p.name, p) for p in particles])
dummy = get_particle_array_gtvf(name='junk')
props = list(dummy.properties.keys())
props += [dict(name=p, stride=v) for p, v in dummy.stride.items()]
output_props = dummy.output_property_arrays
for fluid in self.fluids:
pa = particle_arrays[fluid]
self._ensure_properties(pa, props, clean)
pa.set_output_arrays(output_props)
solid_props = ['uf', 'vf', 'wf', 'vg', 'ug', 'wij', 'wg', 'V']
props += solid_props
for solid in self.solids:
pa = particle_arrays[solid]
self._ensure_properties(pa, props, clean)
pa.set_output_arrays(output_props)
| 31.777941 | 79 | 0.547318 | 3,047 | 21,609 | 3.674762 | 0.120446 | 0.040011 | 0.012503 | 0.006966 | 0.384389 | 0.293204 | 0.237474 | 0.223989 | 0.175404 | 0.170224 | 0 | 0.022886 | 0.318571 | 21,609 | 679 | 80 | 31.824742 | 0.737521 | 0.25221 | 0 | 0.229947 | 0 | 0 | 0.01611 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.082888 | false | 0 | 0.029412 | 0.008021 | 0.15508 | 0.002674 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea9bf95a96922eb0043225c8a1b649cee28193d7 | 6,343 | py | Python | src/comp/amazonScraper.py | Dosclic98/PerProduct_AmazonPriceTracker | 3b6af36986cb2f039e0d7690d034a12dc520c922 | [
"Apache-2.0"
] | null | null | null | src/comp/amazonScraper.py | Dosclic98/PerProduct_AmazonPriceTracker | 3b6af36986cb2f039e0d7690d034a12dc520c922 | [
"Apache-2.0"
] | null | null | null | src/comp/amazonScraper.py | Dosclic98/PerProduct_AmazonPriceTracker | 3b6af36986cb2f039e0d7690d034a12dc520c922 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
import json
from src.model.amazonItem import AmazonItem
import requests
import urllib.parse
from bs4 import BeautifulSoup
from price_parser import Price
from decimal import Decimal
import random
import re
import time
# TODO Add a header randomizer to generate a wider variety of headers
class AmazonScraper:
baseUrlRedirect = "https://www.amazon.it"
baseUrl = baseUrlRedirect + "/"
searchUrlPart = "s?"
searchKey = "k"
pageParam = "&ref=sr_pg_"
headers = {
"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0",
"Accept-Encoding":"gzip, deflate",
"Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"DNT":"1",
"Connection":"close",
"Upgrade-Insecure-Requests":"1",
"Content-Type": "text/html"
}
affInfoPathFile = "./data/aff/affInfo.json"
def searchItemsMultipleQuery(self, queryStrList, debug = False):
setToRet = set()
# Randomize the order of the queries
random.shuffle(queryStrList)
for queryStr in queryStrList:
foundItems = self.searchItems(queryStr, debug=debug)
setToRet = setToRet.union(foundItems)
# Randomizer to randomly separate requests in time
time.sleep(random.randint(4,8))
return setToRet
def searchItemsMultiplePage(self, queryStr, maxPage = 1, debug = False):
setToRet = set()
for numPage in range(1, maxPage + 1):
foundItems = self.searchItems(queryStr, page=numPage, debug=debug)
setToRet = setToRet.union(foundItems)
# Randomizer to randomly separate requests in time
time.sleep(random.randint(4,8))
return setToRet
def searchItems(self, queryStr, page = 1, debug = False):
if page < 1:
raise ValueError("Parameter page must be greater than 0")
foundItems = set()
complUrl = self.baseUrl + self.searchUrlPart + self.searchKey + "=" + urllib.parse.quote_plus(queryStr) + self.pageParam + str(page)
r = requests.get(complUrl, headers=self.headers)
soup = BeautifulSoup(r.content, features="html5lib")
for div in soup.findAll("div", {"data-component-type":"s-search-result"}):
foundItems.add(self.initItemFromDiv(div, debug=debug))
return foundItems
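# Rebuild the product link, injecting the affiliate tag loaded from affInfo.json
# into the URL query string (unwrapping Amazon's redirect 'url' parameter when present).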
def genAffLink(self, linkTitle):
if linkTitle["href"] == None:
return None
else:
fp = open(self.affInfoPathFile)
affTag = json.load(fp)
fp.close()
objLinkAsUrl = list(urllib.parse.urlparse(self.baseUrlRedirect + linkTitle["href"]))
query = dict(urllib.parse.parse_qsl(objLinkAsUrl[4]))
query.update(affTag)
objLinkAsUrl[4] = urllib.parse.urlencode(query)
finalLink = None
try:
link = query["url"]
linkAsUrl = list(urllib.parse.urlparse(self.baseUrlRedirect + link))
realQuery = dict(urllib.parse.parse_qsl(linkAsUrl[4]))
realQuery.update(affTag)
linkAsUrl[4] = urllib.parse.urlencode(realQuery)
finalLink = urllib.parse.urlunparse(linkAsUrl)
except KeyError:
finalLink = urllib.parse.urlunparse(objLinkAsUrl)
return finalLink
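# Parse one search-result <div> into an AmazonItem: ASIN, image, affiliate link,
# title, star rating, price, used price, coupon discount and Prime flag.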
def initItemFromDiv(self, div, debug=False):
id = div["data-asin"]
imgLink = div.findAll("img")[0]["src"]
linkTitle = div.findAll("a", {"class":"a-link-normal a-text-normal"})[0]
objLink = self.genAffLink(linkTitle)
title = linkTitle.findAll("span", {"class":"a-size-base-plus a-color-base a-text-normal"})[0].get_text()
starsCont = div.findAll("span", {"class":"a-icon-alt"})
stars = self.parseStarStr(None
if len(starsCont) == 0
else div.findAll("span", {"class":"a-icon-alt"})[0].get_text())
priceVal = div.findAll("span", {"class":"a-price-whole"})
priceSymbol = div.findAll("span", {"class":"a-price-symbol"})
price = None if len(priceVal) == 0 or len(priceSymbol) == 0 else Price.fromstring(priceVal[0].get_text() + priceSymbol[0].get_text())
usedPriceParent = div.findAll("div", {"class":"a-row a-size-base a-color-secondary"})
if len(usedPriceParent) != 0:
usedPriceCont = usedPriceParent[0].findAll("span", {"class":"a-color-base", "dir":"auto"})
usedPrice = Price.fromstring(usedPriceCont[0].get_text()) if len(usedPriceCont) != 0 else None
else:
usedPrice = None
discRegex = re.compile("[\d]+")
discList = div.findAll("span", {"class":"s-coupon-highlight-color"})
discount = None if len(discList) == 0 else int(discRegex.findall(discList[0].get_text())[0])
isPrime = False if len(div.findAll("i", {"class":"a-icon a-icon-prime a-icon-medium"})) == 0 else True
# Added for some edge cases where the used price gets read with "None" amount or currency
if(usedPrice != None and (usedPrice.amount_float == None or usedPrice.currency == None)): usedPrice = None
# Debug Tests
if debug:
print("Id: " + id)
print("ImgLink: " + imgLink)
print("ObjLink: " + objLink)
print("Title: " + title)
print("RevStar: " + str(stars))
if(price != None):
print("Price: " + price.amount_text + " " + price.currency)
else:
print("Price: Unknown")
if(usedPrice != None):
print("usedPrice: " + str(usedPrice.amount_text) + " " + str(usedPrice.currency))
print("Discount: " + str(discount))
print("IsPrime: " + str(isPrime))
print(" ")
return AmazonItem(id, title, stars, objLink, imgLink, price, usedPrice, discount, isPrime)
def parseStarStr(self, strStar):
if strStar == None: return None
else:
starSplitted = strStar.split(sep=" ", maxsplit=1)
return None if len(starSplitted) == 0 else float(starSplitted[0].replace(",", "."))
| 42.286667 | 141 | 0.589626 | 699 | 6,343 | 5.329041 | 0.331903 | 0.02953 | 0.030067 | 0.027383 | 0.129396 | 0.117047 | 0.081074 | 0.066577 | 0.066577 | 0.066577 | 0 | 0.014433 | 0.279048 | 6,343 | 150 | 142 | 42.286667 | 0.800131 | 0.049346 | 0 | 0.101695 | 0 | 0.016949 | 0.137782 | 0.02241 | 0 | 0 | 0 | 0.006667 | 0 | 1 | 0.050847 | false | 0 | 0.084746 | 0 | 0.262712 | 0.09322 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea9ccfc417e3905564b91109195f3ac5eabb3b37 | 5,450 | py | Python | 2019/day_16/solution.py | dandiez/AdventOfCode | 99ebe6991964290ede87b144c8692c8f6b31030d | [
"MIT"
] | null | null | null | 2019/day_16/solution.py | dandiez/AdventOfCode | 99ebe6991964290ede87b144c8692c8f6b31030d | [
"MIT"
] | null | null | null | 2019/day_16/solution.py | dandiez/AdventOfCode | 99ebe6991964290ede87b144c8692c8f6b31030d | [
"MIT"
] | null | null | null | import itertools
from typing import List
from unittest import TestCase
import numpy as np
def main(input_file):
"""Solve puzzle and connect part 1 with part 2 if needed."""
# part 1
inp = read_input(input_file)
p1 = part_1(inp)
print(f"Solution to part 1: {p1}")
# part 2
inp = read_input(input_file)
p2 = part_2(inp)
print(f"Solution to part 2: {p2}")
return p1, p2
def read_input(filename="input"):
with open(filename) as f:
lines = [line.strip() for line in f.readlines() if line.strip()]
inp = [int(val) for val in lines[0]] # parse here...
return inp
def generate_base_cyclic_pattern_for_row(row):
base_pattern = (0, 1, 0, -1)
repeat_times = row + 1
for value in itertools.cycle(base_pattern):
for _ in range(repeat_times):
yield value
def generate_repeating_pattern(row, length):
base = generate_base_cyclic_pattern_for_row(row)
next(base) # consume first element
value = next(base)
for _ in range(length):
yield value
value = next(base)
def get_unit_digit(number):
return abs(number) % 10
def make_single_digit_array(signal_array):
single_digit_signal = np.array([get_unit_digit(num) for num in signal_array])
return single_digit_signal
def part_1(inp, num_phases=100):
signal = np.array(inp)
length = len(signal)
pattern_matrix = np.stack(
[
np.fromiter(generate_repeating_pattern(row, length), "int8", length)
for row in range(length)
]
)
for _ in range(num_phases):
output_signal = np.matmul(signal, pattern_matrix.T)
output_signal_single_digit = make_single_digit_array(output_signal)
signal = output_signal_single_digit
return "".join([str(n) for n in signal[0:8]])
def full_signal_iter(inp, repetitions):
for _ in range(repetitions):
yield from inp
def part_2_with_trick(inp, repetitions=10000, num_phases=100):
offset = int("".join([str(v) for v in inp[0:7]]))
# print(f"offset is {offset}")
signal = list(full_signal_iter(inp, repetitions))
length = len(signal)
if not offset > length / 2:
raise RuntimeError("cannot use the trick")
print("Can use the trick, but still can take a little while...")
signal = signal[offset:]
length = len(signal)
for n in range(num_phases):
# print(f"phase {n}")
output_list = [sum(signal)]
for digit, value in zip(range(1, length), signal):
digit_sum = output_list[-1] - value # would be faster to start from the end...
output_list.append(digit_sum)
signal = [abs(v) % 10 for v in output_list]
return "".join([str(n) for n in signal[0:8]])
def generate_output_signal_array_mult_single(signal: List, length, offset):
signal_array = np.array(signal)
output_signal = list()
for digit_number in range(length):
if digit_number % 100 == 0:
pass
# print(f"Calculating digit number {digit_number}")
factors = np.fromiter(
generate_repeating_pattern(digit_number, length), "int8", length
)
scalar_product = np.dot(signal_array, factors)
output_signal.append(abs(scalar_product) % 10)
return output_signal
def part_1_alt(inp, num_phases=100):
return part_2(inp, num_phases=num_phases, repetitions=1, is_part_1=True)
def part_2(inp, num_phases=100, repetitions=10000, is_part_1=False):
if not is_part_1:
try:
return part_2_with_trick(
inp, repetitions=repetitions, num_phases=num_phases
)
except RuntimeError:
pass
offset = int("".join([str(v) for v in inp[0:7]]))
# print(f"offset is {offset}")
signal = list(full_signal_iter(inp, repetitions))
length = len(signal)
for n in range(num_phases):
# print(f"phase {n}")
output_signal = generate_output_signal_array_mult_single(signal, length, offset)
signal = output_signal
if is_part_1:
return "".join([str(n) for n in signal[0:8]])
return "".join([str(n) for n in signal[0:8]])
def test_sample_0(self):
inp = [int(v) for v in "12345678"]
expected = "01029498"
self.assertEqual(expected, part_1(inp, num_phases=4))
self.assertEqual(expected, part_1_alt(inp, num_phases=4))
def test_sample_1(self):
inp = [int(v) for v in "80871224585914546619083218645595"]
expected = "24176176"
self.assertEqual(expected, part_1(inp))
self.assertEqual(expected, part_1_alt(inp))
def test_sample_2(self):
inp = [int(v) for v in "19617804207202209144916044189917"]
expected = "73745418"
self.assertEqual(expected, part_1(inp))
self.assertEqual(expected, part_1_alt(inp))
def test_sample_3(self):
inp = [int(v) for v in "69317163492948606335995924319873"]
expected = "52432133"
self.assertEqual(expected, part_1(inp))
self.assertEqual(expected, part_1_alt(inp))
def test_sample_4(self):
print("Part 2 test...")
inp = [int(v) for v in "03036732577212944063491565474664"]
expected = "84462026"
self.assertEqual(expected, part_2(inp))
if __name__ == "__main__":
print("*** solving tests ***")
test_sample_0(TestCase())
test_sample_1(TestCase())
test_sample_2(TestCase())
test_sample_3(TestCase())
test_sample_4(TestCase())
print("*** solving main ***")
main("input")
| 30.110497 | 90 | 0.656881 | 773 | 5,450 | 4.416559 | 0.195343 | 0.026362 | 0.060633 | 0.071178 | 0.385179 | 0.31488 | 0.26362 | 0.189807 | 0.189807 | 0.189807 | 0 | 0.066002 | 0.227156 | 5,450 | 180 | 91 | 30.277778 | 0.744539 | 0.053945 | 0 | 0.210526 | 0 | 0 | 0.073916 | 0.024898 | 0 | 0 | 0 | 0 | 0.067669 | 1 | 0.12782 | false | 0.015038 | 0.030075 | 0.015038 | 0.240602 | 0.045113 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea9d1db2e30cb44954b76901a89bdae58e810ab3 | 2,666 | py | Python | validators/hashes.py | rure6748/validators | a6b0320dd52de524ae83705c96cb64bcbb1146f1 | [
"MIT"
] | null | null | null | validators/hashes.py | rure6748/validators | a6b0320dd52de524ae83705c96cb64bcbb1146f1 | [
"MIT"
] | null | null | null | validators/hashes.py | rure6748/validators | a6b0320dd52de524ae83705c96cb64bcbb1146f1 | [
"MIT"
] | null | null | null | from typing import Pattern
import re
from .utils import validator
md5_regex: Pattern = re.compile(
r"^[0-9a-f]{32}$",
re.IGNORECASE
)
sha1_regex: Pattern = re.compile(
r"^[0-9a-f]{40}$",
re.IGNORECASE
)
sha224_regex: Pattern = re.compile(
r"^[0-9a-f]{56}$",
re.IGNORECASE
)
sha256_regex: Pattern = re.compile(
r"^[0-9a-f]{64}$",
re.IGNORECASE
)
sha512_regex: Pattern = re.compile(
r"^[0-9a-f]{128}$",
re.IGNORECASE
)
@validator
def md5(value: str) -> bool:
"""
Return whether or not given value is a valid MD5 hash.
Examples::
>>> md5('d41d8cd98f00b204e9800998ecf8427e')
True
>>> md5('900zz11')
ValidationFailure(func=md5, args={'value': '900zz11'})
:param value: MD5 string to validate
"""
return bool(md5_regex.match(value))
@validator
def sha1(value: str) -> bool:
"""
Return whether or not given value is a valid SHA1 hash.
Examples::
>>> sha1('da39a3ee5e6b4b0d3255bfef95601890afd80709')
True
>>> sha1('900zz11')
ValidationFailure(func=sha1, args={'value': '900zz11'})
:param value: SHA1 string to validate
"""
return bool(sha1_regex.match(value))
@validator
def sha224(value: str) -> bool:
"""
Return whether or not given value is a valid SHA224 hash.
Examples::
>>> sha224('d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f')
True
>>> sha224('900zz11')
ValidationFailure(func=sha224, args={'value': '900zz11'})
:param value: SHA224 string to validate
"""
return bool(sha224_regex.match(value))
@validator
def sha256(value: str) -> bool:
"""
Return whether or not given value is a valid SHA256 hash.
Examples::
>>> sha256(
... 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b'
... '855'
... )
True
>>> sha256('900zz11')
ValidationFailure(func=sha256, args={'value': '900zz11'})
:param value: SHA256 string to validate
"""
return bool(sha256_regex.match(value))
@validator
def sha512(value: str) -> bool:
"""
Return whether or not given value is a valid SHA512 hash.
Examples::
>>> sha512(
... 'cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce'
... '9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af9'
... '27da3e'
... )
True
>>> sha512('900zz11')
ValidationFailure(func=sha512, args={'value': '900zz11'})
:param value: SHA512 string to validate
"""
return bool(sha512_regex.match(value))
| 21.674797 | 79 | 0.615529 | 278 | 2,666 | 5.866906 | 0.201439 | 0.036787 | 0.042918 | 0.064378 | 0.452483 | 0.226855 | 0.226855 | 0.226855 | 0.147149 | 0.147149 | 0 | 0.182091 | 0.250188 | 2,666 | 122 | 80 | 21.852459 | 0.633817 | 0.573143 | 0 | 0.263158 | 0 | 0 | 0.077681 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.131579 | false | 0 | 0.078947 | 0 | 0.342105 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea9d34027cc05d5e99f35deb3bbc39d0c8fb36e4 | 5,323 | py | Python | aiida_kkr/parsers/voro.py | broeder-j/aiida-kkr | fe7f39aa8f1396e02c0eb51c1cd2a7dc050620d6 | [
"MIT"
] | 2 | 2017-11-09T10:21:43.000Z | 2017-11-09T18:42:05.000Z | aiida_kkr/parsers/voro.py | broeder-j/aiida-kkr | fe7f39aa8f1396e02c0eb51c1cd2a7dc050620d6 | [
"MIT"
] | 8 | 2018-07-19T12:33:28.000Z | 2018-10-18T10:02:32.000Z | aiida_kkr/parsers/voro.py | broeder-j/aiida-kkr | fe7f39aa8f1396e02c0eb51c1cd2a7dc050620d6 | [
"MIT"
] | null | null | null | #-*- coding: utf-8 -*-
from aiida.parsers.parser import Parser
from aiida.orm.data.parameter import ParameterData
from aiida_kkr.calculations.voro import VoronoiCalculation
from aiida.common.exceptions import InputValidationError
from aiida_kkr.tools.voroparser_functions import parse_voronoi_output
__copyright__ = (u"Copyright (c), 2017, Forschungszentrum Jülich GmbH, "
"IAS-1/PGI-1, Germany. All rights reserved.")
__license__ = "MIT license, see LICENSE.txt file"
__version__ = "0.3"
__contributors__ = ("Jens Broeder", "Philipp Rüßmann")
class VoronoiParser(Parser):
"""
Parser class for parsing the output of the voronoi code.
"""
def __init__(self, calc):
"""
Initialize the instance of Voronoi_Parser
"""
# check for valid input
if not isinstance(calc, VoronoiCalculation):
raise InputValidationError("Input calc must be a Voronoi Calculation")
# these files should be present after success of voronoi
self._default_files = {'outfile': calc._OUTPUT_FILE_NAME,
'atominfo': calc._ATOMINFO,
'radii': calc._RADII}
self._ParserVersion = __version__
#reuse init of base class
super(VoronoiParser, self).__init__(calc)
# pylint: disable=protected-access
def parse_with_retrieved(self, retrieved):
"""
Parse output data folder, store results in database.
:param retrieved: a dictionary of retrieved nodes, where
the key is the link name
:returns: a tuple with two values ``(bool, node_list)``,
where:
* ``bool``: variable to tell if the parsing succeeded
* ``node_list``: list of new nodes to be stored in the db
(as a list of tuples ``(link_name, node)``)
"""
success = False
node_list = ()
# Check that the retrieved folder is there
try:
out_folder = retrieved[self._calc._get_linkname_retrieved()]
except KeyError:
self.logger.error("No retrieved folder found")
return success, node_list
# check what is inside the folder
list_of_files = out_folder.get_folder_list()
# we need at least the output file name as defined in calcs.py
if self._calc._OUTPUT_FILE_NAME not in list_of_files:
self.logger.error("Output file '{}' not found".format(self._calc._OUTPUT_FILE_NAME))
return success, node_list
# Parse voronoi output; results that are stored in the database go into out_dict
# get path to files and catch errors if files are not present
file_errors = []
try:
potfile = out_folder.get_abs_path(self._calc._OUT_POTENTIAL_voronoi)
except OSError:
# cover case where potfile is overwritten from input to voronoi calculation
try:
potfile = out_folder.get_abs_path(self._calc._POTENTIAL_IN_OVERWRITE)
except OSError:
file_errors.append("Critical error! Neither potfile {} not {} "
"was found".format(self._calc._OUT_POTENTIAL_voronoi,
self._calc._POTENTIAL_IN_OVERWRITE))
potfile = 'file_not_found'
try:
outfile = out_folder.get_abs_path(self._calc._OUTPUT_FILE_NAME)
except OSError:
file_errors.append("Critical error! outfile not found {}".format(self._calc._OUTPUT_FILE_NAME))
outfile = 'file_not_found'
try:
atominfo = out_folder.get_abs_path(self._calc._ATOMINFO)
except OSError:
file_errors.append("Critical error! atominfo not found {}".format(self._calc._ATOMINFO))
atominfo = 'file_not_found'
try:
radii = out_folder.get_abs_path(self._calc._RADII)
except OSError:
file_errors.append("Critical error! radii not found {}".format(self._calc._RADII))
radii = 'file_not_found'
try:
inputfile = out_folder.get_abs_path(self._calc._INPUT_FILE_NAME)
except OSError:
file_errors.append("Critical error! inputfile not found {}".format(self._calc._INPUT_FILE_NAME))
inputfile = 'file_not_found'
# initialize out_dict and parse output files
out_dict = {'parser_version': self._ParserVersion}
out_dict['calculation_plugin_version'] = self._calc._CALCULATION_PLUGIN_VERSION
#TODO add job description, compound name, calculation title
success, msg_list, out_dict = parse_voronoi_output(out_dict, outfile,
potfile, atominfo,
radii, inputfile)
# add file open errors to parser output of error messages
for f_err in file_errors:
msg_list.append(f_err)
out_dict['parser_errors'] = msg_list
#create output node and link
output_data = ParameterData(dict=out_dict)
link_name = self.get_linkname_outparams()
node_list = [(link_name, output_data)]
return success, node_list
| 41.585938 | 108 | 0.618636 | 614 | 5,323 | 5.074919 | 0.296417 | 0.043646 | 0.026958 | 0.036585 | 0.220154 | 0.154044 | 0.154044 | 0.078947 | 0.055841 | 0 | 0 | 0.002432 | 0.304903 | 5,323 | 128 | 109 | 41.585938 | 0.83973 | 0.214541 | 0 | 0.216216 | 0 | 0 | 0.145185 | 0.00642 | 0 | 0 | 0 | 0.007813 | 0 | 1 | 0.027027 | false | 0 | 0.067568 | 0 | 0.148649 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea9ea90fc928518afbf97800ce453da8afdcd777 | 11,579 | py | Python | insaatWeb/views_ek/satis_view.py | Mustafaseyhanhh/django-insaatdb | b74c02a327313db465f02842e19316d623d24681 | [
"Apache-2.0"
] | null | null | null | insaatWeb/views_ek/satis_view.py | Mustafaseyhanhh/django-insaatdb | b74c02a327313db465f02842e19316d623d24681 | [
"Apache-2.0"
] | null | null | null | insaatWeb/views_ek/satis_view.py | Mustafaseyhanhh/django-insaatdb | b74c02a327313db465f02842e19316d623d24681 | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import render
from insaatWeb import models
from django.http import HttpResponse,HttpResponseRedirect
from django.contrib import messages
from datetime import datetime,date
import random,os,filetype
from insaatWeb.models import Projeler,Bloklar,Daireler,Satis,Ariza_Takip,Tapu_Satis
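# Sales ("satis") views: CRUD pages for projects (Projeler), their blocks (Bloklar)
# and the individual flats (Daireler), plus the per-project detail/overview page.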
def satis_projeler(request):
request.session["menu1"]=1
projeler=models.Projeler.objects.all()
daires=[]
bloks=[]
for i in projeler:
bloks.append(len(models.Bloklar.objects.filter(projeid=i)))
daires.append(len(models.Daireler.objects.filter(blokid__projeid=i)))
projes=zip(projeler,bloks,daires)
if not projeler:
messages.info(request, 'Henüz proje oluşturulmadı.')
return render(request,'satis_projeler.html',
context={
"projeler":projeler,
"projes":projes
}, # num_visits appended
)
def satis_projeler_ekle_fuc(request):
if request.POST:
print (request.POST)
pname=request.POST["pname"]
new=Projeler(adi=pname)
new.save()
messages.success(request, '{} projesi başarı ile eklendi.'.format(pname))
else:
messages.error(request, 'Hatalı url girildi')
return HttpResponseRedirect('/satis/projeler')
def satis_projeler_duzenle_fuc(request):
if request.POST:
print (request.POST.keys())
pname=request.POST["pname"]
pid=request.POST["id"]
if "btduzenle" in request.POST.keys():
new=models.Projeler.objects.filter(id=int(pid))[0]
lastname=new.adi
new.adi=pname
new.save()
messages.success(request, '{} projesi {} olarak değiştirildi .'.format(lastname,pname))
elif "btsil" in request.POST.keys():
satis_projeler_sil_fuc(request)
else:
messages.error(request, 'Hatalı url girildi')
return HttpResponseRedirect('/satis/projeler')
def satis_projeler_sil_fuc(request):
pid=int(request.POST["id"])
pname=request.POST["pname"]
print (pid)
new=models.Projeler.objects.filter(id=pid)[0]
new.delete()
messages.warning(request, '{} projesi silindi .'.format(pname))
return HttpResponseRedirect('/satis/projeler')
def satis_projeler_detay(request,pid,num):
pidblokdaire=[]
pid=models.Projeler.objects.filter(id=pid)[0]
pidblok=models.Bloklar.objects.filter(projeid=pid)
for i in pidblok:
if models.v_satis.objects.filter(projeid=pid.id,blokid=i.id):
pidblokdaire.append(models.v_satis.objects.filter(projeid=pid.id,blokid=i.id).order_by("-daireno"))
print (pidblokdaire)
return render(request,'satis_projeler_detay.html',
context={
"num":num,
"pid":pid,
"pidblok":pidblok,
"pidblokdaire":pidblokdaire
}, # num_visits appended
)
def satis_projeler_blok_ekle(request,pid):
pid=models.Projeler.objects.filter(id=pid)[0]
return render(request,'blok_ekle.html',
context={
"pid":pid
}, # num_visits appended
)
def satis_projeler_blok_sil_fuc(request,pid,blokid):
proje=models.Projeler.objects.filter(id=pid)[0]
blok=models.Bloklar.objects.filter(id=blokid,projeid=proje)[0]
messages.warning(request, '{} isimli blok silindi'.format(blok.adi))
blok.delete()
return HttpResponseRedirect('/satis/projeler/'+str(pid)+'/0')
def satis_projeler_blok_ekle_fuc(request,pid):
if request.POST["metrekare"].isdigit():
new=Bloklar(
projeid = models.Projeler.objects.filter(id=pid)[0],
adi = request.POST["blokadi"],
metrekare = int(request.POST["metrekare"]),
odasayisi = request.POST["odasayisi"],
katsayisi = int(request.POST["katsayisi"]),
birkattakidairesayisi = int(request.POST["birkattakidairesayisi"]),
blokkenarsol = request.POST["blokKenarSol"],
blokkenarsag = request.POST["blokKenarSag"],
blokkenaryukari = request.POST["blokKenarYukari"],
blokkenarasagi = request.POST["blokKenarAsagi"])
new.save()
genelfiyat=request.POST["genelfiyat"]
if genelfiyat:
oto_daire(new,genelfiyat)
else:
oto_daire(new,0)
messages.success(request, '{} isimli blok eklendi'.format(request.POST["blokadi"]))
return HttpResponseRedirect('/satis/projeler/'+str(pid)+'/'+str(new.id))
else:
messages.error(request, 'Yeni blok eklenemedi girdiğiniz değerleri lütfen kontrol ediniz')
return HttpResponseRedirect('/satis/projeler/blokekle/'+str(pid))
def satis_projeler_blok_duzenle(request,pid,blokid):
proje=models.Projeler.objects.filter(id=pid)[0]
blok=models.Bloklar.objects.filter(projeid=proje,id=blokid)[0]
return render(request,'blok_duzenle.html',
context={
"blok":blok,
"proje":proje
}, # num_visits appended
)
def satis_projeler_blok_duzenle_fuc(request,pid,blokid):
blok=models.Bloklar.objects.filter(id=blokid)[0]
if request.POST["metrekare"].isdigit():
blok.adi = request.POST["blokadi"]
blok.metrekare = int(request.POST["metrekare"])
blok.odasayisi = request.POST["odasayisi"]
blok.katsayisi = int(request.POST["katsayisi"])
blok.birkattakidairesayisi = int(request.POST["birkattakidairesayisi"])
blok.blokkenarsol = request.POST["blokKenarSol"]
blok.blokkenarsag = request.POST["blokKenarSag"]
blok.blokkenaryukari = request.POST["blokKenarYukari"]
blok.blokkenarasagi = request.POST["blokKenarAsagi"]
messages.success(request, '{} isimli blok kaydedildi'.format(request.POST["blokadi"]))
blok.save()
else:
messages.error(request, '{} isimli blok kaydedilemedi. Girdiğiniz yeni bilgileri kontrol ediniz'.format(request.POST["blokadi"]))
return HttpResponseRedirect('/satis/projeler/duzenle/'+str(pid)+'/'+str(blokid))
return HttpResponseRedirect('/satis/projeler/'+str(pid)+'/'+str(blokid))
def oto_daire(new,genelfiyat):
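    # Create one Daireler (flat) record for every unit in the new block (flats per floor x number of floors), each at the given default price.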
dairesayisi=new.birkattakidairesayisi*new.katsayisi
for i in range(dairesayisi):
print ("girildi")
newdaire=Daireler(
blokid=new,
no=i+1,
fiyat=int(str(genelfiyat).replace(".",""))
)
newdaire.save()
def satis_projeler_blok_daire_ekle_fuc(request,pid):
blokid=request.POST["hide"]
daireno=request.POST["daireno"]
fiyat=request.POST["fiyat"]
proje=models.Projeler.objects.filter(id=pid)[0]
blok=models.Bloklar.objects.filter(projeid=proje,id=blokid)[0]
if models.Daireler.objects.filter(blokid=blok,no=daireno):
messages.error(request, '{} Numaralı daire daha önceden oluşturulmuş'.format(daireno))
else:
try:
fiyat=int(fiyat.replace(".",""))
new=Daireler(blokid=blok,
no=daireno,
fiyat=fiyat)
messages.success(request, '{} numaralı daire eklendi'.format(daireno))
new.save()
except ValueError:
            messages.error(request, 'Daire için girmiş olduğunuz fiyat geçersiz')
return HttpResponseRedirect('/satis/projeler/'+str(pid)+"/"+blokid)
return HttpResponseRedirect('/satis/projeler/'+str(pid)+"/"+blokid)
def satis_projeler_daire_duzenle(request,pid,blokid,no):
proje=models.Projeler.objects.filter(id=pid)[0]
blok=models.Bloklar.objects.filter(id=blokid)[0]
daire=models.Daireler.objects.filter(blokid=blok,no=no)[0]
satis=models.Satis.objects.filter(daireid=daire)
record=models.Ariza_Takip.objects.filter(daireid=daire)
tapu=models.Tapu_Satis.objects.filter(daireid=daire)
yuzdelik=0
if satis:
satis=satis[0]
if daire.fiyat != 0:
yuzdelik=int(satis.odenen/(daire.fiyat/100))
else:
yuzdelik=100
return render(request,'daire_duzenle.html',
context={
"proje":proje,
"blok":blok,
"daire":daire,
"satis":satis,
"yuzdelik":yuzdelik,
"record":record,
"tapu":tapu
}, # num_visits appended
)
def satis_projeler_daire_duzenle_kaydet(request,pid,blokid,no):
print("*"*100)
print ("*"*50)
if request.FILES:
pname, ext = os.path.splitext(request.FILES["dosya"].name)
request.FILES["dosya"].name="sozlesme-"+str(pid)+"-"+str(blokid)+"-"+str(no)+ext
print(request.FILES["dosya"].name)
#handle_uploaded_file(request.FILES['dosya'], "yeni isim")
blok=models.Bloklar.objects.filter(id=blokid)[0]
daire=models.Daireler.objects.filter(blokid=blok,no=no)[0]
satis=models.Satis.objects.filter(daireid=daire)
if satis:
satis=satis[0]
if "odenen" in request.POST.keys():
if request.POST["odenen"].isdigit():
satis.odenen=request.POST["odenen"]
else:
messages.error(request, 'Ödenen miktarı sayısal olarak giriniz')
return HttpResponseRedirect('/satis/projeler/duzenle/'+str(pid)+"/"+str(blokid)+"/"+str(no))
else:
satis.odenen=-1
satis.tckimlik=request.POST["tckimlik"]
satis.adisoyadi=request.POST["adisoyadi"]
satis.telefon=request.POST["telefon"]
satis.hakkinda=request.POST["hakkinda"]
if request.FILES :
satis.sozlesme=request.FILES["dosya"]
satis.save()
else:
if "odenen" in request.POST.keys():
odenen=request.POST["odenen"].replace(".","")
if not odenen.isdigit():
messages.error(request, 'Ödenen miktarı sayısal olarak giriniz')
return HttpResponseRedirect('/satis/projeler/duzenle/'+str(pid)+"/"+str(blokid)+"/"+str(no))
else:
odenen=-1
newsatis=Satis(
daireid=daire,
odenen=odenen,
tckimlik=request.POST["tckimlik"],
adisoyadi=request.POST["adisoyadi"],
telefon=request.POST["telefon"],
hakkinda=request.POST["hakkinda"],
)
newsatis.save()
if request.FILES:
newsatis.sozlesme=request.FILES["dosya"]
newsatis.save()
messages.success(request, 'Kayıt başarı ile eklendi')
return HttpResponseRedirect('/satis/projeler/'+str(pid)+"/"+str(blokid))
def satis_projeler_daire_duzenle_satisiptal(request,pid,blokid,no):
v_satisiptal=models.v_satis.objects.filter(projeid=pid,blokid=blokid,daireno=no)[0]
satisiptal=models.Satis.objects.filter(daireid=v_satisiptal.daireid)[0]
messages.success(request, '{} kişisine yapılan satış iptal edildi.'.format(v_satisiptal.adisoyadi))
satisiptal.delete()
return HttpResponseRedirect('/satis/projeler/'+str(pid)+"/"+str(blokid))
def satis_projeler_daire_duzenle_sozlesme_sil(request,id):
satis=models.Satis.objects.filter(id=id)[0]
satis.sozlesme.delete()
messages.warning(request, 'Sözleşme başarı ile silindi.')
return HttpResponseRedirect('/satis/projeler/duzenle/'+ str(satis.daireid.blokid.projeid.id)+"/"+str(satis.daireid.blokid.id)+"/"+str(satis.daireid.no))
def handle_uploaded_file(file, filename):
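    # Stream an uploaded file into media/satis_sozlesmeleri/ under the given filename, writing it chunk by chunk.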
MEDIA_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))+"/media/satis_sozlesmeleri/"
with open(MEDIA_DIR + filename, 'wb+') as destination:
for chunk in file.chunks():
destination.write(chunk) | 39.250847 | 156 | 0.652992 | 1,313 | 11,579 | 5.6885 | 0.145468 | 0.07511 | 0.032133 | 0.078324 | 0.455349 | 0.35172 | 0.304057 | 0.240594 | 0.185835 | 0.170036 | 0 | 0.004583 | 0.208481 | 11,579 | 295 | 157 | 39.250847 | 0.810366 | 0.013473 | 0 | 0.268199 | 0 | 0 | 0.139792 | 0.018744 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065134 | false | 0 | 0.030651 | 0 | 0.172414 | 0.030651 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eaa0f21a63749661c81cfee528775ea3092670da | 897 | py | Python | tests/layers/reshape_test.py | hemantranvir/turret | bc3df21541ce2f808c749c985db47a210149f22c | [
"MIT"
] | 4 | 2019-03-14T18:27:33.000Z | 2021-07-05T05:34:30.000Z | tests/layers/reshape_test.py | hemantranvir/turret | bc3df21541ce2f808c749c985db47a210149f22c | [
"MIT"
] | 1 | 2019-06-07T06:03:04.000Z | 2019-06-07T06:03:04.000Z | tests/layers/reshape_test.py | hemantranvir/turret | bc3df21541ce2f808c749c985db47a210149f22c | [
"MIT"
] | 4 | 2019-10-30T10:30:47.000Z | 2019-10-30T11:15:40.000Z | # -*- coding: utf-8 -*-
import unittest
import numpy as np
import turret
import turret.layers as L
from util import execute_inference
class ReshapeTest(unittest.TestCase):
def test_default(self):
N = 5
C_in, H_in, W_in = 3, 20, 30
C_out, H_out, W_out = 6, 5, 60
input = np.random.rand(N, C_in, H_in, W_in).astype(np.float32)
def build_network(network):
h = network.add_input("input", turret.DataType.FLOAT,
turret.Dimensions.CHW(C_in, H_in, W_in))
h = L.reshape(h, turret.Dimensions.CHW(C_out, H_out, W_out))
network.mark_output("output", h)
actual = execute_inference({"input": input}, build_network)
expect = np.reshape(input, (N, C_out, H_out, W_out))
self.assertEqual(expect.shape, actual.shape)
self.assertTrue(np.allclose(expect, actual))
| 32.035714 | 74 | 0.619844 | 132 | 897 | 4.022727 | 0.416667 | 0.022599 | 0.022599 | 0.033898 | 0.118644 | 0.118644 | 0 | 0 | 0 | 0 | 0 | 0.019578 | 0.259755 | 897 | 27 | 75 | 33.222222 | 0.78012 | 0.023411 | 0 | 0 | 0 | 0 | 0.018307 | 0 | 0 | 0 | 0 | 0 | 0.1 | 1 | 0.1 | false | 0 | 0.25 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
575c5e7e78a19448e4043e13fd5696b8b3592a83 | 392 | py | Python | mini_project_1/logout.py | CMPUT291PROJECT1F18/Mini-Project-1 | b58144dd80c40466de755877b7c3996f4aa67af9 | [
"MIT"
] | 1 | 2018-11-06T01:04:13.000Z | 2018-11-06T01:04:13.000Z | mini_project_1/logout.py | CMPUT291PROJECT1F18/Mini-Project-1 | b58144dd80c40466de755877b7c3996f4aa67af9 | [
"MIT"
] | 39 | 2018-10-23T00:28:13.000Z | 2018-11-06T16:14:56.000Z | mini_project_1/logout.py | CMPUT291PROJECT1F18/Mini-Project-1 | b58144dd80c40466de755877b7c3996f4aa67af9 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Logout functionality"""
from mini_project_1.common import ShellArgumentParser
def get_logout_parser() -> ShellArgumentParser:
"""Argparser for the :class:`.shell.MiniProjectShell` ``logout`` command"""
parser = ShellArgumentParser(
prog="logout",
description="Logout to the mini-project-1 database")
return parser
| 24.5 | 79 | 0.693878 | 42 | 392 | 6.380952 | 0.714286 | 0.08209 | 0.089552 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009202 | 0.168367 | 392 | 15 | 80 | 26.133333 | 0.812883 | 0.329082 | 0 | 0 | 0 | 0 | 0.171315 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
575c7323607ca37bb1ad3bcf43f942e74774eaa8 | 4,272 | py | Python | 2019/python/aoc_2019_06.py | robjwells/adventofcode-solutions | 1c3aa376f1c779a69aa515ce70f0537e13f25eab | [
"MIT"
] | null | null | null | 2019/python/aoc_2019_06.py | robjwells/adventofcode-solutions | 1c3aa376f1c779a69aa515ce70f0537e13f25eab | [
"MIT"
] | null | null | null | 2019/python/aoc_2019_06.py | robjwells/adventofcode-solutions | 1c3aa376f1c779a69aa515ce70f0537e13f25eab | [
"MIT"
] | null | null | null | """Day 6: Universal Orbit Map"""
import math
from collections import defaultdict, deque
from copy import deepcopy
from typing import DefaultDict, Iterator, List, NamedTuple, Tuple
import aoc
DAY = 6
OrbitGraph = DefaultDict[str, List[str]]
def parse_input(input_text: str) -> OrbitGraph:
orbits: OrbitGraph = defaultdict(list)
pairs = [line.split(")") for line in input_text.splitlines()]
for orbited, orbited_by in pairs:
orbits[orbited].append(orbited_by)
return orbits
def orbit_depths(orbit_graph: OrbitGraph) -> Iterator[int]:
"""Yields node depths in a breadth-first traversal of the orbit graph."""
queue = deque([(0, "COM")])
while queue:
depth, body = queue.popleft()
yield depth
queue.extend(
[(depth + 1, orbiting_body) for orbiting_body in orbit_graph[body]]
)
def find_shortest_path(
directed_orbit_graph: OrbitGraph, source: str = "YOU", dest: str = "SAN"
) -> Tuple[int, List[str]]:
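    """Return the minimum number of orbital transfers between source and dest, plus the path of bodies traversed."""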
orbits = directed_to_undirected_graph(directed_orbit_graph)
class BFSEntry(NamedTuple):
depth: int
current_node: str
path_from_source: List[str]
start = orbits[source][0] # Body orbited by source
queue = deque([BFSEntry(0, start, [])])
min_distance = math.inf
shortest_path: List[str] = []
while queue:
depth, current_node, path = queue.popleft()
if depth >= min_distance:
# Cut off search branch if distance is already too long.
continue
# Filter already-visited nodes from next steps to avoid cycles.
unvisited_neighbours = [n for n in orbits[current_node] if n not in path]
current_path = path + [current_node]
if dest in unvisited_neighbours:
# The earlier check ensures the current distance is known to be
# shorter than the previous-shortest, so we can just assign the
# current distance and path without testing again.
min_distance = depth
shortest_path = current_path
else:
queue.extend(
[
BFSEntry(depth + 1, neighbour, current_path)
for neighbour in unvisited_neighbours
]
)
if min_distance is math.inf:
raise ValueError(f"Node {dest} not present in orbit graph.")
# Turn off mypy checking for the return value because the use of math.inf
# (which is a float) earlier causes it to complain about the return type
# really being a Union[float, int], where for any valid input graph the
# dest node will be found and min_distance will be an int.
return min_distance, shortest_path # type: ignore
def directed_to_undirected_graph(directed: OrbitGraph) -> OrbitGraph:
"""Create undirected graph from the given directed graph."""
# Make an undirected graph so that we can traverse orbits in
# either direction. The original orbit graph is strictly
# orbited_body -> orbiting body, ie a directed acyclic graph.
undirected = deepcopy(directed)
for orbited_body, orbiting_bodies in directed.items():
for orbiting in orbiting_bodies:
undirected[orbiting].append(orbited_body)
return undirected
def main(orbit_graph: OrbitGraph) -> Tuple[int, int]:
part_one_solution = sum(orbit_depths(orbit_graph))
part_two_solution, shortest_path = find_shortest_path(orbit_graph)
return (part_one_solution, part_two_solution)
def test_orbit_depths() -> None:
orbits = """\
COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L"""
orbit_graph = parse_input(orbits)
assert sum(orbit_depths(orbit_graph)) == 42
def test_find_shortest_path() -> None:
orbits = """\
COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L
K)YOU
I)SAN"""
orbit_graph = parse_input(orbits)
distance, path = find_shortest_path(orbit_graph)
assert distance == 4
assert path == ["K", "J", "E", "D", "I"]
if __name__ == "__main__":
parsed = parse_input(aoc.load_puzzle_input(2019, DAY))
part_one_solution, part_two_solution = main(parsed)
print(
aoc.format_solution(
title=__doc__,
part_one=part_one_solution,
part_two=part_two_solution,
)
)
| 28.864865 | 81 | 0.659878 | 584 | 4,272 | 4.650685 | 0.316781 | 0.051546 | 0.023564 | 0.023196 | 0.138439 | 0.069219 | 0.025037 | 0.025037 | 0.025037 | 0.025037 | 0 | 0.00437 | 0.25 | 4,272 | 147 | 82 | 29.061224 | 0.843321 | 0.215356 | 0 | 0.264151 | 0 | 0 | 0.050557 | 0 | 0 | 0 | 0 | 0 | 0.028302 | 1 | 0.066038 | false | 0 | 0.04717 | 0 | 0.188679 | 0.009434 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
575e0d9fcb880defcf1be989891030a8d0c75149 | 9,691 | py | Python | ijr/generic_lib.py | IJsvogel/ijr | a1785888c5b6fafd23c1f83f14af7d652f21372c | [
"MIT"
] | 1 | 2020-05-29T07:35:15.000Z | 2020-05-29T07:35:15.000Z | ijr/generic_lib.py | IJsvogel/ijr | a1785888c5b6fafd23c1f83f14af7d652f21372c | [
"MIT"
] | 2 | 2020-08-06T08:57:05.000Z | 2020-09-04T07:48:59.000Z | ijr/generic_lib.py | IJsvogel/ijr | a1785888c5b6fafd23c1f83f14af7d652f21372c | [
"MIT"
] | null | null | null | import datetime
import os
from itertools import chain, starmap
def dict_compare(old_dict, new_dict, nested=None):
""" Compare two dictionaries
Only 1 level, ignoring attributes starting with '_'
"""
key_prefix = nested + '|' if nested else ''
intersect_keys = old_dict.keys() & new_dict.keys()
modified = {key_prefix + k: dict(old=old_dict[k], new=new_dict[k], action='mod') for k in intersect_keys
if k[0] != '_'
and old_dict[k] != new_dict[k]
and not (isinstance(old_dict[k], dict) and isinstance(new_dict[k], dict))}
nested_keys = [k for k in intersect_keys if k[0] != '_' and isinstance(old_dict[k], dict) and isinstance(new_dict[k], dict)]
for k in nested_keys:
x = dict_compare(old_dict[k], new_dict[k], key_prefix + k)
if x:
modified.update(x)
added = new_dict.keys() - old_dict.keys()
modified.update({key_prefix + k: dict(new=new_dict[k], action='add') for k in added if k[0] != '_'})
deleted = old_dict.keys() - new_dict.keys()
modified.update({key_prefix + k: dict(old=old_dict[k], action='del') for k in deleted if k[0] != '_'})
if modified:
return modified
def running_in_gcf():
""" Determine if code is running in GCF using GCP_PROJECT
"""
return os.getenv('GCP_PROJECT') is not None
def default_object(o):
""" Default handler for json.dumps()"""
if isinstance(o, (datetime.date, datetime.datetime)):
return o.isoformat()
raise TypeError("Type %s not serializable" % type(o))
# def buid_dict(composed_key, key_value):
# def recursive_buid_dict(keys, value, nest_level=0):
# if nest_level < len(keys):
# return recursive_buid_dict(keys, value={keys[nest_level]: value}, nest_level=nest_level + 1)
# elif nest_level == len(keys):
# return value
#
# return recursive_buid_dict(keys=list(reversed(composed_key.split('.'))), value=key_value)
#
# from collections import ChainMap
#
# def merge_dicts(dict_a, dict_b):
# new_dict = dict()
# for key in ChainMap(dict_a, dict_b):
# val_a , val_b = dict_a.get(key), dict_b.get(key)
# if isinstance(val_a, dict) and isinstance(val_b, dict):
# new_dict[key] = merge_dicts(val_a, val_b)
# elif val_a is None and val_b is None:
# continue
# elif val_a is None:
# new_dict[key] = val_b
# elif val_b is None or val_a == val_b:
# new_dict[key] = val_a
# else:
# new_dict[key] = [val_a, val_b]
# return new_dict
class Compare(object):
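    """Deep comparison of two (possibly nested) dictionaries, exposing both a nested diff (result) and a flattened diff (flatresult)."""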
def __init__(self, old_dictionary=None, new_dictionary=None, ignore_starting=None, ignore_ending=None):
self.old_dict = old_dictionary
self.new_dict = new_dictionary
self.ignore_starting = ignore_starting
self.ignore_ending = ignore_ending
        self.result = self.deepcompare(old_dict=old_dictionary,
                                       new_dict=new_dictionary,
                                       ignore_starting=self.ignore_starting,
                                       ignore_ending=self.ignore_ending)
self.flatresult = self.flat()
@staticmethod
def intersect_lists(left_list, right_list):
"""Intersect two list and return difference as tuple (left, middle, right)
USAGE:
l, _, _ = intersect_list(left_list, right_list)
return list of values in left list not matched with right list
_, m, _ = intersect_list(left_list, right_list)
return list of values in matched in left list and right list
_, _, r = intersect_list(left_list, right_list)
return list of values in right list not matched with left list
l, m, r = intersect_list(left_list, right_list)
return all three above mentioned results
EXAMPLE:
l, m, r = intersect_list([1, 2, 3], [2, 3, 4])
l = [1]
m = [2, 3]
r = [4]
"""
l = [value for value in left_list if value not in right_list]
m = [value for value in left_list if value in right_list]
r = [value for value in right_list if value not in left_list]
return l, m, r
@staticmethod
def unnest(dictionary, separator='.'):
"""Flatten a nested dictionary structure
EXAMPLE:
test_unnest = {'a': {'b': {'c': {'d': ['e', 'f'], 'g': 5}}}}
unnest(test_unnest)
returns {'a.b.c.d': ['e', 'f'], 'a.b.c.g': 5}
"""
def unpack(parent_key, parent_value):
"""Unpack one level of nesting in a dictionary"""
try:
for key, value in parent_value.items():
yield (f'{parent_key}{separator}{key}', value)
except AttributeError:
# parent_value was not a dict, no need to flatten
yield (f'{parent_key}', parent_value)
if not isinstance(dictionary, dict):
return dictionary
while any(isinstance(value, dict) for value in dictionary.values()): # TODO secure breaking
# Keep unpacking the dictionary until all value's are not dictionary's
try:
dictionary = dict(chain.from_iterable(starmap(unpack, dictionary.items())))
except AttributeError:
break
return dictionary
@staticmethod
def exclude_keys(set_of_keys, starting=None, ending=None):
"""Remove keys from set of keys starting and/or ending with string
:param set_of_keys: a set of string keys to be iterated for ignoring keys
:param starting: keys starting with string to be removed from a set of keys (default None)
:param ending: keys ending with string to be removed from a set of keys (default None)
"""
try:
clean_keys = set()
for key in set_of_keys:
if starting and ending and key.startswith(starting) and key.endswith(ending):
continue
elif starting and key.startswith(starting):
continue
elif ending and key.endswith(ending):
continue
else:
clean_keys.add(key)
return clean_keys
except TypeError as te:
raise te
def listtodict(self, lst):
"""Convert list of dictionaries to dictionary of dictionaries using index of list
converts list of objects to dictionary of objects
:param lst: list or dictionary to be converted to dictionary of objects
"""
#NOTE: if object is not a dictionary it will be converted using default key as parent and index as child key
if not isinstance(lst, (list, dict)):
return lst
try:
result = {key: self.listtodict(value) for key, value in lst.items()}
except AttributeError:
result = {f'{index}': self.listtodict(value) for index, value in enumerate(lst)}
return result
def deepcompare(self, old_dict, new_dict, ignore_starting=None, ignore_ending=None):
"""Compare nested dictionaries, including lists recursively
:param old_dict: the dictionary to be compared with
:param new_dict: the dictionary to compare
:param ignore_starting: keys to be ignored starting with string (default None)
:param ignore_ending: keys to be ignored ending with string (default None)
"""
if old_dict == new_dict:
return
result = {}
try:
old = self.exclude_keys(old_dict.keys(), ignore_starting, ignore_ending)
new = self.exclude_keys(new_dict.keys(), ignore_starting, ignore_ending)
overlapping_keys = old & new
old_keys = old - new
new_keys = new - old
if old_keys:
for key in old_keys:
result.update({key: {'action': 'del', 'old': old_dict[key]}})
if new_keys:
for key in new_keys:
result.update({key: {'action': 'add', 'new': new_dict[key]}})
for key in overlapping_keys:
if isinstance(old_dict[key], list) and isinstance(new_dict[key], list):
old_d = self.listtodict(old_dict[key])
new_d = self.listtodict(new_dict[key])
value = self.deepcompare(old_d, new_d)
if value is None:
continue
else:
value = self.deepcompare(old_dict[key], new_dict[key])
if value is None:
continue
if all(f'{index}' == key for index, key in enumerate(sorted(value.keys()))):
value = [v for _, v in value.items()]
result.update({key: value})
except AttributeError:
if old_dict != new_dict:
result = {'action': 'mod', 'old': old_dict, 'new': new_dict}
finally:
if result:
return result
def flat(self, dictionary=None, separator='.', unpack_lists=False):
        if unpack_lists:
return self.unnest(self.listtodict(dictionary or self.result), separator=separator)
else:
return self.unnest(dictionary or self.result, separator=separator) | 42.504386 | 128 | 0.58673 | 1,243 | 9,691 | 4.40708 | 0.159292 | 0.035779 | 0.014604 | 0.015517 | 0.252282 | 0.163928 | 0.116466 | 0.108434 | 0.070829 | 0.061336 | 0 | 0.002875 | 0.318027 | 9,691 | 228 | 129 | 42.504386 | 0.825995 | 0.335053 | 0 | 0.208 | 0 | 0 | 0.023821 | 0.004632 | 0 | 0 | 0 | 0.004386 | 0 | 1 | 0.088 | false | 0 | 0.024 | 0 | 0.224 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5762eb361d3adfdf4d01dc32fadd4d65e08d49dc | 280 | py | Python | Physics250-ME30/timeConstant.py | illusion173/Physics250 | 69f2ffdb8af013e8b0739779861c1455b579ddaf | [
"MIT"
] | null | null | null | Physics250-ME30/timeConstant.py | illusion173/Physics250 | 69f2ffdb8af013e8b0739779861c1455b579ddaf | [
"MIT"
] | null | null | null | Physics250-ME30/timeConstant.py | illusion173/Physics250 | 69f2ffdb8af013e8b0739779861c1455b579ddaf | [
"MIT"
] | null | null | null | import numpy as np
import math
extraNumber = 4 * math.pi * pow(10,-7)
def timeConstant():
henry = float(input('Input Henry: '))
    ohms = float(input('Input Ohms: '))
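    # Dividing R by 1000 scales tau = L/R up by a factor of 1000, so the printed constant is in milliseconds when L is in henries and R in ohms.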
ohms = ohms / 1000
timeConstant = henry/ohms
print(timeConstant)
timeConstant() | 20 | 41 | 0.625 | 34 | 280 | 5.147059 | 0.588235 | 0.194286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.038095 | 0.25 | 280 | 14 | 42 | 20 | 0.795238 | 0 | 0 | 0 | 0 | 0 | 0.092527 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.2 | 0 | 0.3 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
57632319a70a6513f6913389740a43989ef17840 | 2,520 | py | Python | sleekxmpp/plugins/xep_0191/blocking.py | E-Tahta/sleekxmpp | ed067c9412835c5fe44bf203936262bcec09ced4 | [
"BSD-3-Clause"
] | 499 | 2015-01-04T21:45:16.000Z | 2022-02-14T13:04:08.000Z | sleekxmpp/plugins/xep_0191/blocking.py | E-Tahta/sleekxmpp | ed067c9412835c5fe44bf203936262bcec09ced4 | [
"BSD-3-Clause"
] | 159 | 2015-01-02T19:09:47.000Z | 2020-02-12T08:29:54.000Z | sleekxmpp/plugins/xep_0191/blocking.py | E-Tahta/sleekxmpp | ed067c9412835c5fe44bf203936262bcec09ced4 | [
"BSD-3-Clause"
] | 209 | 2015-01-07T16:23:16.000Z | 2022-01-26T13:02:20.000Z | """
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2012 Nathanael C. Fritz, Lance J.T. Stout
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import logging
from sleekxmpp import Iq
from sleekxmpp.plugins import BasePlugin
from sleekxmpp.xmlstream.handler import Callback
from sleekxmpp.xmlstream.matcher import StanzaPath
from sleekxmpp.xmlstream import register_stanza_plugin, JID
from sleekxmpp.plugins.xep_0191 import stanza, Block, Unblock, BlockList
log = logging.getLogger(__name__)
class XEP_0191(BasePlugin):
name = 'xep_0191'
description = 'XEP-0191: Blocking Command'
dependencies = set(['xep_0030'])
stanza = stanza
def plugin_init(self):
register_stanza_plugin(Iq, BlockList)
register_stanza_plugin(Iq, Block)
register_stanza_plugin(Iq, Unblock)
self.xmpp.register_handler(
Callback('Blocked Contact',
StanzaPath('iq@type=set/block'),
self._handle_blocked))
self.xmpp.register_handler(
Callback('Unblocked Contact',
StanzaPath('iq@type=set/unblock'),
self._handle_unblocked))
def plugin_end(self):
self.xmpp.remove_handler('Blocked Contact')
self.xmpp.remove_handler('Unblocked Contact')
def get_blocked(self, ifrom=None, block=True, timeout=None, callback=None):
iq = self.xmpp.Iq()
iq['type'] = 'get'
iq['from'] = ifrom
iq.enable('blocklist')
return iq.send(block=block, timeout=timeout, callback=callback)
def block(self, jids, ifrom=None, block=True, timeout=None, callback=None):
iq = self.xmpp.Iq()
iq['type'] = 'set'
iq['from'] = ifrom
if not isinstance(jids, (set, list)):
jids = [jids]
iq['block']['items'] = jids
return iq.send(block=block, timeout=timeout, callback=callback)
def unblock(self, jids=None, ifrom=None, block=True, timeout=None, callback=None):
iq = self.xmpp.Iq()
iq['type'] = 'set'
iq['from'] = ifrom
if jids is None:
jids = []
if not isinstance(jids, (set, list)):
jids = [jids]
iq['unblock']['items'] = jids
return iq.send(block=block, timeout=timeout, callback=callback)
def _handle_blocked(self, iq):
self.xmpp.event('blocked', iq)
def _handle_unblocked(self, iq):
self.xmpp.event('unblocked', iq)
| 30 | 86 | 0.627381 | 302 | 2,520 | 5.13245 | 0.261589 | 0.046452 | 0.032258 | 0.042581 | 0.396129 | 0.298065 | 0.298065 | 0.298065 | 0.298065 | 0.252903 | 0 | 0.012807 | 0.256349 | 2,520 | 83 | 87 | 30.361446 | 0.814301 | 0.06627 | 0 | 0.303571 | 0 | 0 | 0.09532 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.392857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
57639652e9a174a922890189fb6a50d9bc18bd04 | 7,985 | py | Python | src/mxnet_container/train.py | yangaws/sagemaker-mxnet-containers | 0c7bc3422986e673acf6e0a01a750e005b2d0d74 | [
"Apache-2.0"
] | null | null | null | src/mxnet_container/train.py | yangaws/sagemaker-mxnet-containers | 0c7bc3422986e673acf6e0a01a750e005b2d0d74 | [
"Apache-2.0"
] | null | null | null | src/mxnet_container/train.py | yangaws/sagemaker-mxnet-containers | 0c7bc3422986e673acf6e0a01a750e005b2d0d74 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import container_support as cs
import inspect
import json
import logging
import os
import socket
import subprocess
logger = logging.getLogger(__name__)
DEFAULT_MODEL_NAME = "model"
DEFAULT_MODEL_FILENAMES = {
'symbol': 'model-symbol.json',
'params': 'model-0000.params',
'shapes': 'model-shapes.json',
}
class MXNetTrainingEnvironment(cs.TrainingEnvironment):
""" Configuration for single machine and distributed mxnet training.
"""
def __init__(self, base_dir):
super(MXNetTrainingEnvironment, self).__init__(base_dir)
self._ps_verbose = int(self.hyperparameters.get('_ps_verbose', 0))
self._ps_port = int(self.hyperparameters.get('_ps_port', 8000))
self._scheduler_host = sorted(self.hosts)[0]
self._scheduler_ip = host_lookup(self._scheduler_host)
# Block until all host lookups succeed. Relies on retrying host_lookup.
for host in self.hosts:
host_lookup(host)
@property
def distributed(self):
""" Returns True if this configuration defines a distributed learning task."""
return len(self.hosts) > 1
@property
def current_host_scheduler(self):
""" Returns True if this machine should be the mxnet parameter server scheduler."""
return self._scheduler_host == self.current_host
def env_vars_for_role(self, role):
""" Returns environment variables for a python process to run as an
mxnet parameter server process with the specified role.
Args:
role (str): One of "worker", "server", or "scheduler"
"""
if role not in ["worker", "scheduler", "server"]:
raise ValueError("Unexpected role {}".format(role))
return {
'DMLC_NUM_WORKER': str(len(self.hosts)),
'DMLC_NUM_SERVER': str(len(self.hosts)),
'DMLC_ROLE': role,
'DMLC_PS_ROOT_URI': str(self._scheduler_ip),
'DMLC_PS_ROOT_PORT': str(self._ps_port),
'PS_VERBOSE': str(self._ps_verbose)
}
@property
def kwargs_for_training(self):
""" Returns a dictionary of key-word arguments for input to the user supplied
module train function. """
return {
'hyperparameters': dict(self.hyperparameters),
'input_data_config': dict(self.channels),
'channel_input_dirs': dict(self.channel_dirs),
'output_data_dir': self.output_data_dir,
'model_dir': self.model_dir,
'num_gpus': self.available_gpus,
'num_cpus': self.available_cpus,
'hosts': list(self.hosts),
'current_host': self.current_host
}
def default_save(self, mod):
""" Saves the specified mxnet module to ``self.model_dir``.
This generates three files in ``self.model_dir``:
- model-symbol.json - The serialized module symbolic graph. Formed by
invoking ```module.symbol.save```
- model-0000.params - The serialized module parameters. Formed by
invoking ```module.save_params```
- model-shapes.json - The serialized module input data shapes. A json list
of json data-shape objects. Each data-shape object
contains a string name and a list of integer dimensions.
Args:
mod : (mxnet.mod.Module) The module to save."""
if not self.distributed or self.current_host_scheduler:
mod.symbol.save(os.path.join(self.model_dir, DEFAULT_MODEL_FILENAMES['symbol']))
mod.save_params(os.path.join(self.model_dir, DEFAULT_MODEL_FILENAMES['params']))
signature = self._build_data_shape_signature(mod)
with open(os.path.join(self.model_dir, DEFAULT_MODEL_FILENAMES['shapes']), 'w') as f:
json.dump(signature, f)
@classmethod
def _build_data_shape_signature(cls, mod):
""" Returns a list of data shape description dicts. Each element in the
returned list is a dict with a 'name' key, mapping to a string name
and a 'shape' key, mapping to a list of ints.
"""
return [{"name": data_desc.name, "shape": [dim for dim in data_desc.shape]}
for data_desc in mod.data_shapes]
@cs.retry(stop_max_delay=1000 * 60 * 15,
wait_exponential_multiplier=100,
wait_exponential_max=30000)
def host_lookup(host):
""" Retrying host lookup on host """
return socket.gethostbyname(host)
def _run_mxnet_process(role, mxnet_env):
""" Runs an mxnet process for the specified role with the specified
environment.
Args:
role (str): The mxnet process role.
mxnet_env (MXNetEnvironment): The mxnet environment used to provide
environment variables for the launched process.
Returns:
(int) The launched process id """
role_env = os.environ.copy()
role_env.update(mxnet_env.env_vars_for_role(role))
return subprocess.Popen("python -c 'import mxnet'", shell=True, env=role_env).pid
def train(base_dir=MXNetTrainingEnvironment.BASE_DIRECTORY):
""" Runs mxnet training on a user supplied module in either a local or distributed
SageMaker environment.
The user supplied module and its dependencies are downloaded from S3, and the module
imported using a ``MXNetTrainingEnvironment`` instance.
Training is invoked by calling a "train" function in the user supplied module.
if the environment contains multiple hosts, then a distributed learning
task is started. This function will, in addition to running the user supplied script
as an mxnet parameter server worker process, launch an additional mxnet server
process. If the host this process is executing on is designated as the scheduler, then
    this function will launch an mxnet scheduler parameter server process.
Args:
base_dir (str): The SageMaker container environment base directory.
"""
mxnet_env = MXNetTrainingEnvironment(base_dir)
logger.info("MXNetTrainingEnvironment: {}".format(repr(mxnet_env.__dict__)))
if mxnet_env.user_script_archive.lower().startswith('s3://'):
mxnet_env.download_user_module()
logger.info("Starting distributed training task")
if mxnet_env.current_host_scheduler:
_run_mxnet_process("scheduler", mxnet_env)
_run_mxnet_process("server", mxnet_env)
os.environ.update(mxnet_env.env_vars_for_role("worker"))
user_module = mxnet_env.import_user_module()
train_args = inspect.getargspec(user_module.train)
# avoid forcing our callers to specify **kwargs in their function
# signature. If they have **kwargs we still pass all the args, but otherwise
# we will just pass what they ask for.
if train_args.keywords is None:
kwargs_to_pass = {}
for arg in train_args.args:
if arg != "self" and arg in mxnet_env.kwargs_for_training:
kwargs_to_pass[arg] = mxnet_env.kwargs_for_training[arg]
else:
kwargs_to_pass = mxnet_env.kwargs_for_training
model = user_module.train(**kwargs_to_pass)
if model:
if hasattr(user_module, 'save'):
user_module.save(model, mxnet_env.model_dir)
else:
mxnet_env.default_save(model)
mxnet_env.write_success_file()
| 40.125628 | 97 | 0.672386 | 1,039 | 7,985 | 4.980751 | 0.282002 | 0.027826 | 0.013913 | 0.008116 | 0.108986 | 0.035749 | 0.035749 | 0.024928 | 0.024928 | 0 | 0 | 0.006753 | 0.239699 | 7,985 | 198 | 98 | 40.328283 | 0.84566 | 0.405009 | 0 | 0.070707 | 0 | 0 | 0.106301 | 0.005666 | 0 | 0 | 0 | 0 | 0 | 1 | 0.10101 | false | 0.040404 | 0.090909 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5763a1c795b3daa155e3f3d837a14612e86d83f5 | 880 | py | Python | Backjoon/14888/solution.py | seunghwanly/CODING-TEST | a820da950c163d399594770199aa2e782d1fbbde | [
"MIT"
] | null | null | null | Backjoon/14888/solution.py | seunghwanly/CODING-TEST | a820da950c163d399594770199aa2e782d1fbbde | [
"MIT"
] | null | null | null | Backjoon/14888/solution.py | seunghwanly/CODING-TEST | a820da950c163d399594770199aa2e782d1fbbde | [
"MIT"
] | null | null | null | import sys
# input
N = int(sys.stdin.readline().rstrip())
numbers = list(map(int, sys.stdin.readline().split()))
operators = list(map(int, sys.stdin.readline().split()))
MIN = 1000000000
MAX = -1000000000
# Exhaustive (brute-force) search over every way of assigning the remaining operators
def search(picked, result):
global MIN, MAX, numbers, operators
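    # Base case: all N numbers have been consumed, so update the global extremes with this expression's value.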
if picked == N:
if MIN > result:
MIN = result
if MAX < result:
MAX = result
else:
for i in range(4):
if operators[i] > 0:
operators[i] -= 1
if i == 0: search(picked + 1, result + numbers[picked])
elif i == 1: search(picked + 1, result - numbers[picked])
elif i == 2: search(picked + 1, result * numbers[picked])
else: search(picked + 1, int(result / numbers[picked]))
operators[i] += 1
search(1, numbers[0])
print(MAX)
print(MIN)
| 25.882353 | 73 | 0.544318 | 111 | 880 | 4.315315 | 0.306306 | 0.125261 | 0.10856 | 0.118998 | 0.350731 | 0.350731 | 0.283925 | 0.154489 | 0 | 0 | 0 | 0.054908 | 0.317045 | 880 | 33 | 74 | 26.666667 | 0.742097 | 0.011364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.04 | 0 | 0.08 | 0.08 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5764c36bc762264c3504a3fc9c9f6ee97bc76342 | 2,066 | py | Python | python-indexer/src/parse_page.py | tkuriyama/elm-package-search | e68bf9cf66686630df1e1b502684a354028d6563 | [
"BSD-3-Clause"
] | null | null | null | python-indexer/src/parse_page.py | tkuriyama/elm-package-search | e68bf9cf66686630df1e1b502684a354028d6563 | [
"BSD-3-Clause"
] | null | null | null | python-indexer/src/parse_page.py | tkuriyama/elm-package-search | e68bf9cf66686630df1e1b502684a354028d6563 | [
"BSD-3-Clause"
] | null | null | null | """Parse HTML pages.
"""
from bs4 import BeautifulSoup # type: ignore
import nltk_utils # type: ignore
import parser_types as PT # type: ignore
import re # type: ignore
from typing import List, Tuple # type: ignore
################################################################################
# Parse Pages other than About
def parse_page(html: str) -> List[PT.Word]:
"""Parse page, return tokenized and stemmed plaintext words."""
words = extract_text(html) + ' ' + ' '.join(extract_code_names(html))
tokens = nltk_utils.tokenize_and_filter(words)
return nltk_utils.stem(tokens)
def extract_text(html: str) -> str:
"""Extract plaintext from HTML."""
soup = BeautifulSoup(html, 'html.parser')
plaintexts = [p.text for p in soup.find_all(['h1', 'h2', 'h3', 'p'])]
return ' '.join(plaintexts)
def extract_code_names(html: str) -> List[PT.Word]:
"""Extract type and function names from HTML."""
soup = BeautifulSoup(html, 'html.parser')
headers = soup.find_all('div', class_='docs-header')
names = []
for header in headers:
text = header.text
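        # Try, in order: a value/function type signature, a type alias declaration, then a plain type declaration.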
g = re.findall(r'^([A-Za-z0-9]+)\s+:.*', text)
if g:
names.append(g[0])
else:
g = re.findall(r'^type alias ([A-Za-z0-9]+)\s.*', text)
if g:
names.append(g[0])
else:
g = re.findall(r'^type ([A-Za-z0-9]+)\s.*', text)
if g:
names.append(g[0])
return names
################################################################################
# Parse About
def parse_about(html: str) -> Tuple[List[PT.Word], List[str]]:
"""Parse about page, return plaintext and list of dependencies."""
soup = BeautifulSoup(html, 'html.parser')
tokens = nltk_utils.tokenize_and_filter(soup.find('p').text)
tokens_ = nltk_utils.stem(tokens)
dependencies = [cell.text for cell in soup.find_all('td')
if not re.findall(r'[0-9]\.[0-9]\.[0-9].*', cell.text)]
return tokens_, dependencies
| 31.30303 | 80 | 0.555179 | 265 | 2,066 | 4.237736 | 0.283019 | 0.044524 | 0.035619 | 0.066785 | 0.290294 | 0.232413 | 0.175423 | 0.105966 | 0.105966 | 0.105966 | 0 | 0.011845 | 0.223621 | 2,066 | 65 | 81 | 31.784615 | 0.688279 | 0.152469 | 0 | 0.289474 | 0 | 0 | 0.100128 | 0.026958 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.131579 | 0 | 0.342105 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5765b4e93bc48ad433757057a7a0ac799a0e3ec5 | 5,187 | py | Python | KG/DuEE_baseline/bin/predict_eval_process.py | pkulzb/Research | 88da4910a356f1e95e1e1e05316500055533683d | [
"Apache-2.0"
] | 1,319 | 2020-02-14T10:42:07.000Z | 2022-03-31T15:42:18.000Z | KG/DuEE_baseline/bin/predict_eval_process.py | pkulzb/Research | 88da4910a356f1e95e1e1e05316500055533683d | [
"Apache-2.0"
] | 192 | 2020-02-14T02:53:34.000Z | 2022-03-31T02:25:48.000Z | KG/DuEE_baseline/bin/predict_eval_process.py | pkulzb/Research | 88da4910a356f1e95e1e1e05316500055533683d | [
"Apache-2.0"
] | 720 | 2020-02-14T02:12:38.000Z | 2022-03-31T12:21:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Convert test data and model predictions into the evaluation data format.
"""
import os
import json
import sys
from utils import utils
def test_data_2_eval():
"""test_2_eval_data"""
test_file_path = sys.argv[2]
save_path = sys.argv[3]
if not test_file_path or not save_path:
raise Exception("must set test_data_path and save_path")
datas = utils.read_by_lines(test_file_path)
all_events = {}
for data in datas:
d_json = json.loads(data)
text = d_json["text"]
_id = utils.cal_md5(text.encode("utf-8"))
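        # Group events by the md5 of the sentence text so that multiple events for the same sentence merge into one record.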
event = {
"trigger": d_json["trigger"],
"trigger_start_index": d_json["trigger_start_index"],
"event_type": d_json["event_type"],
"class": d_json["class"],
"arguments": d_json["arguments"],
}
if _id not in all_events:
all_events[_id] = {u"id": _id, u"text": text, u"event_list": []}
all_events[_id][u"event_list"].append(event)
outputs = [json.dumps(x, ensure_ascii=False) for x in all_events.values()]
utils.write_by_lines(save_path, outputs)
print(u"test data 2 eval data, inputs {} outputs {}".format(
len(datas), len(outputs)))
def predict_data_2_eval():
"""pred_process_with_golden_type"""
pred_trigger_path = sys.argv[2]
pred_role_path = sys.argv[3]
schema_path = sys.argv[4]
save_path = sys.argv[5]
if not pred_trigger_path or not pred_role_path or not schema_path or not save_path:
raise Exception(
"must set pred_trigger_path and pred_role_path and schema_path and save_path"
)
print(u"predict data 2 eval data start")
trigger_data_list = utils.read_by_lines(pred_trigger_path)
trigger_datas = {}
for d in trigger_data_list:
d_json = json.loads(d)
trigger_datas[d_json["event_id"]] = d_json
print(u"load trigger predict datas {} from {}".format(
len(trigger_datas), pred_trigger_path))
role_data_list = utils.read_by_lines(pred_role_path)
role_datas = {}
for d in role_data_list:
d_json = json.loads(d)
role_datas[d_json["event_id"]] = d_json
print(u"load role predict datas {} from {}".format(
len(role_datas), pred_role_path))
schema_data_list = utils.read_by_lines(schema_path)
schema_datas = {}
for d in schema_data_list:
d_json = json.loads(d)
schema_datas[d_json["event_type"]] = [
r["role"] for r in d_json["role_list"]
]
print(u"load schema datas {} from {}".format(
len(schema_data_list), schema_path))
all_events = {}
for t_json in trigger_datas.values():
text = t_json["sentence"]
_id = utils.cal_md5(text.encode("utf-8"))
exist_event_type = set()
for tri_info in t_json["trigger_ret"]:
event_type = tri_info["event_type"]
if event_type in exist_event_type:
continue
trigger = tri_info["text"]
role_type_set = set(schema_datas[event_type])
r_json = role_datas[t_json["event_id"]]
arguments = []
for p_r in r_json["roles_ret"]:
role_type = p_r["role_type"]
if role_type in role_type_set:
arguments.append({
u"role": role_type,
u"argument": p_r["text"]
})
if len(arguments) > 0:
event = {
u"trigger": trigger,
u"event_type": event_type,
u"arguments": arguments
}
if _id not in all_events:
all_events[_id] = {
u"id": _id,
u"text": text,
u"event_list": []
}
all_events[_id][u"event_list"].append(event)
exist_event_type.add(event_type)
outputs = [json.dumps(x, ensure_ascii=False) for x in all_events.values()]
utils.write_by_lines(save_path, outputs)
print(u"predict data 2 eval data is finished, outputs {}".format(
len(outputs)))
def main():
"""main"""
func_mapping = {
"predict_data_2_eval": predict_data_2_eval,
"test_data_2_eval": test_data_2_eval
}
func_name = sys.argv[1]
    if func_name not in func_mapping:
raise Exception("no function {}, please choice {}".format(
func_name, u"|".join(func_mapping.keys())))
func_mapping[func_name]()
if __name__ == '__main__':
main()
| 34.812081 | 89 | 0.604396 | 720 | 5,187 | 4.063889 | 0.222222 | 0.027341 | 0.027683 | 0.027341 | 0.294942 | 0.272044 | 0.263841 | 0.193438 | 0.174983 | 0.149009 | 0 | 0.008363 | 0.285329 | 5,187 | 148 | 90 | 35.047297 | 0.780955 | 0.132832 | 0 | 0.151786 | 0 | 0 | 0.160054 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026786 | false | 0 | 0.035714 | 0 | 0.0625 | 0.053571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5765c5451f6e97beb759b5e735880aeec2b21afd | 858 | py | Python | 150-Challenges/Challenges 20 - 26/Challenge 23.py | DGrifferty/Python | d725301664db2cbcfd5c4f5974745b4d81c8e28a | [
"Apache-2.0"
] | null | null | null | 150-Challenges/Challenges 20 - 26/Challenge 23.py | DGrifferty/Python | d725301664db2cbcfd5c4f5974745b4d81c8e28a | [
"Apache-2.0"
] | null | null | null | 150-Challenges/Challenges 20 - 26/Challenge 23.py | DGrifferty/Python | d725301664db2cbcfd5c4f5974745b4d81c8e28a | [
"Apache-2.0"
] | null | null | null | # 023
# Ask the user to type in the first line of a nursery rhyme and display
# the length of the string. Ask for a starting number and an
# ending number and then display just that section of the text
# (remember Python starts counting from 0 and not 1).
rhyme = list()
while True:
try:
if not rhyme:
rhyme = input('Please enter the first line of a nursery '
'rhyme: ')
print(f'There are {len(rhyme)} characters in that line')
        from_to = input('Please type in the starting character '
                        'and the final character: ')
from_to = from_to.split(' ')
for index, value in enumerate(from_to):
from_to[index] = int(value)
print(rhyme[from_to[0] - 1:from_to[1] + 1])
break
except Exception as e:
print(e)
| 29.586207 | 71 | 0.596737 | 126 | 858 | 4.007937 | 0.5 | 0.083168 | 0.035644 | 0.055446 | 0.106931 | 0.106931 | 0.106931 | 0 | 0 | 0 | 0 | 0.015517 | 0.324009 | 858 | 28 | 72 | 30.642857 | 0.855172 | 0.286713 | 0 | 0 | 0 | 0 | 0.272277 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.1875 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
576655e160e5c0cac35d0a56a097cadf140806c6 | 10,015 | py | Python | organisation/tasks.py | mohdbakhrayba/it-assets | ea03882ffd70e40c82f5684dc4980ff46520843b | [
"Apache-2.0"
] | null | null | null | organisation/tasks.py | mohdbakhrayba/it-assets | ea03882ffd70e40c82f5684dc4980ff46520843b | [
"Apache-2.0"
] | null | null | null | organisation/tasks.py | mohdbakhrayba/it-assets | ea03882ffd70e40c82f5684dc4980ff46520843b | [
"Apache-2.0"
] | null | null | null | from datetime import date, datetime, timedelta
from django.conf import settings
import logging
from collections import OrderedDict
import psycopg2
import pytz
import titlecase
TZ = pytz.timezone(settings.TIME_ZONE)
LOGGER = logging.getLogger('sync_tasks')
ALESCO_DB_FIELDS = (
'employee_id', 'surname', 'initials', 'first_name', 'second_name', 'gender',
'date_of_birth', 'occup_type', 'current_commence', 'job_term_date',
'occup_commence_date', 'occup_term_date', 'position_id', 'occup_pos_title',
'clevel1_id', 'clevel1_desc', 'clevel5_id', 'clevel5_desc', 'award',
'classification', 'step_id', 'emp_status', 'emp_stat_desc',
'location', 'location_desc', 'paypoint', 'paypoint_desc', 'manager_emp_no',
)
ALESCO_DATE_MAX = date(2049, 12, 31)
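# Dates equal to this value are treated as "no termination date" and mapped to None below.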
def alesco_scrub_title(title):
# remove extra spaces
title_raw = ' '.join(title.upper().split())
# ranger job titles have too much junk attached, jettison!
if title_raw.startswith('RANGER'):
return 'Ranger'
if title_raw.startswith('SENIOR RANGER'):
return 'Senior Ranger'
def replace(word, **kwargs):
result = None
prefix = ''
suffix = ''
if word.startswith('('):
prefix = '('
word = word[1:]
if word.endswith(')'):
suffix = ')'
word = word[:-1]
if word.upper() in ('RIA', 'DBCA', 'BGPA', 'ZPA', 'PVS', 'IT', 'ICT', 'AV', 'HR', 'GIS', 'FMDP', 'SFM', 'OIM', 'HAAMC', 'TEC', 'MATES', 'AWU', 'FOI', 'KABC', 'VOG', 'WSS', 'EDRMS', 'LUP', 'WA', 'KSCS', 'OT'):
result = word.upper()
else:
expand = {
'&': 'and',
'BG': 'Botanic Garden',
'DEPT': 'Department',
'CON': 'Conservation',
'CONS': 'Conservation',
'CONSER': 'Conservation',
'CONSERV': 'Conservation',
'COORD': 'Coordinator',
'CO-ORDINATOR': 'Coordinator',
'COORDIN': 'Coordinator',
'CUST': 'Customer',
'MGMT': 'Management',
'IS': 'Island',
'NP': 'National Park',
'OCC': 'Occupational',
'SAF': 'Safety',
'SRV': 'Service',
'SNR': 'Senior',
'SERVIC': 'Services',
'SCIENT': 'Scientist',
'SCIENT.': 'Scientist',
'ODG': 'Office of the Director General',
'CHAIRPERSON,': 'Chairperson -',
'OFFICER,': 'Officer -',
'DIRECTOR,': 'Director -',
'LEADER,': 'Leader -',
'MANAGER,': 'Manager -',
'COORDINATOR,': 'Coordinator -',
}
if word.upper() in expand:
result = expand[word.upper()]
if result:
return prefix + result + suffix
title_fixed = titlecase.titlecase(title_raw, callback=replace)
return title_fixed
def alesco_date_to_dt(dt, hour=0, minute=0, second=0):
"""Take in a date object and return it as a timezone-aware datetime.
"""
d = TZ.localize(datetime(dt.year, dt.month, dt.day, 0, 0))
return d + timedelta(hours=hour, minutes=minute, seconds=second)
def update_manager_from_alesco(user):
from .models import DepartmentUser
manager = None
if user.alesco_data:
managers = [x['manager_emp_no'] for x in user.alesco_data if x['manager_emp_no']]
managers = OrderedDict.fromkeys(managers).keys()
managers = [DepartmentUser.objects.filter(employee_id=x).first() for x in managers]
managers = [x for x in managers if x and (user.pk != x.pk)]
if managers:
manager = managers[0]
if manager:
if manager != user.parent:
if manager in user.get_descendants():
LOGGER.info('Removing manager relationship from {}, should be fixed next cycle'.format(manager.email))
manager.parent = None
manager.save()
LOGGER.info('Updating manager for {} from {} to {}'.format(user.email, user.parent.email if user.parent else None, manager.email if manager else None))
user.parent = manager
user.save()
def update_term_date_from_alesco(user):
from .models import DepartmentUser
today = alesco_date_to_dt(date.today())
term_date = None
if user.alesco_data:
term_dates = [date.fromisoformat(x['job_term_date']) for x in user.alesco_data if x['job_term_date']]
if term_dates:
term_date = max(term_dates)
term_date = alesco_date_to_dt(term_date) if term_date and term_date != ALESCO_DATE_MAX else None
if term_date:
stored_term_date = TZ.normalize(user.date_hr_term) if user.date_hr_term else None
if term_date != stored_term_date:
if user.hr_auto_expiry:
LOGGER.info('Updating expiry for {} from {} to {}'.format(user.email, stored_term_date, term_date))
user.expiry_date = term_date
user.date_hr_term = term_date
user.save()
def update_title_from_alesco(user):
from .models import DepartmentUser
title = None
if user.alesco_data:
title = next((x['occup_pos_title'] for x in user.alesco_data if 'occup_pos_title' in x and x['occup_pos_title']), None)
if title:
title = alesco_scrub_title(title)
if title:
if title != user.title:
LOGGER.info('Updating title for {} from {} to {}'.format(user.email, user.title, title))
user.title = title
user.save()
def update_location_from_alesco(user):
from .models import DepartmentUser, Location
location = None
if user.alesco_data:
location = next((x['location'] for x in user.alesco_data if 'location' in x and x['location']), None)
location = Location.objects.filter(ascender_code=location).first()
if location:
if location != user.location:
LOGGER.info('Updating location for {} from {} to {}'.format(user.email, user.location, location))
user.location = location
user.save()
def update_user_from_alesco(user):
update_manager_from_alesco(user)
update_term_date_from_alesco(user)
update_title_from_alesco(user)
update_location_from_alesco(user)
def alesco_db_fetch():
"""Returns an iterator which fields rows from a database query until completed.
"""
conn = psycopg2.connect(
host=settings.ALESCO_DB_HOST,
database=settings.ALESCO_DB_NAME,
user=settings.ALESCO_DB_USERNAME,
password=settings.ALESCO_DB_PASSWORD
)
cur = conn.cursor()
query = "SELECT {} FROM {};".format(', '.join(ALESCO_DB_FIELDS), settings.ALESCO_DB_TABLE)
cur.execute(query)
while True:
row = cur.fetchone()
if row is None:
break
yield row
def alesco_db_import(update_dept_user=False):
"""A task to update DepartmentUser field values from Alesco database information.
By default, it saves Alesco data in the alesco_data JSON field.
If update_dept_user == True, the function will also update several other field values.
"""
from .models import DepartmentUser
date_fields = ['date_of_birth', 'current_commence', 'job_term_date', 'occup_commence_date', 'occup_term_date']
status_ranking = [
'PFAS', 'PFA', 'PFT', 'CFA', 'CFT', 'NPAYF',
'PPA', 'PPT', 'CPA', 'CPT', 'NPAYP',
'CCFA', 'CAS', 'SEAS', 'TRAIN', 'NOPAY', 'NON',
]
classification_ranking = [
'CEO', 'CL3', 'CL2', 'CL1',
'L9', 'L8', 'L7',
'SCL6', 'L6',
'SCL5', 'L5',
'SCL4', 'S4', 'L4',
'SCL3', 'S3', 'L3',
'SCL2', 'R2', 'L2',
'SCL1', 'R1', 'L12', 'L1',
]
records = {}
alesco_iter = alesco_db_fetch()
today = date.today()
LOGGER.info('Querying Alesco database for employee information')
for row in alesco_iter:
record = dict(zip(ALESCO_DB_FIELDS, row))
eid = record['employee_id']
if eid not in records:
records[eid] = []
records[eid].append(record)
LOGGER.info('Updating local DepartmentUser information from Alesco data')
for key, record in records.items():
if not DepartmentUser.objects.filter(employee_id=key).exists():
continue
# Perform some sorting to place the employee's Alesco record(s) in order from
# most applicable to least applicable.
record.sort(key=lambda x: classification_ranking.index(x['classification']) if x['classification'] in classification_ranking else 100)
record.sort(key=lambda x: status_ranking.index(x['emp_status']) if x['emp_status'] in status_ranking else 100)
        # Start with current jobs sorted by rank, followed by expired jobs in reverse-chronological order.
current = [x for x in record if x['job_term_date'] is None or x['job_term_date'] >= today]
expired = [x for x in record if x['job_term_date'] and x['job_term_date'] < today]
expired.sort(key=lambda x: x['job_term_date'], reverse=True)
record = current + expired
for rec in record:
for field in date_fields:
rec[field] = rec[field].isoformat() if rec[field] and rec[field] != ALESCO_DATE_MAX else None
user = DepartmentUser.objects.get(employee_id=key)
# order = lambda obj: tuple([x['position_id'] for x in obj])
# if order(user.alesco_data) != order(record):
# print('Changing {}'.format(user.email))
# print([(x['classification'], x['emp_stat_desc'], x['occup_pos_title'], x['job_term_date']) for x in user.alesco_data])
# print([(x['classification'], x['emp_stat_desc'], x['occup_pos_title'], x['job_term_date']) for x in record])
user.alesco_data = record
user.save()
if update_dept_user:
update_user_from_alesco(user)
| 38.079848 | 216 | 0.603195 | 1,232 | 10,015 | 4.724838 | 0.281656 | 0.039856 | 0.020787 | 0.018554 | 0.220409 | 0.145679 | 0.126095 | 0.07387 | 0.058409 | 0.058409 | 0 | 0.007093 | 0.267998 | 10,015 | 262 | 217 | 38.225191 | 0.786932 | 0.108038 | 0 | 0.074627 | 0 | 0 | 0.189545 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.049751 | false | 0.004975 | 0.064677 | 0 | 0.139303 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
576678ebcd87140fa25fe997c95dbee7ed3786b2 | 946 | py | Python | setup.py | rustamzh/synthetic_data | 3ee9edbd452493489fed80071a1e95802068afe1 | [
"MIT"
] | 8 | 2020-03-18T01:20:51.000Z | 2022-02-25T10:22:22.000Z | setup.py | rustamzh/synthetic_data | 3ee9edbd452493489fed80071a1e95802068afe1 | [
"MIT"
] | 9 | 2020-03-17T00:58:31.000Z | 2022-02-10T01:31:14.000Z | setup.py | rustamzh/synthetic_data | 3ee9edbd452493489fed80071a1e95802068afe1 | [
"MIT"
] | 1 | 2020-01-27T20:44:45.000Z | 2020-01-27T20:44:45.000Z | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="synthetic_data",
version="1.0.0",
author="Karan Bhanot",
author_email="bhanotkaran22@gmail.com",
description="Package that enables generation of synthetic data",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/TheRensselaerIDEA/synthetic_data",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent"
],
install_requires=[
"numpy==1.19.5",
"pandas==1.1.5",
"scipy==1.5.4",
"scikit-learn==0.24.2",
"tensorflow==1.13.1",
"psutil==5.8.0",
"tqdm==4.17.0",
"matplotlib==2.1.2",
"seaborn==0.9.0"
],
python_requires='>=3.6, <3.8',
)
| 27.823529 | 68 | 0.614165 | 118 | 946 | 4.822034 | 0.644068 | 0.105448 | 0.066784 | 0.105448 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.055256 | 0.215645 | 946 | 33 | 69 | 28.666667 | 0.71159 | 0 | 0 | 0.064516 | 0 | 0 | 0.451374 | 0.024313 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.032258 | 0 | 0.032258 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5766c599d84dc42fdd684f68e5e4999661ae5877 | 1,267 | py | Python | indicoio/api/tests/test_base.py | IndicoDataSolutions/indicoio | 2d92b09dddbac892934cbc4265f26ea9a4c89fac | [
"MIT"
] | null | null | null | indicoio/api/tests/test_base.py | IndicoDataSolutions/indicoio | 2d92b09dddbac892934cbc4265f26ea9a4c89fac | [
"MIT"
] | 3 | 2020-01-22T15:30:43.000Z | 2020-02-10T15:50:19.000Z | indicoio/api/tests/test_base.py | IndicoDataSolutions/indicoio-py | 2d92b09dddbac892934cbc4265f26ea9a4c89fac | [
"MIT"
] | null | null | null | from unittest.mock import patch
from indicoio.client import RequestProxy
from indicoio.api.base import ObjectProxy
@patch.object(RequestProxy, "_make_request")
def test_object_proxy_in(mock_request_proxy):
obj_proxy = ObjectProxy(mock_arg="mock_arg_value")
assert "mock_arg" in obj_proxy, obj_proxy
@patch.object(RequestProxy, "_make_request")
def test_object_proxy_attrs(mock_request_proxy):
obj_proxy = ObjectProxy(mock_arg="mock_arg_value")
assert obj_proxy["mock_arg"] == "mock_arg_value"
obj_proxy["mock_arg"] = "mock_arg_value_new"
assert obj_proxy["mock_arg"] == "mock_arg_value_new", "Did not update to new value"
@patch.object(RequestProxy, "_make_request")
def test_object_proxy_build(mock_request_proxy):
obj_proxy = ObjectProxy(mock_arg="mock_arg_value")
new_obj = obj_proxy.build_object(ObjectProxy, new_arg="new_arg")
assert isinstance(new_obj, ObjectProxy)
assert new_obj["new_arg"] == "new_arg"
@patch.object(RequestProxy, "_make_request")
def test_object_proxy_get(mock_request_proxy):
obj_proxy = ObjectProxy(mock_arg="mock_arg_value")
assert obj_proxy.get("mock_arg") == "mock_arg_value"
assert obj_proxy.get("mock_argx") is None
assert obj_proxy.get("mock_argx", False) is False
| 33.342105 | 87 | 0.771113 | 188 | 1,267 | 4.781915 | 0.18617 | 0.132369 | 0.097887 | 0.124583 | 0.68743 | 0.68743 | 0.651835 | 0.651835 | 0.557286 | 0.325918 | 0 | 0 | 0.121547 | 1,267 | 37 | 88 | 34.243243 | 0.807727 | 0 | 0 | 0.32 | 0 | 0 | 0.219589 | 0 | 0 | 0 | 0 | 0 | 0.32 | 1 | 0.16 | false | 0 | 0.12 | 0 | 0.28 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5766de1b844e77ba7bfa9ca4e204639de4e2b651 | 1,417 | py | Python | attacks/ctr/separator_oracle.py | jvdsn/crypto-attacks | df37f112c28687efd105b7770b1baa4c53a71ad8 | [
"MIT"
] | 139 | 2020-10-26T00:43:15.000Z | 2022-03-28T20:00:46.000Z | ctr/separator_oracle.py | kanhavishva/crypto-attacks | ab3b17cb880f8592bc266f0a991e606786c8d875 | [
"MIT"
] | 6 | 2021-06-21T05:59:04.000Z | 2022-02-17T22:50:42.000Z | ctr/separator_oracle.py | kanhavishva/crypto-attacks | ab3b17cb880f8592bc266f0a991e606786c8d875 | [
"MIT"
] | 22 | 2021-07-01T08:42:54.000Z | 2022-03-20T20:27:18.000Z | def _find_separator_positions(separator_oracle, c):
separator_positions = []
c = bytearray(c)
for i in range(len(c)):
c[i] ^= 1
valid = separator_oracle(c)
c[i] ^= 1
if not valid:
c[i] ^= 2
valid = separator_oracle(c)
c[i] ^= 2
if not valid:
separator_positions.append(i)
return separator_positions
def attack(separator_oracle, separator_byte, c):
"""
Recovers the plaintext using the separator oracle attack.
:param separator_oracle: the separator oracle, returns True if the separators are correct, False otherwise
:param separator_byte: the separator which is used in the separator oracle
:param c: the ciphertext
:return: the plaintext
"""
separator_positions = _find_separator_positions(separator_oracle, c)
c = bytearray(c)
# Ensure that at least 1 separator is missing.
c[separator_positions[0]] ^= 1
p = bytearray(len(c))
for i in range(len(c)):
if i in separator_positions:
p[i] = separator_byte
else:
c_i = c[i]
# Try every byte until an additional separator is created.
for b in range(256):
c[i] = b
if separator_oracle(c):
p[i] = c_i ^ c[i] ^ separator_byte
break
c[i] = c_i
return p
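# Minimal usage sketch (illustrative only, not part of the module): a toy target that
# XOR-"encrypts" a field-separated message with a fixed keystream, and an oracle that
# only reports whether the decrypted message still contains the expected number of
# separator bytes. All names and values below are assumptions for demonstration.
if __name__ == "__main__":
    import os
    SEPARATOR = ord("&")
    plaintext = b"user=alice&role=admin&lang=en"
    keystream = os.urandom(len(plaintext))
    ciphertext = bytes(p ^ k for p, k in zip(plaintext, keystream))
    def toy_separator_oracle(c):
        # A real target would try to parse the message; here we only check that the
        # decryption contains exactly as many separators as the original message.
        decrypted = bytes(b ^ k for b, k in zip(c, keystream))
        return decrypted.count(SEPARATOR) == plaintext.count(SEPARATOR)
    recovered = attack(toy_separator_oracle, SEPARATOR, ciphertext)
    assert bytes(recovered) == plaintext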
| 30.804348 | 110 | 0.580805 | 184 | 1,417 | 4.331522 | 0.293478 | 0.027604 | 0.100376 | 0.06399 | 0.193225 | 0.193225 | 0.040151 | 0 | 0 | 0 | 0 | 0.010627 | 0.335921 | 1,417 | 45 | 111 | 31.488889 | 0.836344 | 0.275229 | 0 | 0.387097 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0 | 0 | 0.129032 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5769052ac4f45d71dd0915b43f6a9c410aa1abfd | 822 | py | Python | src/fgsm.py | snaka0213/adversarial_attacks | e6e7964e984bdbe1a9bca010548e088b1997af50 | [
"MIT"
] | 3 | 2020-02-27T02:49:39.000Z | 2020-08-01T15:00:49.000Z | src/fgsm.py | snaka0213/adversarial_attacks | e6e7964e984bdbe1a9bca010548e088b1997af50 | [
"MIT"
] | null | null | null | src/fgsm.py | snaka0213/adversarial_attacks | e6e7964e984bdbe1a9bca010548e088b1997af50 | [
"MIT"
] | 1 | 2021-09-06T08:01:44.000Z | 2021-09-06T08:01:44.000Z | #!/usr/bin/env python3
import functools
import torch
import torch.nn as nn
from .attack import attack
def _fgsm(model, X, y, epsilon):
X.requires_grad = True
out = model(X)
_, predicted = torch.max(out.data, 1)
acc = (predicted == y.data).float().sum().item() / X.size(0)
model.zero_grad()
loss = nn.CrossEntropyLoss()(out, y)
loss.backward()
eta = epsilon*X.grad.data.sign()
X_fgsm = X.data + eta
out_fgsm = model(X_fgsm)
_, predicted_fgsm = torch.max(out_fgsm.data, 1)
acc_fgsm = (predicted_fgsm == y.data).float().sum().item() / X.size(0)
return acc, acc_fgsm
def fgsm(loader, model, epsilon=0.1, use_cuda=False, verbose=False):
_atk = functools.partial(_fgsm, epsilon=epsilon)
return attack(loader, model, atk=_atk, use_cuda=use_cuda, verbose=verbose)
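# Hypothetical usage sketch (the data loader, model and the aggregate return value all
# depend on the sibling `attack` module, which is not shown here):
#
#     test_loader = torch.utils.data.DataLoader(test_set, batch_size=128)
#     results = fgsm(test_loader, model, epsilon=0.1, use_cuda=True)
#
# Per batch, `_fgsm` reports the clean accuracy and the accuracy on inputs perturbed by
# epsilon * sign(grad_x loss), i.e. the fast gradient sign method.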
| 28.344828 | 78 | 0.666667 | 126 | 822 | 4.198413 | 0.357143 | 0.034026 | 0.037807 | 0.049149 | 0.086957 | 0.086957 | 0.086957 | 0.086957 | 0 | 0 | 0 | 0.010432 | 0.183698 | 822 | 28 | 79 | 29.357143 | 0.777943 | 0.025547 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.190476 | 0 | 0.380952 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5769344527067ead77a62da401a0051f8f4953b2 | 989 | py | Python | examples/lotka-volterra.py | leighton/tensorsim | 8bed5e2971d1c9991fff2add1258c884614c6b0f | [
"Apache-2.0"
] | null | null | null | examples/lotka-volterra.py | leighton/tensorsim | 8bed5e2971d1c9991fff2add1258c884614c6b0f | [
"Apache-2.0"
] | null | null | null | examples/lotka-volterra.py | leighton/tensorsim | 8bed5e2971d1c9991fff2add1258c884614c6b0f | [
"Apache-2.0"
] | 1 | 2022-01-12T17:38:19.000Z | 2022-01-12T17:38:19.000Z | import numpy as np
import tensorflow as tf
import matplotlib.pyplot as pl
import tensorsim as ts
""" Model
dX = (a*X - B*X*Y)dt #Prey
dY = (B*X*Y - mu*Y)dt #Predator
"""
n_reps = 100
X_t0, Y_t0 = 300., 300.
a, B, mu = 0.1, 0.0005, 0.2
with tf.Graph().as_default():
# initial conditions
ics = tf.constant([[X_t0]*n_reps, [Y_t0]*n_reps])
# simulation parameters
theta = tf.constant([[a],[B],[mu]])
#stoichiometric matrix
S = tf.constant([
[ 1., 0.],
[-1., 1.],
[ 0.,-1.]
])
#hazard function a.k.a propensity vector
def h_fn(X, Y, a, B, mu):
return [a*X, B*X*Y, mu*Y]
Y = ts.integrate.mjp(ics, theta, S, h_fn, n_jumps=30000)
with tf.Session() as sess:
# t@(n_jumps, n_reps), Z@(n_jumps, n_var, n_reps)
t, Z = sess.run(Y)
fig = pl.figure(figsize=(20,12))
for q in range(n_reps):
pl.plot(t[:,q], Z[:,0,q], "g-", t[:,q], Z[:,1,q], "r-", linewidth=0.05)
pl.ylim((0,1500))
pl.show()
| 21.5 | 75 | 0.553084 | 181 | 989 | 2.928177 | 0.441989 | 0.056604 | 0.016981 | 0.015094 | 0.035849 | 0 | 0 | 0 | 0 | 0 | 0 | 0.06275 | 0.242669 | 989 | 45 | 76 | 21.977778 | 0.64486 | 0.149646 | 0 | 0 | 0 | 0 | 0.005291 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.16 | 0.04 | 0.24 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
576c31fb8846ecc44f15de5c6e4fe795690930aa | 3,595 | py | Python | VkChatBot/tests.py | valerii-chirkov/PythonToolsAndPrograms | ba75559811f752562b8b3e73a51660c48aad7bad | [
"MIT"
] | null | null | null | VkChatBot/tests.py | valerii-chirkov/PythonToolsAndPrograms | ba75559811f752562b8b3e73a51660c48aad7bad | [
"MIT"
] | null | null | null | VkChatBot/tests.py | valerii-chirkov/PythonToolsAndPrograms | ba75559811f752562b8b3e73a51660c48aad7bad | [
"MIT"
] | null | null | null | from copy import deepcopy
from unittest import TestCase
from unittest.mock import patch, Mock
from pony.orm import db_session, rollback
from vk_api.bot_longpoll import VkBotMessageEvent
from bot import Bot
from chatbot import settings
from chatbot.generate_ticket import generate_ticket
def isolate_db(test_func):
def wrapper(*args, **kwargs):
with db_session:
test_func(*args, **kwargs)
rollback()
return wrapper
class Test1(TestCase):
RAW_EVENT = {
'type': 'message_new',
'object': {'date': 1613066709, 'from_id': 411756322, 'id': 98, 'out': 0, 'peer_id': 411756322,
'text': 'hi bot', 'conversation_message_id': 83, 'fwd_messages': [], 'important': False,
'random_id': 0, 'attachments': [], 'is_hidden': False},
'group_id': 202152694, 'event_id': '7546c9ba003b5f66ad030219ce15db33df7ad170'}
def test_ok(self):
count = 5
obj = {'a': 1}
events = [obj] * count # [obj, obj, ...]
long_poller_mock = Mock(return_value=events)
long_poller_listen_mock = Mock()
long_poller_listen_mock.listen = long_poller_mock
with patch('bot.VkApi'):
with patch('bot.VkBotLongPoll', return_value=long_poller_listen_mock):
bot = Bot('', '')
bot.on_event = Mock()
bot.send_image = Mock()
bot.run()
bot.on_event.assert_called()
bot.on_event.assert_any_call(obj)
assert bot.on_event.call_count == count
INPUTS = [
'Hi',
'when',
'where?',
'register me',
'Alex',
'My address is email@email',
'email@email.ru',
]
EXPECTED_OUTPUTS = [
settings.DEFAULT_ANSWER,
settings.INTENTS[0]['answer'],
settings.INTENTS[1]['answer'],
settings.SCENARIOS['registration']['steps']['step1']['text'],
settings.SCENARIOS['registration']['steps']['step2']['text'],
settings.SCENARIOS['registration']['steps']['step2']['failure_text'],
settings.SCENARIOS['registration']['steps']['step3']['text'].format(name='Alex', email='email@email.ru'),
]
# @isolate_db
def test_run_ok(self):
send_mock = Mock()
api_mock = Mock()
api_mock.messages.send = send_mock
events = []
for input_text in self.INPUTS:
event = deepcopy(self.RAW_EVENT)
event['object']['text'] = input_text
events.append(VkBotMessageEvent(event))
long_poller_mock = Mock()
long_poller_mock.listen = Mock(return_value=events)
with patch('bot.VkBotLongPoll', return_value=long_poller_mock):
bot = Bot('', '')
bot.api = api_mock
bot.send_image = Mock()
bot.run()
assert send_mock.call_count == len(self.INPUTS)
real_outputs = []
for call in send_mock.call_args_list:
args, kwargs = call
real_outputs.append(kwargs['message'])
assert real_outputs == self.EXPECTED_OUTPUTS
def test_image_generation(self):
with open('100.jpeg', 'rb') as avatar_file:
avatar_mock = Mock()
avatar_mock.content = avatar_file.read()
with patch('requests.get', return_value=avatar_mock):
ticket_file = generate_ticket('Valerii', 'email@email.ru')
with open('ticket_example.png', 'rb') as expected:
expected_bytes = expected.read()
assert ticket_file.read() == expected_bytes
| 32.981651 | 113 | 0.594715 | 407 | 3,595 | 5.029484 | 0.321867 | 0.039082 | 0.034196 | 0.066439 | 0.130923 | 0.11236 | 0.070347 | 0.044944 | 0 | 0 | 0 | 0.030757 | 0.276495 | 3,595 | 108 | 114 | 33.287037 | 0.756248 | 0.00751 | 0 | 0.069767 | 0 | 0 | 0.14446 | 0.017672 | 0 | 0 | 0 | 0 | 0.069767 | 1 | 0.05814 | false | 0 | 0.104651 | 0 | 0.22093 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
576e8aab7f5c1f7fe9a9b6b411821065146a8094 | 2,730 | py | Python | src/harness/util_test.py | NSF-Swift/Spectrum-Access-System | 02cf3490c9fd0cec38074d3bdb3bca63bb7d03bf | [
"Apache-2.0"
] | 58 | 2015-07-22T14:16:52.000Z | 2022-03-10T09:09:33.000Z | src/harness/util_test.py | NSF-Swift/Spectrum-Access-System | 02cf3490c9fd0cec38074d3bdb3bca63bb7d03bf | [
"Apache-2.0"
] | 537 | 2015-07-30T16:28:20.000Z | 2021-09-30T17:12:15.000Z | src/harness/util_test.py | NSF-Swift/Spectrum-Access-System | 02cf3490c9fd0cec38074d3bdb3bca63bb7d03bf | [
"Apache-2.0"
] | 51 | 2015-06-30T00:25:15.000Z | 2022-01-21T00:09:22.000Z | # Copyright 2018 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the util.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
try:
from unittest import mock
except ImportError:
import mock
import util
class UtilTest(unittest.TestCase):
def test_decode_openssl_version(self):
self.assertEqual(
util._decode_openssl_version('OpenSSL 1.1.1j 16 Feb 2021'), 111)
self.assertEqual(
util._decode_openssl_version('OpenSSL 1.0.2t 20 Dec 2019'), 102)
self.assertEqual(
util._decode_openssl_version('OpenSSL 0.9.8f [24 Mar 2010]'), 98)
self.assertEqual(util._decode_openssl_version('BoringSSL'), -1)
def test_getFqdnLocalhost(self):
# Initialize the testConfig with our test data.
util._test_config = util._GetSharedTestConfig()
util._test_config.hostname = 'foo.bar.com'
self.assertEqual(util.getFqdnLocalhost(), 'foo.bar.com')
def test_getUnusedPort_pickAny(self):
# Initialize the testConfig with our test data.
util._test_config = util._GetSharedTestConfig()
util._test_config.min_port = -1
util._test_config.min_port = -1
self.assertGreater(util.getUnusedPort(), 0)
def test_getUnusedPort_pickRange(self):
# Initialize the testConfig with our test data.
util._test_config = util._GetSharedTestConfig()
    # Note that this could fail if these ports are already in use...
START_PORT = 12345
util._test_config.min_port = START_PORT
util._test_config.max_port = START_PORT + 5
self.assertEqual(util.getUnusedPort(), START_PORT)
self.assertEqual(util.getUnusedPort(), START_PORT + 1)
self.assertEqual(util.getUnusedPort(), START_PORT + 2)
self.assertEqual(util.getUnusedPort(), START_PORT + 3)
self.assertEqual(util.getUnusedPort(), START_PORT + 4)
with self.assertRaises(AssertionError):
util.getUnusedPort()
util.releasePort(START_PORT + 2)
self.assertEqual(util.getUnusedPort(), START_PORT + 2)
with self.assertRaises(AssertionError):
util.getUnusedPort()
if __name__ == '__main__':
unittest.main()
| 34.556962 | 77 | 0.732967 | 360 | 2,730 | 5.35 | 0.4 | 0.08567 | 0.108515 | 0.099688 | 0.443925 | 0.433022 | 0.280374 | 0.239356 | 0.19055 | 0.137591 | 0 | 0.026222 | 0.175824 | 2,730 | 78 | 78 | 35 | 0.829778 | 0.305128 | 0 | 0.311111 | 0 | 0 | 0.064533 | 0 | 0 | 0 | 0 | 0 | 0.311111 | 1 | 0.088889 | false | 0 | 0.177778 | 0 | 0.288889 | 0.022222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5772ffe732a3b2299d6c251f5f4ac915f9bfff9a | 3,124 | py | Python | aptl3/tests/integrated/test_coco.py | matteoterruzzi/aptl3 | 680ab58ffa79d0eee293729d36f677a588350519 | [
"MIT"
] | null | null | null | aptl3/tests/integrated/test_coco.py | matteoterruzzi/aptl3 | 680ab58ffa79d0eee293729d36f677a588350519 | [
"MIT"
] | null | null | null | aptl3/tests/integrated/test_coco.py | matteoterruzzi/aptl3 | 680ab58ffa79d0eee293729d36f677a588350519 | [
"MIT"
] | null | null | null | import os
import warnings
from tempfile import TemporaryDirectory
import numpy as np
import pytest
from aptl3.db import Database
from aptl3.scripts.load_coco import load_coco
coco_dir = './coco/annotations/'
@pytest.mark.skipif(not os.path.isdir(coco_dir), reason='test_coco would not find the coco annotations json.')
def test_coco():
with TemporaryDirectory() as _data_dir:
db = Database(data_dir=_data_dir)
_train_images = 5 # This shall be a short test...
load_coco(db=db, coco_dir=coco_dir, data_type='train2017', max_samples=_train_images)
c = db.execute('SELECT relation_id from Relations')
relation_id: int = c.fetchone()[0]
c = db.execute("SELECT COUNT(*) FROM MediaLocations WHERE url NOT LIKE 'data:,%'")
_inserted_images: int = c.fetchone()[0]
assert _inserted_images == _train_images
c = db.execute("SELECT COUNT(*) FROM MediaLocations WHERE url LIKE 'data:,%'")
_inserted_sentences: int = c.fetchone()[0]
assert _inserted_sentences >= _train_images
embedding_id_images = db.add_embedding('image')
embedding_id_sentences = db.add_embedding('sentence')
manifold_id_images, _inserted = db.build_new_manifold(embedding_id=embedding_id_images)
assert _inserted == _inserted_images + _inserted_sentences # items + holes
manifold_id_sentences, _inserted = db.build_new_manifold(embedding_id=embedding_id_sentences)
assert _inserted == _inserted_sentences + _inserted_images # items + holes
gpa_id, gpa = db.build_generalized_procrustes(
src_embedding_ids=(embedding_id_images, embedding_id_sentences),
src_relation_ids=(relation_id,),
min_samples=_inserted_images,
max_samples=_inserted_sentences,
)
assert gpa.procrustes_distance < 1
################################################################################################################
_val_images = 3
load_coco(db=db, coco_dir=coco_dir, data_type='val2017', max_samples=_val_images)
_, _inserted = db.build_new_manifold(embedding_id=embedding_id_images)
assert _inserted >= _val_images
_, _inserted = db.build_new_manifold(embedding_id=embedding_id_sentences)
assert _inserted >= _val_images
################################################################################################################
c = db.execute('SELECT media_id FROM Media')
_tested = 0
for media_id, in c:
for _embedding_id, v in db.get_media_vectors(media_id=media_id):
w = gpa.predict(src_embedding_id=_embedding_id, dest_embedding_id=_embedding_id, x=np.atleast_2d(v))[0]
dist = np.linalg.norm(v-w)
assert dist >= -1.0e-5
assert dist < 0.2 #
if dist > 0.01:
warnings.warn(f'{dist=:.4f} should be approximately 0 as we did v @ R @ R.T with R orthogonal.')
_tested += 1
assert _tested >= _train_images + _val_images
| 40.571429 | 120 | 0.620999 | 376 | 3,124 | 4.792553 | 0.295213 | 0.103774 | 0.066593 | 0.073252 | 0.320755 | 0.308546 | 0.257492 | 0.257492 | 0.257492 | 0.205327 | 0 | 0.012392 | 0.225032 | 3,124 | 76 | 121 | 41.105263 | 0.731929 | 0.018246 | 0 | 0.037736 | 0 | 0.018868 | 0.12685 | 0 | 0 | 0 | 0 | 0 | 0.188679 | 1 | 0.018868 | false | 0 | 0.132075 | 0 | 0.150943 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
57741b8c449b1a45735b70561ed716e153f13b4e | 943 | py | Python | venv/Lib/site-packages/pynance/tst/unit/data/test_compare.py | LeonardoHMS/imobi | 6b2b97a05df67ea7d493f7b601382f65c6629cc2 | [
"MIT"
] | 35 | 2015-03-12T04:16:14.000Z | 2020-12-17T18:10:15.000Z | venv/Lib/site-packages/pynance/tst/unit/data/test_compare.py | LeonardoHMS/imobi | 6b2b97a05df67ea7d493f7b601382f65c6629cc2 | [
"MIT"
] | 31 | 2015-03-16T21:31:04.000Z | 2021-01-26T00:12:34.000Z | venv/Lib/site-packages/pynance/tst/unit/data/test_compare.py | LeonardoHMS/imobi | 6b2b97a05df67ea7d493f7b601382f65c6629cc2 | [
"MIT"
] | 18 | 2015-09-30T10:40:26.000Z | 2021-01-25T21:20:44.000Z | """
Tests for performance comparison functions.
Copyright (c) 2016 Marshall Farrier
license http://opensource.org/licenses/MIT
"""
import unittest
import numpy as np
import pandas as pd
import pynance as pn
class TestCompare(unittest.TestCase):
def test_compare(self):
rng = pd.date_range('2016-03-28', periods=4)
eqs = ('SCTY', 'SPWR')
eq_dfs = [pd.DataFrame(index=rng, columns=['Close']) for i in range(len(eqs))]
eq_dfs[0].iloc[:, 0] = [2., 4., 6., 8.]
eq_dfs[1].iloc[:, 0] = [4., 4., 2., 6.]
rel_perf = pn.data.compare(eq_dfs, eqs)
self.assertTrue((rng == rel_perf.index).all(), 'incorrect index')
self.assertTrue((list(eqs) == list(rel_perf)), 'incorrect column labels')
self.assertTrue(np.allclose(np.array([[1., 2., 3., 4.], [1., 1., .5, 1.5]]).T, rel_perf.to_numpy()),
'incorrect values')
if __name__ == '__main__':
unittest.main()
| 30.419355 | 108 | 0.610817 | 136 | 943 | 4.095588 | 0.558824 | 0.035907 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.045515 | 0.207847 | 943 | 30 | 109 | 31.433333 | 0.700134 | 0.130435 | 0 | 0 | 0 | 0 | 0.10468 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 1 | 0.055556 | false | 0 | 0.222222 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
57741dcb2e85afb9d0268cf305517b0af7c6cb61 | 6,192 | py | Python | dsatools/_base/_imf_decomposition/_emd.py | diarmaidocualain/dsatools | 50b9259e2846b5fdd3dc52206967b0ee8d0144de | [
"MIT"
] | null | null | null | dsatools/_base/_imf_decomposition/_emd.py | diarmaidocualain/dsatools | 50b9259e2846b5fdd3dc52206967b0ee8d0144de | [
"MIT"
] | null | null | null | dsatools/_base/_imf_decomposition/_emd.py | diarmaidocualain/dsatools | 50b9259e2846b5fdd3dc52206967b0ee8d0144de | [
"MIT"
] | null | null | null | import numpy as np
import scipy
import scipy.signal
import scipy.interpolate #import Akima1DInterpolator, Rbf, InterpolatedUnivariateSpline, BSpline
def emd(x, order,method = 'cubic', max_itter = 100, tol = 0.1):
'''
    Empirical Mode Decomposition (EMD).
    The empirical mode decomposition method is a nonlinear
    time-domain decomposition into the so-called
    intrinsic mode functions (IMF), based on the idea
    that each component can be reconstructed from its envelope.
Parameters
----------------
* x: 1d ndarray.
* order: int,
number of IMFs (with out remainder).
* method: string,
method of spline approximation:
method = {cubic, akim, rbf, linear, thin_plate}.
    * max_itter: int,
        maximum number of iterations to search for each IMF.
    * tol: float,
        tolerance on the variance of the IMF change between iterations.
Returns
---------------
    * imfs: 2d ndarray,
        intrinsic mode functions (the remainder is not included),
        shape = (order, x.shape[0]).
References
-----------------
[1] N. E. Huang et al.,
"The empirical mode decomposition and the Hilbert
spectrum for nonlinear and non-stationary time series analysis",
Proc. R. Soc. Lond. A, Math. Phys. Sci.,
        vol. 454, no. 1971, 903-995, (1998).
[2] N. E. Huang,
"Hilbert-Huang transform and its applications",
vol. 16. World Scientific, 2014.
[3] Z. Wu, N. E. Huang,
"Ensemble empirical mode decomposition:
A noise-assisted data analysis method",
        Adv. Adapt. Data Anal., vol. 1, no. 1, 1-41 (2008).
[4] J. Zheng, J. Cheng, Y. Yang,
"Partly ensemble empirical mode decomposition:
An improved noise-assisted method for eliminating mode mixing",
        Signal Process., vol. 96, 362-374, (2014).
See also
-----------------------
vmd
hvd
ewt
hht (operators)
'''
x = np.array(x)
N = x.shape[0]
imf = np.zeros((order, N),dtype = x.dtype)
for ord_cnt in range(order):
h = x
for cnt in range(max_itter):
s1 = get_envelope(h, method = method)
s2 = -get_envelope(-h, method = method)
mean_env = (s1+s2)/2
            # for RBF interpolation the envelope is complex
if (x.dtype is complex) and (mean_env.dtype != complex):
h = h - scipy.signal.hilbert(mean_env)
else:
h = h - mean_env
            # Cauchy-type convergence criterion
sd = np.sum(np.square(mean_env))/np.sum(np.square(h))
if (np.abs(sd) < tol) or isimf(h):
break
imf[ord_cnt,:] = h
x = x-h
if ismonotonic(x):
break
return imf
#--------------------------------------------------------
def ismonotonic(x):
'''
    If both maxima and minima exist, return False.
'''
pmax=findpeaks(x)
pmin=findpeaks(-x)
if pmax.size*pmin.size > 0:
return False
else:
return True
#--------------------------------------------------------
def isimf(x):
'''
    If |zero crossings - extrema| is less than or equal to 1, the signal is an IMF.
'''
N = x.shape[0];
# zero crossing
df = (x[1:]*x[:-1])
zc = np.sum(df[df<0])
pmax=findpeaks(x)
pmin=findpeaks(-x)
extremums = pmax.size+pmin.size
if abs(zc-extremums) > 1:
return False
else:
return True
#--------------------------------------------------------
def get_envelope(x, method = 'cubic'):
'''
Function to estimate envelope by spline method.
'''
N = x.shape[0];
p = findpeaks(x)
if(p.size<2):
return np.zeros(N)
points = np.concatenate([[0], p, [N]])
values = np.concatenate([[0], x[p], [0]])
    # TODO: check the mirror extension; in my experiments it was worse
# values, points = x[p],p
# values,points =_extension(values, points, n_points=2)
new_points = np.arange(points[0],points[-1])
fp = np.flatnonzero(new_points == 0)[0]
s=_spline(values, points, new_points, method = method)[fp:fp+N]
return s
#--------------------------------------------------------
def _spline(values, points, new_points, method = 'cubic'):
'''
scipy.interpolate methods.
'''
if(method=='cubic'):
cofs = scipy.interpolate.splrep(points, values.real)
return scipy.interpolate.splev(new_points, cofs)
elif(method=='akim'):
return scipy.interpolate.Akima1DInterpolator(points,values.real)(new_points)
elif(method=='rbf'):
return scipy.interpolate.Rbf(points,values.real, function='gaussian')(new_points)
elif(method=='thin_plate'):
return scipy.interpolate.Rbf(points,values.real, function='thin_plate')(new_points)
elif(method=='linear'):
return scipy.interpolate.Rbf(points,values.real, function='linear')(new_points)
#--------------------------------------------------------
def findpeaks(x):
    ''' Find the maxima of the signal.
'''
# return scipy.signal.argrelmax(np.real(x))[0]
# def peaks(X):
dX = np.sign(np.diff(x.transpose())).transpose()
locs_max = np.where(np.logical_and(dX[:-1] > 0, dX[1:] < 0))[0] + 1
return locs_max
#--------------------------------------------------------
def _extension(values, points, n_points=2,mirror = True ):
'''
    Mirror extension
    (for testing)
'''
N = values.shape[0]
if mirror:
values = np.concatenate(( values[n_points-1::-1],
values,
values[N-1:N-n_points-1:-1] ))
else:
values = np.concatenate(( values[n_points:0:-1],
values,
values[N-2:N-n_points-2:-1] ))
points = np.concatenate((2*points[0] - points[n_points:0:-1],
points,
2*points[-1] - points[N-2:N-n_points-2:-1]))
return values, points
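# Minimal usage sketch (illustrative, not part of the module): decompose a synthetic
# two-tone signal; the fast oscillation should end up in the first IMF and the slow
# one in the second, with whatever is left recoverable as x - sum(imfs).
if __name__ == '__main__':
    t = np.linspace(0, 1, 1000)
    x = np.sin(2 * np.pi * 40 * t) + 0.5 * np.sin(2 * np.pi * 5 * t)
    imfs = emd(x, order=2, method='cubic')
    remainder = x - imfs.sum(axis=0)
    print(imfs.shape, remainder.shape)  # (2, 1000) (1000,)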
| 29.626794 | 96 | 0.520026 | 734 | 6,192 | 4.336512 | 0.322888 | 0.019793 | 0.025134 | 0.00754 | 0.163054 | 0.147974 | 0.053723 | 0.046183 | 0 | 0 | 0.000485 | 0.025185 | 0.301034 | 6,192 | 208 | 97 | 29.769231 | 0.709566 | 0.398417 | 0 | 0.211765 | 0 | 0 | 0.019689 | 0 | 0 | 0 | 0 | 0.004808 | 0 | 1 | 0.082353 | false | 0 | 0.047059 | 0 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
57750b43eac6f1f2f43c702c0debafb39af9b3f3 | 1,913 | py | Python | paasta_tools/secret_tools.py | white105/paasta | 410418fb54d501141f091381ada368a8bf62037b | [
"Apache-2.0"
] | 2 | 2020-04-09T06:58:46.000Z | 2021-05-03T21:56:03.000Z | paasta_tools/secret_tools.py | white105/paasta | 410418fb54d501141f091381ada368a8bf62037b | [
"Apache-2.0"
] | null | null | null | paasta_tools/secret_tools.py | white105/paasta | 410418fb54d501141f091381ada368a8bf62037b | [
"Apache-2.0"
] | 1 | 2020-09-29T03:23:02.000Z | 2020-09-29T03:23:02.000Z | # Copyright 2015-2017 Yelp Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import re
from typing import Optional
SECRET_REGEX = "^SECRET\([A-Za-z0-9_-]*\)$"
def is_secret_ref(env_var_val: str) -> bool:
pattern = re.compile(SECRET_REGEX)
return pattern.match(env_var_val) is not None
def get_hmac_for_secret(
env_var_val: str,
service: str,
soa_dir: str,
vault_environment: str,
) -> Optional[str]:
secret_name = _get_secret_name_from_ref(env_var_val)
secret_path = os.path.join(
soa_dir,
service,
"secrets", "{}.json".format(secret_name),
)
try:
with open(secret_path, 'r') as json_secret_file:
secret_file = json.load(json_secret_file)
try:
return secret_file['environments'][vault_environment]['signature']
except KeyError:
print("Failed to get secret signature at environments:{}:signature in json"
" file".format(vault_environment))
return None
except IOError as e:
print("Failed to open json secret at {}".format(secret_path))
return None
except json.decoder.JSONDecodeError as e:
print("Failed to deserialise json secret at {}".format(secret_path))
return None
def _get_secret_name_from_ref(env_var_val: str) -> str:
return env_var_val.split('(')[1][:-1]
| 33.561404 | 91 | 0.67747 | 269 | 1,913 | 4.643123 | 0.431227 | 0.048038 | 0.043235 | 0.028823 | 0.147318 | 0.107286 | 0.107286 | 0.107286 | 0 | 0 | 0 | 0.01084 | 0.228437 | 1,913 | 56 | 92 | 34.160714 | 0.835366 | 0.288029 | 0 | 0.135135 | 0 | 0 | 0.152706 | 0.037806 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0 | 0.108108 | 0.027027 | 0.351351 | 0.081081 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5777bc8aac7db42bec27a302064dfe812dccf5bc | 722 | py | Python | examples/comms/ISendIRecvExample.py | vibhatha/PythonMPI | e01bb0d4a53059c2bd77f74494db6d2d29844aea | [
"Apache-2.0"
] | null | null | null | examples/comms/ISendIRecvExample.py | vibhatha/PythonMPI | e01bb0d4a53059c2bd77f74494db6d2d29844aea | [
"Apache-2.0"
] | null | null | null | examples/comms/ISendIRecvExample.py | vibhatha/PythonMPI | e01bb0d4a53059c2bd77f74494db6d2d29844aea | [
"Apache-2.0"
] | null | null | null | import sys
sys.path.append('/home/vibhatha/github/bio/PythonMPI')
from comms import Communication
import numpy as np
class ISendIRecvExample:
comms = Communication.Communication()
def example(self):
rank = self.comms.comm.Get_rank()
if (rank == 0):
input = np.array([0,1,2,3,4])
self.comms.isend(input=[input, self.comms.mpi.INT], dest=1, tag=11)
print("Sending Data : " + str(input) + ", from Rank " + str(rank) + "\n")
elif (rank == 1):
data = self.comms.irecv(source=0, tag=11)
print(type(data))
print("Receiving Data : " + str(data) + ", from Rank " + str(rank) + "\n")
ex = ISendIRecvExample()
ex.example()
| 28.88 | 86 | 0.583102 | 94 | 722 | 4.468085 | 0.510638 | 0.085714 | 0.047619 | 0.071429 | 0.07619 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024209 | 0.256233 | 722 | 24 | 87 | 30.083333 | 0.757914 | 0 | 0 | 0 | 0 | 0 | 0.131579 | 0.048476 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.166667 | 0 | 0.333333 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5779c83d850ee723135d94a8709c521285f60a62 | 4,236 | py | Python | pysass.py | WnP/pysass | ba9b949e2ee68f58bd4840a3ae91bd401ebe0253 | [
"MIT"
] | null | null | null | pysass.py | WnP/pysass | ba9b949e2ee68f58bd4840a3ae91bd401ebe0253 | [
"MIT"
] | 1 | 2019-03-08T10:33:34.000Z | 2019-03-19T13:52:46.000Z | pysass.py | WnP/pysass | ba9b949e2ee68f58bd4840a3ae91bd401ebe0253 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import time
from datetime import datetime, timedelta
from functools import wraps
from optparse import AmbiguousOptionError, BadOptionError, OptionParser
from pysassc import main as pysassc_main
from watchdog.events import PatternMatchingEventHandler
from watchdog.observers import Observer
def main(argv=sys.argv):
watch = False
if "-w" in argv:
watch = True
argv.remove("-w")
if "--watch" in argv:
watch = True
argv.remove("--watch")
if watch:
        # Parse only the useful options
parser = PassThroughOptionParser()
parser.add_option(
"-I",
"--include-path",
metavar="DIR",
dest="include_paths",
action="append",
)
options, args = parser.parse_args(argv[1:])
# Fake parsing others
args = fake_parser(args)
rcode = throttled_pysassc_main(argv=argv)
if not args:
sys.exit(rcode)
# Retrieve directories to watch
sourcepath = os.path.dirname(args[0])
paths = options.include_paths + [sourcepath or "."]
observer = Observer()
for path in paths:
observer.schedule(ScssHandler(argv), path=path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
else:
pysassc_main()
class PassThroughOptionParser(OptionParser):
"""
An unknown option pass-through implementation of OptionParser.
When unknown arguments are encountered, bundle with largs and try again,
until rargs is depleted.
sys.exit(status) will still be called if a known argument is passed
incorrectly (e.g. missing arguments or bad argument types, etc.)
"""
def _process_args(self, largs, rargs, values):
while rargs:
try:
OptionParser._process_args(self, largs, rargs, values)
except (BadOptionError, AmbiguousOptionError) as e:
largs.append(e.opt_str)
def fake_parser(args):
ignore_next = False
out = []
for v in args:
if ignore_next:
ignore_next = False
continue
if v in ["-t", "--style", "-s", "--out-style"]:
ignore_next = True
continue
if v in [
"-m",
"-g",
"--sourcemap",
"-p",
"--precision",
"--source-comments",
"-v",
"--version",
"-h",
"--help",
]:
continue
out.append(v)
return out
class ScssHandler(PatternMatchingEventHandler):
patterns = ["*.scss"]
def __init__(self, argv, *args, **kwargs):
self.argv = argv
return super().__init__(*args, **kwargs)
def process(self, event):
print(event.src_path, event.event_type)
throttled_pysassc_main(argv=self.argv)
def on_modified(self, event):
self.process(event)
def on_moved(self, event):
self.process(event)
class Throttle(object):
"""
Decorator that prevents a function from being called more than once every
time period.
To create a function that cannot be called more than once a minute:
@Throttle(minutes=1)
def my_fun():
pass
"""
def __init__(self, seconds=0, minutes=0, hours=0):
self.throttle_period = timedelta(
seconds=seconds, minutes=minutes, hours=hours
)
self.time_of_last_call = datetime.min
def __call__(self, fn):
@wraps(fn)
def wrapper(*args, **kwargs):
now = datetime.now()
time_since_last_call = now - self.time_of_last_call
if time_since_last_call > self.throttle_period:
self.time_of_last_call = now
return fn(*args, **kwargs)
return wrapper
@Throttle(seconds=1)
def throttled_pysassc_main(*args, **kwargs):
print("Compiling...")
pysassc_main(*args, **kwargs)
print("Watching...")
if __name__ == "__main__":
main()
| 25.829268 | 77 | 0.582861 | 470 | 4,236 | 5.104255 | 0.391489 | 0.027511 | 0.02501 | 0.017507 | 0.111713 | 0.046686 | 0 | 0 | 0 | 0 | 0 | 0.003093 | 0.313031 | 4,236 | 163 | 78 | 25.98773 | 0.821306 | 0.148253 | 0 | 0.099099 | 0 | 0 | 0.050891 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.099099 | false | 0.018018 | 0.081081 | 0 | 0.252252 | 0.027027 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
577c18dabc50abfb925ee04dacc4018a9eec7586 | 3,507 | py | Python | url_manager.py | imndszy/pycrawler | 28c0634a802b7c7ea7a0ead2ad8c7b34a542ed2a | [
"MIT"
] | null | null | null | url_manager.py | imndszy/pycrawler | 28c0634a802b7c7ea7a0ead2ad8c7b34a542ed2a | [
"MIT"
] | null | null | null | url_manager.py | imndszy/pycrawler | 28c0634a802b7c7ea7a0ead2ad8c7b34a542ed2a | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
# author: szy
# time: 2017/11/8 15:57
# email: shizhenyu96@gmail.com
import redis
from config import SETTINGS, REDIS_KEY
import GeneralHashFunctions
class BloomFilterRedis(object):
hash_list = ["RSHash", "JSHash", "PJWHash", "ELFHash", "BKDRHash",
"SDBMHash", "DJBHash", "DEKHash"]
def __init__(self, key, host=SETTINGS['REDIS_SERVER'], port=SETTINGS['REDIS_PORT'], hash_list=hash_list):
        # key of the Redis bitmap
self.key = key
        # Redis connection info
self.pool = redis.ConnectionPool(host=host, port=port, db=SETTINGS['REDIS_DB'])
self.handle = redis.StrictRedis(connection_pool=self.pool, charset='utf-8')
self.handle.setbit(self.key, 1, 0)
        # list of hash functions
self.hash_list = hash_list
@classmethod
def random_generator(cls, hash_value):
'''
        Map the value produced by the hash function into the [0, 2^32-1] interval.
'''
return hash_value % (1 << 28)
def insert(self, item):
'''
        Update the bitmap.
'''
for hash_func_str in self.hash_list:
            # get the hash function object
hash_func = getattr(GeneralHashFunctions, hash_func_str)
            # compute the hash value
hash_value = hash_func(item)
            # map the hash value into the [0, 2^32] interval
real_value = BloomFilterRedis.random_generator(hash_value)
            # set the corresponding bit in the bitmap to 1
if self.handle.getbit(self.key, real_value) == 0:
self.handle.setbit(self.key, real_value, 1)
def contain(self, item):
'''
        Check whether the item exists in the filter.
:return: bool
'''
for hash_func_str in self.hash_list:
            # get the hash function object
hash_func = getattr(GeneralHashFunctions, hash_func_str)
            # compute the hash value
hash_value = hash_func(item)
            # map the hash value into the [0, 2^32] interval
real_value = BloomFilterRedis.random_generator(hash_value)
            # if the corresponding bit in the bitmap is 0, this item is new
if self.handle.getbit(self.key, real_value) == 0:
return False
        # if the bits for all hash values are 1, the item is a duplicate; return True
return True
class CrawlerContextManager(object):
def __init__(self):
self.handle = redis.StrictRedis(host=SETTINGS['REDIS_SERVER'],
port=SETTINGS['REDIS_PORT'],
db=SETTINGS['REDIS_DB'])
def urls_in_set(self):
"""
        Check whether there are URLs in the Redis set that was populated during the last crawler run.
        :return: True if any exist, otherwise False
"""
count = self.handle.scard(REDIS_KEY['URL_PERSISTENCE'])
if count == 0:
return False
else:
return True
def get_urls(self):
"""get urls saved in set"""
if not self.urls_in_set():
return set()
return self.handle.smembers(REDIS_KEY['URL_PERSISTENCE'])
def get_crawler_status(self):
status = self.handle.get(REDIS_KEY['SPIDER_STATUS'])
if status is None:
return 0
elif status == "1":
return False
else:
return True
def save_urls_to_redis(self, urls):
for i in urls:
self.handle.sadd(REDIS_KEY['URL_PERSISTENCE'], i)
def delete_older_urls(self):
self.handle.delete(REDIS_KEY['URL_PERSISTENCE'])
def save_status(self, start_update):
if start_update:
self.handle.set(REDIS_KEY['SPIDER_STATUS'], 2)
else:
self.handle.set(REDIS_KEY['SPIDER_STATUS'], 1)
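# Usage sketch (assumes a reachable Redis instance configured via SETTINGS; the key
# name and URL below are arbitrary examples):
if __name__ == '__main__':
    seen = BloomFilterRedis(key='crawler:seen_urls')
    url = 'http://example.com/page/1'
    if not seen.contain(url):
        seen.insert(url)
    print(seen.contain(url))  # True after insertion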
| 31.594595 | 109 | 0.581979 | 403 | 3,507 | 4.870968 | 0.312655 | 0.066225 | 0.022415 | 0.044829 | 0.391238 | 0.322975 | 0.294447 | 0.260825 | 0.215996 | 0.180336 | 0 | 0.018227 | 0.311662 | 3,507 | 110 | 110 | 31.881818 | 0.794946 | 0.134588 | 0 | 0.301587 | 0 | 0 | 0.07605 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.174603 | false | 0 | 0.047619 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
577c57c3ecab14bdea1dcc51cbcbda46ccb73abe | 1,247 | py | Python | engine/classes/match.py | rochford77/ReplayAnalyzerRL | d4c82adff94c29d0b15ab08e7c2393816f165a11 | [
"Apache-2.0"
] | 3 | 2019-10-23T21:05:32.000Z | 2020-06-27T20:28:46.000Z | engine/classes/match.py | rochford77/ReplayAnalyzerRL | d4c82adff94c29d0b15ab08e7c2393816f165a11 | [
"Apache-2.0"
] | 19 | 2019-03-21T00:20:36.000Z | 2019-03-30T03:33:45.000Z | engine/classes/match.py | rochford77/StatManBot | d4c82adff94c29d0b15ab08e7c2393816f165a11 | [
"Apache-2.0"
] | null | null | null | class Match:
raw_matches = []
def __init__(self, data, playlist_filter):
self.map = data["gameMetadata"]["map"]
self.time = data["gameMetadata"]["time"]
self.guid = data["gameMetadata"]["matchGuid"]
self.playlist = data["gameMetadata"]["playlist"]
self.valid_match_created = self.check_valid_match(playlist_filter)
print(data["gameMetadata"]["playlist"])
def look_for_match_index(self):
index = -100
for match in Match.raw_matches:
if match.guid == self.guid:
index = Match.raw_matches.index(match)
break
return index
def check_valid_match(self, playlist_filter):
valid_match = False
is_valid_playlist = (playlist_filter == None or playlist_filter == self.playlist)
if ((self.look_for_match_index() == -100) and is_valid_playlist):
self.add_match()
valid_match = True
return valid_match
def add_match(self):
if len(Match.raw_matches) == 0:
Match.raw_matches.append(self)
else:
matched_index = self.look_for_match_index()
if (matched_index == -100):
Match.raw_matches.append(self)
| 32.815789 | 89 | 0.606255 | 145 | 1,247 | 4.931034 | 0.268966 | 0.067133 | 0.125874 | 0.071329 | 0.128671 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011198 | 0.283881 | 1,247 | 37 | 90 | 33.702703 | 0.789474 | 0 | 0 | 0.066667 | 0 | 0 | 0.073836 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0 | 0 | 0.266667 | 0.033333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
577d2cb3d213acda1ba99afa1d4f8f378c128448 | 1,908 | py | Python | target_bigquery/utils.py | hotgluexyz/target-bigquery | 8b9f6f0ca652dd1ac408965a4e5af06cc1cd344f | [
"BSD-3-Clause"
] | 10 | 2020-09-28T15:12:17.000Z | 2021-12-03T12:39:23.000Z | target_bigquery/utils.py | hotgluexyz/target-bigquery | 8b9f6f0ca652dd1ac408965a4e5af06cc1cd344f | [
"BSD-3-Clause"
] | 26 | 2021-01-04T14:01:07.000Z | 2022-03-27T22:53:34.000Z | target_bigquery/utils.py | hotgluexyz/target-bigquery | 8b9f6f0ca652dd1ac408965a4e5af06cc1cd344f | [
"BSD-3-Clause"
] | 39 | 2020-10-01T18:16:20.000Z | 2022-03-11T16:14:41.000Z | import json
import os
import sys
import singer
from google.api_core import exceptions
from google.cloud import bigquery
from google.cloud.bigquery import Dataset
logger = singer.get_logger()
def emit_state(state):
"""
Given a state, writes the state to a state file (e.g., state.json.tmp)
:param state, State: state with bookmarks dictionary
"""
if state is not None:
line = json.dumps(state)
logger.debug("Emitting state {}".format(line))
sys.stdout.write("{}\n".format(line))
sys.stdout.flush()
if os.environ.get("TARGET_BIGQUERY_STATE_FILE", None):
fn = os.environ.get("TARGET_BIGQUERY_STATE_FILE", None)
with open(fn, "a") as f:
f.write("{}\n".format(line))
def ensure_dataset(project_id, dataset_id, location):
"""
Given a project id, dataset id and location, creates BigQuery dataset
https://googleapis.dev/python/bigquery/latest/generated/google.cloud.bigquery.client.Client.html
:param project_id, str: GCP project id from target config file. Passed to bigquery.Client().
:param dataset_id, str: BigQuery dataset id from target config file.
:param location, str: location for the dataset (US). Passed to bigquery.Client().
:return: client (BigQuery Client Object) and Dataset (BigQuery dataset)
"""
from google.cloud.bigquery import DatasetReference
client = bigquery.Client(project=project_id, location=location)
dataset_ref = DatasetReference(project_id, dataset_id)
try:
client.create_dataset(dataset_ref)
except exceptions.GoogleAPICallError as e:
if e.response.status_code == 409: # dataset exists
pass
else:
logger.critical(f"unable to create dataset {dataset_id} in project {project_id}; Exception {e}")
return 2 # sys.exit(2)
return client, Dataset(dataset_ref)
| 34.071429 | 108 | 0.686059 | 254 | 1,908 | 5.059055 | 0.358268 | 0.049027 | 0.035019 | 0.042023 | 0.140078 | 0.0607 | 0.0607 | 0.0607 | 0 | 0 | 0 | 0.003331 | 0.213312 | 1,908 | 55 | 109 | 34.690909 | 0.852765 | 0.333857 | 0 | 0 | 0 | 0 | 0.126749 | 0.042798 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0.032258 | 0.258065 | 0 | 0.387097 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
577e190148be519e94ad7cabbba484d05f275011 | 4,876 | py | Python | glance/convertor.py | rickerc/glance_audit | 790d4a739669113a289ab9d5687b28ed1e790bea | [
"Apache-2.0"
] | 1 | 2018-05-03T03:52:39.000Z | 2018-05-03T03:52:39.000Z | glance/convertor.py | rickerc/glance_audit | 790d4a739669113a289ab9d5687b28ed1e790bea | [
"Apache-2.0"
] | null | null | null | glance/convertor.py | rickerc/glance_audit | 790d4a739669113a289ab9d5687b28ed1e790bea | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import tempfile
from oslo.config import cfg
from glance.common import utils
import glance.domain.proxy
from glance.openstack.common import log as logging
from glance.openstack.common import processutils
policy_opts = [
cfg.BoolOpt('convert_image_to_raw', default=False,
                help=_('Any image uploaded to Glance in a bare container '
'is automatically converted into a raw image.')),
cfg.StrOpt('convert_image_to_raw_path',
help=_('Temporary directory used to convert image to raw, '
                       'by default the system temporary directory is used.')),
]
CONF = cfg.CONF
CONF.register_opts(policy_opts)
LOG = logging.getLogger(__name__)
def safe_delete(path):
try:
os.remove(path)
except Exception:
LOG.warn("Fail to remove file: %s", path)
def safe_close(fd):
try:
os.close(fd)
except Exception:
pass
def convert(image_data):
"""The image to raw convertor, it does:
- Read and put image_data into a temporary file
- Create a copy of the file into raw format
- Return a file object of this new raw file with the new file size
- Also the glance v1/v2 image upload pipeline read this temporary
instead of the streaming data
"""
fd_src, src = tempfile.mkstemp(prefix="glance_convert_image_src",
dir=CONF.convert_image_to_raw_path)
fd_dest, dest = tempfile.mkstemp(prefix="glance_convert_image_dest",
dir=CONF.convert_image_to_raw_path)
try:
for data in utils.chunkreadable(image_data):
os.write(fd_src, data)
finally:
safe_close(fd_src)
safe_close(fd_dest)
cmd = ["/usr/bin/qemu-img", "convert", "-O", "raw", src, dest]
processutils.execute(*cmd)
safe_delete(src)
# NOTE(sileht): we open the file fd and then delete the file
# So the file exists until the fd is closed
ret = file(dest), os.path.getsize(dest)
safe_delete(dest)
return ret
class ImageRepoProxy(glance.domain.proxy.Repo):
def __init__(self, image_repo, context, db_api):
self.image_repo = image_repo
self.db_api = db_api
proxy_kwargs = {'db_api': db_api, 'context': context}
super(ImageRepoProxy, self).__init__(image_repo,
item_proxy_class=ImageProxy,
item_proxy_kwargs=proxy_kwargs)
def save(self, image):
# NOTE(sileht): the disk_format can be changed only in queued state
if image.container_format == 'bare' and image.status == "queued":
image.disk_format = 'raw'
super(ImageRepoProxy, self).save(image)
def add(self, image):
if image.container_format == 'bare':
image.disk_format = 'raw'
super(ImageRepoProxy, self).add(image)
class ImageFactoryProxy(glance.domain.proxy.ImageFactory):
def __init__(self, factory, context, db_api):
proxy_kwargs = {'db_api': db_api, 'context': context}
super(ImageFactoryProxy, self).__init__(factory,
proxy_class=ImageProxy,
proxy_kwargs=proxy_kwargs)
class ImageProxy(glance.domain.proxy.Image):
def __init__(self, image, context, db_api):
self.image = image
self.context = context
self.db_api = db_api
super(ImageProxy, self).__init__(image)
def set_data(self, data, size=None):
try:
if not self.image.container_format or \
self.image.container_format == 'bare':
data, size = convert(utils.CooperativeReader(data))
self.image.size = size
self.image.set_data(data, size=size)
except Exception:
            LOG.exception(_('Cleaning up %s after conversion failure.')
% self.image.image_id)
location = self.image.locations[0]['url']
glance.store.safe_delete_from_backend(
location, self.context, self.image.image_id)
raise
| 34.097902 | 78 | 0.629409 | 619 | 4,876 | 4.783522 | 0.339257 | 0.039514 | 0.020263 | 0.028707 | 0.170888 | 0.103343 | 0.077001 | 0.030395 | 0.030395 | 0.030395 | 0 | 0.005156 | 0.284044 | 4,876 | 142 | 79 | 34.338028 | 0.843025 | 0.227646 | 0 | 0.172414 | 0 | 0 | 0.114657 | 0.020011 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0.011494 | 0.08046 | 0 | 0.229885 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
577efc2c2866b10597734b34d8313d2006047d51 | 878 | py | Python | tools/visualize.py | lraxue/MCN | 5b4a18617d86c18bfa378c08ec6b33c7fec0ef9b | [
"MIT"
] | 1 | 2018-01-18T03:51:56.000Z | 2018-01-18T03:51:56.000Z | tools/visualize.py | lraxue/MCN | 5b4a18617d86c18bfa378c08ec6b33c7fec0ef9b | [
"MIT"
] | null | null | null | tools/visualize.py | lraxue/MCN | 5b4a18617d86c18bfa378c08ec6b33c7fec0ef9b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Time    : 17-11-22 11:17 AM
# @Author : Fei Xue
# @Email : feixue@pku.edu.cn
# @File : visualize.py
# @Software: PyCharm Community Edition
import numpy as np
import matplotlib.pyplot as plt
import os
def load_data(fn):
with open(fn, "r") as f:
data = f.readlines()
return data
def plot_data(data, title=None, xlabel=None, ylabel=None, savename=None, stride=1):
plt.figure()
sample_data = data[0:len(data):stride]
plt.plot(sample_data, 'r-')
if xlabel is not None:
plt.xlabel(xlabel)
if ylabel is not None:
plt.ylabel(ylabel)
if title is not None:
plt.title(title)
if savename is not None:
plt.savefig(savename)
plt.show()
if __name__ == '__main__':
data = load_data("../loss.60000.txt")
print(len(data))
plot_data(data[5000:20000], stride=5) | 20.418605 | 83 | 0.623007 | 131 | 878 | 4.068702 | 0.51145 | 0.037523 | 0.067542 | 0.090056 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.041979 | 0.240319 | 878 | 43 | 84 | 20.418605 | 0.757121 | 0.179954 | 0 | 0 | 0 | 0 | 0.039216 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.125 | 0 | 0.25 | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
57829d1a8d6de4f4bac81660c6e216e90c1cb7b4 | 1,260 | py | Python | commands/dump-crashinfo.py | xUndero/noc | 9fb34627721149fcf7064860bd63887e38849131 | [
"BSD-3-Clause"
] | 1 | 2019-09-20T09:36:48.000Z | 2019-09-20T09:36:48.000Z | commands/dump-crashinfo.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | commands/dump-crashinfo.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# ./noc dump-crashinfo
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
from __future__ import print_function
import argparse
import time
# Third-party modules
from six.moves.cPickle import load
# NOC modules
from noc.core.management.base import BaseCommand
class Command(BaseCommand):
help = "Dump crashinfo file"
def add_arguments(self, parser):
parser.add_argument("args", nargs=argparse.REMAINDER, help="List traceback files")
def handle(self, *args, **options):
for path in args:
with open(path) as f:
self.dump_crashinfo(path, load(f))
def dump_crashinfo(self, path, data):
ts = time.localtime(data.get("ts", 0))
print("=" * 72)
print("PATH :", path)
print("COMPONENT :", data.get("component"))
print("TIME : %04d-%02d-%02d %02d:%02d:%02d" % ts[:6])
print("-" * 72)
print(data.get("traceback"))
if __name__ == "__main__":
Command().run()
| 28.636364 | 90 | 0.516667 | 133 | 1,260 | 4.766917 | 0.548872 | 0.082019 | 0.042587 | 0.037855 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026786 | 0.2 | 1,260 | 43 | 91 | 29.302326 | 0.602183 | 0.288095 | 0 | 0 | 0 | 0 | 0.153499 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0.217391 | 0 | 0.434783 | 0.304348 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5782f763e3e5859bca82333e334148cf7c8e680e | 7,338 | py | Python | texture/analysis/co_occurrence_matrix.py | MatteoZanella/siv-texture-analysis | 5edfbd498b2b2d7c29f9d8ae7f0bdcf05d4105c1 | [
"MIT"
] | 1 | 2021-10-22T03:52:35.000Z | 2021-10-22T03:52:35.000Z | texture/analysis/co_occurrence_matrix.py | MatteoZanella/siv-texture-analysis | 5edfbd498b2b2d7c29f9d8ae7f0bdcf05d4105c1 | [
"MIT"
] | null | null | null | texture/analysis/co_occurrence_matrix.py | MatteoZanella/siv-texture-analysis | 5edfbd498b2b2d7c29f9d8ae7f0bdcf05d4105c1 | [
"MIT"
] | null | null | null | from collections import Sequence
import numpy as np
from PIL import Image
class CoOccur:
"""
Class used to compute all Co-occurrence matrices of an image for a set of distances and angles and some related
parameters: the inertia, the average, the spread.
An instance of the CoOccur class holds a tensor of shape (len(distances), len(angles), levels, levels) that holds
all Co-occurrence matrices of the image passed as constructor's parameter, for each distance and angle in the
sequences passed as constructor's parameters. During the instantiation are computed also the inertia matrix of shape
(len(distances), len(angles)), the average tensor of shape (len(distances), levels, levels), and the spread tensor
of shape (len(distances), levels, levels).
Co-occurrence matrices can dramatically grow in size, so the levels of pixels are usually quantized before the
computation of the Co-occurrence matrices. From the 256 possible values that pixels can assume, these are reduced
to a smaller number specified as constructor's parameter.
Args:
image (PIL.Image): The image that has to be analyzed, it is internally converted in B/W with Image.convert('L')
distances (Sequence[float]): The sequence of analyzed distances. Default: range(1, 16, 2)
angles (Sequence[float]): The sequence of analyzed angles expressed in degrees. Default: range(90, -90, -45)
levels (int): Pixel values are quantized in this number of levels. Should be lower than 256. Default: 8
Attributes:
matrices (numpy.ndarray): Co-Occurrence matrices, of shape (len(distances), len(angles), levels, levels)
inertia (numpy.ndarray): Inertia matrix, of shape (len(distances), len(angles))
average (numpy.ndarray): Average tensor, of shape (len(distances), levels, levels)
spread (numpy.ndarray): Spread tensor, of shape (len(distances), levels, levels)
distances (numpy.ndarray): Array of all analyzed distances
angles (numpy.ndarray): Array of all analyzed angles
"""
def __init__(self, image: Image, distances: Sequence[float] = range(1, 16, 2),
angles: Sequence[float] = range(90, -90, -45), levels: int = 8):
# ===Image quantization===
pixels = np.array(image.convert('L')) # pixels.dtype == np.uint8
pixels = np.floor(pixels / 256 * levels).astype(np.uint8) # quantized in the [0, levels) integer range
# ===Angles and distances===
self.distances = np.array(distances)
self.angles = np.array(angles)
self._idx_of_dist = {distance: idx for idx, distance in enumerate(distances)}
self._idx_of_angle = {angle: idx for idx, angle in enumerate(angles)}
# ===CoOccur Tensor=== (distances, angles, levels_start, levels_end)
self.matrices = self._co_occurrence_matrices(pixels, distances, angles, levels)
# ===Inertia, Average, Spread===
self.inertia = self._inertia_matrix(levels)
self.average = self._average_matrices()
self.spread = self._spread_matrices()
def _co_occurrence_matrices(self, pixels: np.ndarray, dists: Sequence, angles: Sequence, levels: int) -> np.ndarray:
"""Computes the Co-Occurrence matrix of pixels for every distance and every angle passed as parameters"""
dists_list = []
for distance in dists:
angles_list = []
for angle in angles:
slice_start, slice_end = self._offset_slices(distance, angle)
start = pixels[slice_start[0][0]:slice_start[0][1], slice_start[1][0]:slice_start[1][1]].reshape(-1)
end = pixels[slice_end[0][0]:slice_end[0][1], slice_end[1][0]:slice_end[1][1]].reshape(-1)
histogram2d = np.histogram2d(start, end, density=True, bins=levels, range=[[0, levels], [0, levels]])[0]
angles_list.append(histogram2d)
dists_list.append(angles_list)
co_occur_matrices = np.array(dists_list)
return co_occur_matrices
@staticmethod
def _offset_slices(distance: float, angle: float):
"""Returns the starting and ending ranges to slice the pixel matrix given an angle in degrees and a distance"""
angle = np.radians(angle)
offset = np.rint(np.array([-np.sin(angle), np.cos(angle)]) * distance).astype(int)
slice_start = [[-offset[0] if offset[0] < 0 else None, -offset[0] if offset[0] > 0 else None],
[-offset[1] if offset[1] < 0 else None, -offset[1] if offset[1] > 0 else None]]
slice_end = [[offset[0] if offset[0] > 0 else None, offset[0] if offset[0] < 0 else None],
[offset[1] if offset[1] > 0 else None, offset[1] if offset[1] < 0 else None]]
return slice_start, slice_end
def _inertia_matrix(self, levels: int) -> np.ndarray:
"""Returns the inertia of each distance and angle"""
l_b = np.arange(levels)
l_a = l_b[:, np.newaxis]
coefficients = ((l_a - l_b) ** 2).reshape(1, 1, levels, levels)
return np.sum(coefficients * self.matrices, axis=(-1, -2))
def _average_matrices(self) -> np.ndarray:
"""Returns the average on all angles, for all distances and levels"""
return np.mean(self.matrices, axis=1)
def _spread_matrices(self) -> np.ndarray:
"""Returns the spread on all angles, for all distances and levels"""
return np.max(self.matrices, axis=1) - np.min(self.matrices, axis=1)
def matrix_of(self, distance: float, angle: float) -> np.ndarray:
"""
Method to get a matrix from the co-occurrence matrices, relying directly on distance and angle instead
of their indexes
Args:
distance (float): The distance of the direction
angle (float): The angle of the direction, expressed in degrees
Returns:
(np.ndarray): The co-occurrence matrix
"""
return self.matrices[self._idx_of_dist[distance], self._idx_of_angle[angle]]
def inertia_of(self, distance: float, angle: float) -> float:
"""
Method to get a value from the inertia matrix, relying directly on distance and angle instead of their indexes
Args:
distance (float): The distance of the direction
angle (float): The angle of the direction, expressed in degrees
Returns:
(float): Value of the inertia matrix
"""
return self.inertia[self._idx_of_dist[distance], self._idx_of_angle[angle]]
def average_of(self, distance: float) -> np.ndarray:
"""
Method to get a matrix from the average tensor, relying directly on distance instead of its index
Args:
distance (float): The distance of the direction
Returns:
(numpy.ndarray): Matrix of the average tensor
"""
return self.average[self._idx_of_dist[distance]]
def spread_of(self, distance: float) -> np.ndarray:
"""
Method to get a matrix from the spread tensor, relying directly on distance instead of its index
Args:
distance (float): The distance of the direction
Returns:
(numpy.ndarray): Matrix of the spread tensor
"""
return self.spread[self._idx_of_dist[distance]]
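# Illustrative usage sketch (added; not part of the original module). The image
# path is a hypothetical placeholder.
if __name__ == "__main__":
    sample = Image.open("texture_sample.png")  # hypothetical input image
    co = CoOccur(sample, distances=range(1, 8, 2), angles=[0, 45, 90], levels=8)
    print(co.matrix_of(1, 0).shape)   # one (levels, levels) co-occurrence matrix
    print(co.inertia_of(1, 0))        # scalar inertia for distance=1, angle=0
    print(co.average_of(1).shape)     # average over all angles at distance=1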
| 51.676056 | 120 | 0.655628 | 1,001 | 7,338 | 4.721279 | 0.16983 | 0.012696 | 0.033855 | 0.032163 | 0.402666 | 0.365002 | 0.307871 | 0.296022 | 0.224926 | 0.224926 | 0 | 0.016175 | 0.241755 | 7,338 | 141 | 121 | 52.042553 | 0.833214 | 0.476424 | 0 | 0 | 0 | 0 | 0.000288 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.054545 | 0 | 0.418182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
578330f1c72097621648889dd68bb23bb8243eaa | 4,647 | py | Python | script/gpomdp_cartpole.py | T3p/policy-optimization | 77006545779823737c4ca3b19e9d80506015c132 | [
"MIT"
] | null | null | null | script/gpomdp_cartpole.py | T3p/policy-optimization | 77006545779823737c4ca3b19e9d80506015c132 | [
"MIT"
] | null | null | null | script/gpomdp_cartpole.py | T3p/policy-optimization | 77006545779823737c4ca3b19e9d80506015c132 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 16 14:47:33 2019
@author: Matteo Papini
"""
import torch
import gym
import potion.envs
from potion.actors.continuous_policies import ShallowGaussianPolicy
from potion.actors.discrete_policies import ShallowGibbsPolicy
from potion.common.logger import Logger
from potion.algorithms.reinforce import reinforce
import argparse
import re
from potion.meta.steppers import ConstantStepper, RMSprop, Adam
from gym.spaces.discrete import Discrete
from potion.meta.smoothing_constants import gibbs_lip_const
# Command line arguments
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--name', help='Experiment name', type=str, default='GPOMDP')
parser.add_argument('--storage', help='root of log directories', type=str, default='..')
parser.add_argument('--estimator', help='Policy gradient estimator (reinforce/gpomdp)', type=str, default='gpomdp')
parser.add_argument('--baseline', help='baseline for policy gradient estimator (avg/peters/zero)', type=str, default='peters')
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
parser.add_argument('--env', help='Gym environment id', type=str, default='CartPole-v1')
parser.add_argument('--horizon', help='Task horizon', type=int, default=100)
parser.add_argument('--batchsize', help='Initial batch size', type=int, default=100)
parser.add_argument('--iterations', help='Iterations', type=int, default=20000)
parser.add_argument('--disc', help='Discount factor', type=float, default=0.9)
parser.add_argument('--tmp', help='(Initial) policy temperature', type=float, default=1.)
parser.add_argument('--std_init', help='Initial policy standard deviation (used by the Gaussian policy branch below)', type=float, default=1.)
parser.add_argument('--stepper', help='Step size rule', type=str, default='safe')
parser.add_argument('--step', help='Step size', type=float, default=1.)
parser.add_argument('--ent', help='Entropy bonus coefficient', type=float, default=0.)
parser.add_argument("--render", help="Render an episode",
action="store_true")
parser.add_argument("--no-render", help="Do not render any episode",
action="store_false")
parser.add_argument("--temp", help="Save logs in temp folder",
action="store_true")
parser.add_argument("--no-temp", help="Save logs in logs folder",
action="store_false")
parser.add_argument("--test", help="Test on deterministic policy",
action="store_true")
parser.add_argument("--no-test", help="Online learning only",
action="store_false")
parser.add_argument("--learnstd", help="Learn std",
action="store_true")
parser.add_argument("--no-learnstd", help="Don't learn std",
action="store_false")
parser.set_defaults(render=False, temp=False, learnstd=False, test=False)
args = parser.parse_args()
# Prepare
env = gym.make(args.env)
env.seed(args.seed)
if type(env.action_space) is Discrete:
policy = ShallowGibbsPolicy(env,
temp=1.)
else:
m = sum(env.observation_space.shape)
d = sum(env.action_space.shape)
mu_init = torch.zeros(m*d)
logstd_init = torch.log(torch.zeros(d) + args.std_init)
policy = ShallowGaussianPolicy(m, d,
mu_init=mu_init,
logstd_init=logstd_init,
learn_std=args.learnstd)
test_batchsize = args.batchsize if args.test else 0
envname = re.sub(r'[^a-zA-Z]', "", args.env)[:-1].lower()
logname = envname + '_' + args.name + '_' + str(args.seed)
if args.temp:
logger = Logger(directory= args.storage + '/temp', name = logname, modes=['human', 'csv'])
else:
logger = Logger(directory=args.storage + '/logs', name = logname, modes=['human', 'csv'])
if args.stepper == 'rmsprop':
stepper = RMSprop()
elif args.stepper == 'adam':
stepper = Adam(alpha=args.step)
elif args.stepper == 'safe':
stepper = ConstantStepper(1. / gibbs_lip_const(1., 1., args.disc, 1.))
else:
stepper = ConstantStepper(args.step)
# Run
reinforce(env, policy,
horizon = args.horizon,
stepper = stepper,
batchsize = args.batchsize,
iterations = args.iterations,
disc = args.disc,
entropy_coeff = args.ent,
seed = args.seed,
logger = logger,
render = args.render,
shallow = True,
estimator = args.estimator,
baseline = args.baseline,
test_batchsize=test_batchsize,
log_params=False,
save_params=False)
| 40.763158 | 126 | 0.663869 | 586 | 4,647 | 5.163823 | 0.296928 | 0.065433 | 0.123596 | 0.027759 | 0.229015 | 0.163582 | 0.130866 | 0.016523 | 0.016523 | 0.016523 | 0 | 0.01065 | 0.191737 | 4,647 | 113 | 127 | 41.123894 | 0.794995 | 0.029697 | 0 | 0.120879 | 0 | 0 | 0.182121 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.131868 | 0 | 0.131868 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
57838ecb95d7491e75cc210ed6384b9f7c55f40b | 501 | py | Python | Python3/1663-Smallest-String-With-A-Given-Numeric-Value/soln.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | 5 | 2020-07-24T17:48:59.000Z | 2020-12-21T05:56:00.000Z | Python3/1663-Smallest-String-With-A-Given-Numeric-Value/soln.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | null | null | null | Python3/1663-Smallest-String-With-A-Given-Numeric-Value/soln.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | 2 | 2020-07-24T17:49:01.000Z | 2020-08-31T19:57:35.000Z | import string
class Solution:
def getSmallestString(self, n: int, k: int) -> str:
# len = n, numerical value = k
d = {i: ch for i, ch in enumerate(string.ascii_lowercase, 1)}
d[0] = 'z'
ans = []
for i in range(n):
left = n - i
if 1 + 26 * (left - 1) >= k:
ans.append('a')
k -= 1
else:
ch = d[k % 26]
ans.append(ch)
k -= k % 26
return ''.join(ans)
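if __name__ == "__main__":
    # Illustrative check (added): LeetCode 1663 sample case, n=3, k=27 -> "aay" (value 1 + 1 + 25 = 27).
    assert Solution().getSmallestString(3, 27) == "aay"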
| 29.470588 | 69 | 0.389222 | 64 | 501 | 3.03125 | 0.53125 | 0.030928 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.041985 | 0.477046 | 501 | 16 | 70 | 31.3125 | 0.698473 | 0.055888 | 0 | 0 | 0 | 0 | 0.004246 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5784714849a56603e356a70a03c84b1d38dbc672 | 21,591 | py | Python | build/lib/soilapis/summary_soil_property.py | eusojk/soil_apis | 630ddd49802c351b44df28225707a53adad782b5 | [
"MIT"
] | 2 | 2021-02-22T06:06:48.000Z | 2021-08-30T13:50:53.000Z | build/lib/soilapis/summary_soil_property.py | eusojk/soil_apis | 630ddd49802c351b44df28225707a53adad782b5 | [
"MIT"
] | null | null | null | build/lib/soilapis/summary_soil_property.py | eusojk/soil_apis | 630ddd49802c351b44df28225707a53adad782b5 | [
"MIT"
] | null | null | null | import argparse
import glob
import os
import sys
from pathlib import Path
# from fortran_apis import which_api
from soilapis.fortran_apis import which_api
import gdal
import osr
import pandas as pd
import numpy as np
# Paths
script_dir = os.getcwd()
outputs = ''
parent_dir = ""
layers_dir = ""
country_dir = ""
globe_dir = ""
soilp_dir = ""
bds_dir = ""
cla_dir = ""
org_dir = ""
san_dir = ""
dir_types = ""
output_path = ""
gdal.UseExceptions() # Enable errors
class CountrySoilProperty(object):
def __init__(self, fname):
"""
:param fname: a GTif file
"""
try:
self.raster = gdal.Open(fname)
spatial_ref = osr.SpatialReference(self.raster.GetProjection())
# retrieve get the WGS84 spatial reference
wgs_ref = osr.SpatialReference()
wgs_ref.ImportFromEPSG(4326)
# Do a coordinate transform
self.coord_transf = osr.CoordinateTransformation(wgs_ref, spatial_ref)
# Apply geo-transformation and its inverse
self.raster_gt = self.raster.GetGeoTransform()
self.raster_inv_gt = gdal.InvGeoTransform(self.raster_gt)
# Handle error from Inverse function
if gdal.VersionInfo()[0] == '1':
if self.raster_inv_gt[0] == 1:
self.raster_inv_gt = self.raster_inv_gt[1]
else:
raise RuntimeError('Inverse geotransform failed')
elif self.raster_inv_gt is None:
raise RuntimeError('Inverse geotransform failed')
except RuntimeError: # <- Check first what exception is being thrown
pass
def get_px_coord(self, lon, lat):
"""
Convert lon-lat into x-y coordinates of pixel
:param lon: longitude
:param lat: latitude
:return: tuple of pixel coordinates
"""
offsets_coord = gdal.ApplyGeoTransform(self.raster_inv_gt, lon, lat)
px_x, px_y = map(int, offsets_coord)
return px_x, px_y
def get_band_array(self):
"""
:return: 2D array of the rater file
"""
return self.raster.GetRasterBand(1).ReadAsArray()
def get_band_value(self, lon, lat):
"""
Extract the pixel value at a given position
:param lon: lon
:param lat: lat
:return: int - pixel value
"""
px_x, px_y = self.get_px_coord(lon, lat)
return self.get_band_array()[px_y, px_x]
def average_by_window(self, lon, lat, window_size=3):
"""
This function slice a big array based on a given window size
:param lon: longitude
:param lat: latitude
:param window_size: int - height and width of window. e.g. 3 means 3 by 3. Must be odd
:return: float value, the average or mean of the array or -99 if invalid
"""
# if self.get_band_value(lon, lat) == 255: # disregard any value from the sea
# return - 99
array = self.slice_by_window(lon, lat, window_size)
if array is None:
return -99
mean = self.is_ndv_over_thres(array)
return mean
def slice_by_window(self, lon, lat, window_size):
"""
This function slice a big array based on a given window size
:param lon: longitude
:param lat: latitude
:param window_size: int - height and width of window. e.g. 3 means 3 by 3. Must be odd
:return: a 2D array or None
"""
px_x, px_y = self.get_px_coord(lon, lat)
if window_size % 2 == 0: # degrade to lower odd number. eg. 4 => 3
window_size -= 1
if window_size < 3:
window_size = 3
step = (window_size - 1) // 2
row_start = px_x - step
row_stop = px_x + step + 1
col_start = px_y - step
col_stop = px_y + step + 1
data = self.get_band_array()
res = data[row_start:row_stop, col_start:col_stop]
# if res.shape[0] * res.shape[1] != window_size * window_size:
# return
return res
def is_ndv_over_thres(self, array, threshold=0.5):
"""
This function checks if the frequency of NoDataValue (255 in this case) is over
the given threshold in the array or not and return the appropriate mean
:param array: numpy array representing a 2D grid
:param threshold: double default is half the size of array
:return: bool
"""
# Actual size of threshold:
size_thresh = int(threshold * array.size)
# first transform this array to 1D:
array_1d = array.reshape(array.size, )
# frequency of NDV or # of occurrence
freq_ndv = sum(array_1d == 255)
# if freq_ndv < size_thresh:
# mean = round(array.mean(), 2)
# else:
# # remove all occurences of NDV
# array_no_ndv = np.delete(array, np.where(array == 255))
# mean = round(array_no_ndv.mean(), 2)
array_no_ndv = np.delete(array_1d, np.where(array_1d == 255))
if array_no_ndv.size == 0:
return -89
else:
mean = round(array_no_ndv.mean(), 2)
return mean
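# Illustrative usage sketch (added; not part of the original script). The GeoTIFF
# path and coordinates are hypothetical placeholders.
#
#     layer = CountrySoilProperty("THA_clay_sd1.tif")
#     value = layer.get_band_value(102.765, 13.369)       # raw pixel value at lon/lat
#     mean = layer.average_by_window(102.765, 13.369, 3)   # 3x3 window mean, or -99/-89 on NoData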
def set_soil_layers_dir(soil_layers_path, output_loc, country_iso):
global bds_dir, cla_dir, org_dir, san_dir, dir_types, output_path
# the ouptput dir given
output_path = output_loc
dir_types = []
layers_types = ['bulkdensity', 'clay', 'organicsoil', 'sandfraction']
for lt in layers_types:
path_obj = soil_layers_path + lt + '/' + country_iso
if Path(path_obj).exists():
path_obj += '/*.tif'
dir_types.append(path_obj)
# print('output_path is', Path(output_path))
def get_soil_layers_dir():
global dir_types
# for t in glob.glob(dir_types[0]):
# print(t)
return dir_types
def ini_dir(script_path):
"""
Initializes important paths
:param script_path: the abs pathname of the script
:return: None
"""
global script_dir, outputs, parent_dir, layers_dir, country_dir, globe_dir, soilp_dir, \
bds_dir, cla_dir, org_dir, san_dir, dir_types
script_dir = os.path.abspath(script_path)
script_dir = os.path.abspath(os.path.dirname(script_path))
parent_dir = os.path.abspath(os.path.join(script_dir, os.pardir))
layers_dir = parent_dir + '/layers/'
country_dir = layers_dir + 'country/'
globe_dir = layers_dir + 'globe/*.tif'
globe_dir = glob.glob(globe_dir)
soilp_dir = layers_dir + 'soilproperties/'
bds_dir = soilp_dir + 'bulkdensity/THA/*.tif'
cla_dir = soilp_dir + 'clay/THA/*.tif'
org_dir = soilp_dir + 'organicsoil/THA/*.tif'
san_dir = soilp_dir + 'sandfraction/THA/*.tif'
dir_types = [bds_dir, cla_dir, org_dir, san_dir]
outputs = script_dir + '/outputs/'
def get_globe_dir(script_path):
"""
Return the pathnames of global tiff files
:param script_path: the abs pathname of this script
:return: abs pathname of the globe tif files as a list
"""
ini_dir(script_path)
return globe_dir
def set_globe_dir(global_layers_dir):
"""
Set the path to dir that contains the global Gtif layers
:param global_layers_dir:
:return:
"""
global globe_dir
globe_dir = global_layers_dir + '*.tif'
globe_dir = glob.glob(globe_dir)
def average_per_layer(dir_path, lon, lat, window_size):
"""
:param dir_path:
:param lon:
:param lat:
:param window_size:
:return:
"""
list_avg = []
dir_path = glob.glob(dir_path)
for tf in dir_path:
co = CountrySoilProperty(tf)
avg = co.average_by_window(lon, lat, window_size)
if avg == -99:
return -99
elif avg == -89:
return -89
list_avg.append(avg)
return list_avg
def average_per_type(dir_path, lon, lat, window_size):
"""
:param dir_path:
:param lon:
:param lat:
:param window_size:
:return:
"""
dict_avg = {}
for path in dir_path:
key = path.split('/')[-3]
list_avg = average_per_layer(path, lon, lat, window_size)
if list_avg == -99:
return -99
elif list_avg == -89:
return -89
if key not in dict_avg:
dict_avg[key] = list_avg
return dict_avg
def dict_to_df(dict_name):
"""
:param dict_name:
:return:
"""
return pd.DataFrame.from_dict(dict_name)
def df_to_asc(dfname, outname):
"""
:param outname:
:param dfname:
:return:
"""
dfname.to_csv(outname, sep='\t', encoding='utf-8', index=False)
def compute_pwp_row(col):
"""
:param col: pandas col
:return:
"""
clay_val = col['clay']
oc_val = col['organicsoil']
sand_val = col['sandfraction']
return compute_pwp(clay_val, oc_val, sand_val)
def compute_pwp(clay_val, oc_val, sand_val):
"""
Calculate permanent wilting point based on Clay, Organic Matter and sand value
:param clay_val: percentage of clay
:param oc_val: percentage of organic carbon
:param sand_val: percentage of sand
:return: a float value representing PWP
"""
# Step #1 - convert OC to OM
om_val = 2 * oc_val
om_val /= 2 #1000
clay_val /= 100
sand_val /= 100
# Step #2 - compute theta_1500_t
theta_1500_t = 0.031 - (0.024 * sand_val) + (0.487 * clay_val) + (0.006 * om_val) \
+ (0.005 * sand_val * om_val) - (0.013 * clay_val * om_val) + (0.068 * sand_val * clay_val)
# Step #3 - finally compute theta_1500
theta_1500 = (1.14 * theta_1500_t) - 0.02
return round(theta_1500, 2)
def compute_fc_row(col):
"""
:param col: pandas col
:return:
"""
clay_val = col['clay']
oc_val = col['organicsoil']
sand_val = col['sandfraction']
return compute_field_capacity(clay_val, oc_val, sand_val)
def compute_field_capacity(clay_val, oc_val, sand_val):
"""
Calculate Field Capacity based on Clay, Organic Matter and sand value
:param clay_val: percentage of clay
:param oc_val: percentage of organic carbon
:param sand_val: percentage of sand
:return: a float value representing FC
"""
# Step #1 - convert OC to OM
om_val = 2 * oc_val
om_val /= 2 #1000
clay_val /= 100
sand_val /= 100
# Step #2 - compute theta_33_t
theta_33_t = 0.299 - (0.251 * sand_val) + (0.195 * clay_val) + (0.011 * om_val) \
+ (0.006 * sand_val * om_val) - (0.027 * clay_val * om_val) + (0.452 * sand_val * clay_val)
# Step #3 - compute actual F.C: theta_33
theta_33 = theta_33_t + ((1.283 * theta_33_t * theta_33_t) - (0.374 * theta_33_t) - 0.015)
return round(theta_33, 2)
def compute_taw(fc, pwp, depth, fraction):
"""
Compute total available water
:param fc: Field capacity
:param pwp: permanent wilting point
:param depth: depth of soil in mm
:param fraction: float value
:return: a float value for TAW
"""
return depth * fraction * (fc - pwp)
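def _example_taw_calculation():
    """Illustrative only (added): estimate for an assumed soil with 30% clay,
    1.5% organic carbon and 40% sand over a fully weighted 100 mm layer."""
    pwp = compute_pwp(30, 1.5, 40)            # ~0.19 (permanent wilting point)
    fc = compute_field_capacity(30, 1.5, 40)  # ~0.32 (field capacity)
    return compute_taw(fc, pwp, 100, 1.0)     # ~13 mm of total available water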
def compute_taw_row(row):
"""
:param row: a pandas data frame object
:return: a float value for TAW
"""
fc = row['FC']
pwp = row['PWP']
depth = row['depths']
fraction = row['fraction']
return compute_taw(fc, pwp, depth, fraction)
def get_frac_for_taw(depth):
"""
:param depth: numerical depth values
:return: a float value or fraction
"""
depths_available = [10, 90, 200, 300, 400, 1000]
cumulative_depths = [10, 100, 300, 600, 1000, 2000]
# when numerator < denominator (default)
is_num_less = True
if depth not in cumulative_depths:
depths_dif = [abs(x - depth) for x in cumulative_depths]
min_depth_diff = min(depths_dif)
index_closest = depths_dif.index(min_depth_diff)
sum_to_depth_closest = sum( depths_available[:index_closest] )
num = abs(depth - sum_to_depth_closest)
denom = depths_available[index_closest]
if num > denom:
numerator = (num - depths_available[index_closest])
denominator = depths_available[index_closest + 1]
is_num_less = False
else:
numerator = num
denominator = denom
frac = numerator / denominator
return [frac, index_closest, is_num_less]
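# Illustrative trace (added): for an assumed request of depth=350 mm, the closest
# cumulative depth is 300 mm (index 2); the remaining 50 mm spills into the next
# 300 mm-thick layer, so get_frac_for_taw(350) returns roughly [0.17, 2, False].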
def setup(lon, lat, window, format_arg, depth=0, json_out=False):
"""
:param lon: longitude
:param lat: latitude
:param window: window size. e.g. 3 means 3 by 3
:param depth: depth of soil in mm
:param format_arg: str - indicates type of output to produce. if 'swb', output is TAW file; if 'dssat', output is .SOL file
:return: a text file or -99
:param json_out: if True, return a dict, otherwise a text file. Errors returned could be : -99 or -89 (NDV)
"""
global outputs, dir_types, script_dir
out_dssat = script_dir
# script_dir += '/soilapis/'
outputs = script_dir + '/outputs'
if not os.path.exists(outputs):
os.makedirs(outputs)
# test = Path(script_dir).exists()
# print('script_dir', test)
# os.chdir(outputs)
# you could change the lat long in the following:
dict_summary = average_per_type(dir_types, lon, lat, window)
if dict_summary == -99:
return -99
if dict_summary == -89: # sea values
print('Error code -89: Sea values spotted')
return -89
# outname = outputs
# outname = 'taw-' + str(lon) + '-' + str(lat) + '-' + str(depth) + 'mm.csv'
outname = 'SoilTAW' + str(depth) + 'mm.csv'
depth_values = [10, 90, 200, 300, 400, 1000]
cumulative_depths = [10, 100, 300, 600, 1000, 2000]
frac_values = []
actual_frac = 1
is_num_less = True
# Estimating closest depth value if depth given is part of depth values available
if depth not in cumulative_depths:
# depth_possible = [abs(x - depth) for x in depth_values]
# min_diff = min(depth_possible)
# index_closest = depth_possible.index(min_diff)
# depth_closest = depth_values[index_closest]
# depth_diff = abs(depth - depth_closest)
# actual_frac = depth_diff / depth_values[index_closest]
res = get_frac_for_taw(depth)
actual_frac = res[0]
index_closest = res[1]
is_num_less = res[2]
else: # depth given is available
# depth = depth_values[0]
# layer_oi = str(depth) + 'mm'
index_closest = cumulative_depths.index(depth)
# compute the fractions needed for computing TAW
if is_num_less:
for i in range(len(depth_values)):
if i < index_closest:
frac = 1
elif i == index_closest:
frac = round(actual_frac, 2)
else:
frac = 0
frac_values.append(frac)
else:
for i in range(len(depth_values)):
if i <= index_closest:
frac = 1
elif i == index_closest+1:
frac = round(actual_frac, 2)
else:
frac = 0
frac_values.append(frac)
# make a dataframe
df_summary = dict_to_df(dict_summary)
# divide current bulk density values by 100
df_summary['bulkdensity'] = round(df_summary['bulkdensity'] / 100, 2)
df_summary = round(df_summary, 2)
df_summary['Latitude'] = lat
df_summary['Longitude'] = lon
df_summary['Depth'] = depth
# export dataframe as csv to produce .SOL
if format_arg == 'dssat':
ascfile = out_dssat + '/sample_asc.csv'
df_to_asc(df_summary.iloc[:, 0:8], ascfile)
out_path_asc = os.path.abspath(ascfile)
out_path = which_api(out_path_asc, script_dir, 1, None, output_path)
# TO-DO: convert TH.SOL into JSON. Ask Jab's advice
if json_out:
pass
# Otherwise, let's make a SoilTAW file
else:
df_summary.insert(0, 'depths', depth_values)
df_summary['FC'] = df_summary.apply(compute_fc_row, axis=1)
df_summary['PWP'] = df_summary.apply(compute_pwp_row, axis=1)
df_summary['fraction'] = frac_values
df_summary['TAW'] = df_summary.apply(lambda x: compute_taw_row(x), axis=1)
# print()
# print(df_summary.to_dict())
# print()
# taw_val = df_summary.loc[layer_oi, 'TAW']
taw_val = df_summary['TAW'].sum()
taw_val = round(taw_val, 2)
# Extract sand and clay value needed to estimate soil type
fracs = [df_summary['sandfraction'].iloc[0], df_summary['clay'].iloc[0]]
# call the fortran_api
out_path_asc = os.path.abspath(outname)
soil_type = which_api(out_path_asc, script_dir, 0, fracs, output_path)
taw_dict = {"Code": 1, "Soil": soil_type, 'Total_Available_Water(mm)': taw_val}
if json_out is True:
return taw_dict
taw_dict = {"Code": [1], "Soil": [soil_type], 'Total_Available_Water(mm)': [taw_val]}
taw_data = pd.DataFrame.from_dict(taw_dict)
# export df as a txt/csv file
df_to_asc(taw_data, outname)
out_path = os.path.abspath(outname)
return out_path
def interactive_run():
"""
This function runs the script in interactive mode
:return: None
"""
# Check that all necessary files are present
script_path = sys.argv[0]
ini_dir(script_path)
# setup(102.765, 13.369, 3, 1000)
print(
"\nThis script is currently only supporting Thailand. Using geo coordinates not associated with this country "
"will give misleading results!\n")
# Check if the soil properties directory is present before running
layers_dir_path = Path(layers_dir)
layers_dir_present = layers_dir_path.exists()
while layers_dir_present:
prompt = input("Enter 'R' to (re)start or 'Q' to quit: ")
print()
if prompt.lower() == 'r':
while True:
try:
lon = float(input("Enter longitude: "))
lat = float(input("Enter latitude: "))
window = int(input("Enter window size (e.g. enter '3' for 3x3): "))
depth = int(input("Enter soil depth (mm): "))
except ValueError:
print('Invalid key. Please enter a numerical value')
continue
                outname = setup(lon, lat, window, 'swb', depth)
if outname == -99:
print("\nInvalid location. This lon({}) and lat({}) is definitely in the sea.".format(lon, lat))
else:
print('Check directory for the following file: ', outname)
setup_prompt = input('\nWould you like to make a new simulation? (y or n): ')
if setup_prompt.lower() == 'y':
continue
elif setup_prompt.lower() == 'n':
break
else:
print('Got invalid response. Restarting...')
elif prompt.lower() == 'q':
print('Exiting...')
break
else:
print("Sorry, invalid key... \n")
continue
else:
print(
"\n The 'layers' directory is missing. Please download the zip file and place it in your project directory.")
def main():
# Check that all necessary files are present
script_path = sys.argv[0]
ini_dir(script_path)
# Check if layers directory exists
layers_dir_path = Path(layers_dir)
layers_dir_present = layers_dir_path.exists()
if not layers_dir_present:
print(
"\n The 'layers' directory is missing. Please download the zip file and place it in your project directory.")
return
# parse the given arguments
parser = argparse.ArgumentParser(
description="This script interpolates TAW value for a specific location in Thailand"
)
parser.add_argument("--lon", type=float, required=True, help="longitude value, e.g. 103.98")
parser.add_argument("--lat", type=float, required=True, help="latitude value, e.g. 15.88")
parser.add_argument("--win", type=int, required=True, help="window size, e.g. enter '3' for a window size "
"of 3x3")
parser.add_argument("--depth", type=int, required=True, help="depth value in mm, e.g. 350")
parser.add_argument("--format", default='swb', type=str, required=False,
help="options are: 'swb' to produce SoilTAW file or 'dssat' to produce .SOL ")
args = vars(parser.parse_args())
# check the value of the format given: should be None ('swb') or 'dssat'
if (args['format'] == 'swb') or (args['format'] == 'dssat'):
outname = setup(args["lon"], args["lat"], args["win"], args['format'], args["depth"])
if outname == -99:
print("\nInvalid location. This lon({}) and lat({}) is definitely in the sea.".format(args["lon"],
args["lat"]))
else:
print('Check directory for the following file: ', outname)
else:
print("Invalid format argument: '{}'. Please choose either 'swb' or 'dssat'".format(args['format']))
if __name__ == '__main__':
main()
# interactive_run()
| 31.473761 | 127 | 0.602427 | 2,914 | 21,591 | 4.264928 | 0.165408 | 0.019311 | 0.009656 | 0.009012 | 0.337303 | 0.299968 | 0.268828 | 0.234471 | 0.219585 | 0.213872 | 0 | 0.026316 | 0.292483 | 21,591 | 685 | 128 | 31.519708 | 0.787248 | 0.254412 | 0 | 0.252841 | 0 | 0.005682 | 0.121375 | 0.007463 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076705 | false | 0.005682 | 0.03125 | 0 | 0.193182 | 0.039773 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
57849a5bdfa16bc1f0ba0fca56bb0bab14cc2e2d | 2,154 | py | Python | tests/test_webapi.py | wsaxun/DemoLib | 2f3368b86c6067f66f9fcb3dcb9f37bf13740d2a | [
"BSD-2-Clause"
] | null | null | null | tests/test_webapi.py | wsaxun/DemoLib | 2f3368b86c6067f66f9fcb3dcb9f37bf13740d2a | [
"BSD-2-Clause"
] | null | null | null | tests/test_webapi.py | wsaxun/DemoLib | 2f3368b86c6067f66f9fcb3dcb9f37bf13740d2a | [
"BSD-2-Clause"
] | null | null | null | import pytest
import json
from mock import MagicMock
from collections import OrderedDict
from werkzeug.wsgi import DispatcherMiddleware
from flaskdemo.webapi import create_app
from flaskdemo.webapi.v1 import view
from common import conf
URL_TEST_INDEX = '/api/v1/'
URL_TEST_POLICY = '/api/v1/policy'
URL_TEST_RESULT = '/api/v1/result/c67f957adbae41fc98bc5dd8cb8e1a6c'
TASK_RESPONSE_DATA = b'{"task_id": "c67f957adbae41fc98bc5dd8cb8e1a6c"}'
TASK_RESULT_DATA = b'{"result": "data"}'
MOCK_TASK_RESPONSE_DATA = {"task_id": "c67f957adbae41fc98bc5dd8cb8e1a6c"}
MOCK_TASK_RESULT_DATA = {"result": "data"}
@pytest.fixture(scope="module")
def app():
conf.get_webApi_conf = MagicMock()
conf.get_amqp_conf = MagicMock()
application = create_app()
application.wsgi_app = DispatcherMiddleware(application.wsgi_app,
OrderedDict({
'/api': application
}))
return application.test_client()
@pytest.fixture()
def task_data():
rpc_response_data = MOCK_TASK_RESPONSE_DATA
return MagicMock(return_value=json.dumps(rpc_response_data))
@pytest.fixture()
def task_result_data():
rpc_response_data = MOCK_TASK_RESULT_DATA
return MagicMock(return_value=json.dumps(rpc_response_data))
class TestWebApi(object):
def test_index(self, app, task_data):
url = URL_TEST_INDEX
view.rpc_request = task_data
get_response = app.get(url)
# assert get_response.status_code == 200
assert get_response.data == TASK_RESPONSE_DATA
def test_policy(self, app, task_data):
url = URL_TEST_POLICY
view.rpc_request = task_data
post_response = app.post(url)
delete_response = app.delete(url)
assert post_response.data == TASK_RESPONSE_DATA
assert delete_response.data == TASK_RESPONSE_DATA
def test_result(self, app, task_result_data):
url = URL_TEST_RESULT
view.rpc_request = task_result_data
get_response = app.get(url)
assert get_response.data == TASK_RESULT_DATA
| 29.108108 | 73 | 0.686165 | 260 | 2,154 | 5.361538 | 0.207692 | 0.120517 | 0.070301 | 0.030129 | 0.34505 | 0.261119 | 0.222382 | 0.136298 | 0.136298 | 0.077475 | 0 | 0.029483 | 0.228412 | 2,154 | 73 | 74 | 29.506849 | 0.809266 | 0.017642 | 0 | 0.156863 | 0 | 0 | 0.091296 | 0.053926 | 0 | 0 | 0 | 0 | 0.078431 | 1 | 0.117647 | false | 0 | 0.156863 | 0 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
57867374e1eccf45f90f58e43cd175e1449968e0 | 3,964 | py | Python | tests/test_errors.py | wangdongcheng/PyRFC | 3311aa5429404105eb07629e8044c9404a7ce5a9 | [
"Apache-2.0"
] | 1 | 2020-06-03T17:56:51.000Z | 2020-06-03T17:56:51.000Z | tests/test_errors.py | coledong/PyRFC | 073bfbd330a6f5581d507c7936974b19147355ea | [
"Apache-2.0"
] | null | null | null | tests/test_errors.py | coledong/PyRFC | 073bfbd330a6f5581d507c7936974b19147355ea | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import socket
import unittest
import pyrfc
from decimal import Decimal
from tests.config import PARAMS as params, CONFIG_SECTIONS as config_sections, get_error
class TestConnection:
def setup_method(self, test_method):
self.conn = pyrfc.Connection(**params)
assert self.conn.alive
def teardown_method(self, test_method):
self.conn.close()
assert not self.conn.alive
def test_no_connection_params(self):
try:
pyrfc.Connection()
except pyrfc.RFCError as ex:
assert ex.args[0] == "Connection parameters missing"
# todo: test correct status after error -> or to the error tests?
def test_incomplete_params(self):
incomplete_params = params.copy()
for p in ["ashost", "gwhost", "mshost"]:
if p in incomplete_params:
del incomplete_params[p]
try:
pyrfc.Connection(**incomplete_params)
except pyrfc.RFCError as ex:
error = get_error(ex)
assert error["code"] == 20
assert error["key"] == "RFC_INVALID_PARAMETER"
assert error["message"][0] in [
"Parameter ASHOST, GWHOST, MSHOST or SERVER_PORT is missing.",
"Parameter ASHOST, GWHOST, MSHOST or PORT is missing.",
"Parameter ASHOST, GWHOST or MSHOST is missing.",
]
def test_denied_users(self):
denied_params = params.copy()
denied_params["user"] = "BLAFASEL"
try:
pyrfc.Connection(**denied_params)
except pyrfc.LogonError as ex:
error = get_error(ex)
assert error["code"] == 2
assert error["key"] == "RFC_LOGON_FAILURE"
assert error["message"][0] == "Name or password is incorrect (repeat logon)"
def test_call_without_RFM_name(self):
try:
self.conn.call()
except Exception as ex:
assert type(ex) is TypeError
assert ex.args[0] == "call() takes at least 1 positional argument (0 given)"
def test_call_non_existing_RFM(self):
try:
self.conn.call("undefined")
except pyrfc.ABAPApplicationError as ex:
error = get_error(ex)
assert error["code"] == 5
assert error["key"] == "FU_NOT_FOUND"
assert error["message"][0] == "ID:FL Type:E Number:046 undefined"
def test_call_non_string_RFM_name(self):
try:
self.conn.call(1)
except pyrfc.RFCError as ex:
assert ex.args == (
"Remote function module name must be unicode string, received:",
1,
int,
)
def test_call_non_existing_RFM_parameter(self):
try:
self.conn.call("STFC_CONNECTION", undefined=0)
except pyrfc.ExternalRuntimeError as ex:
error = get_error(ex)
assert error["code"] == 20
assert error["key"] == "RFC_INVALID_PARAMETER"
assert error["message"][0] == "field 'undefined' not found"
def test_non_existing_field_structure(self):
IMPORTSTRUCT = {"XRFCCHAR1": "A", "RFCCHAR2": "BC", "RFCCHAR4": "DEFG"}
try:
result = self.conn.call("STFC_STRUCTURE", IMPORTSTRUCT=IMPORTSTRUCT)
except pyrfc.ExternalRuntimeError as ex:
assert ex.code == 20
assert ex.key == "RFC_INVALID_PARAMETER"
assert ex.message == "field 'XRFCCHAR1' not found"
def test_non_existing_field_table(self):
IMPORTSTRUCT = {"XRFCCHAR1": "A", "RFCCHAR2": "BC", "RFCCHAR4": "DEFG"}
try:
result = self.conn.call("STFC_STRUCTURE", RFCTABLE=[IMPORTSTRUCT])
except pyrfc.ExternalRuntimeError as ex:
assert ex.code == 20
assert ex.key == "RFC_INVALID_PARAMETER"
assert ex.message == "field 'XRFCCHAR1' not found"
if __name__ == "__main__":
unittest.main()
| 34.77193 | 88 | 0.605449 | 466 | 3,964 | 4.98927 | 0.283262 | 0.056774 | 0.030968 | 0.020645 | 0.489892 | 0.436129 | 0.36129 | 0.315699 | 0.285591 | 0.256344 | 0 | 0.01169 | 0.287841 | 3,964 | 113 | 89 | 35.079646 | 0.811902 | 0.026741 | 0 | 0.333333 | 0 | 0 | 0.202594 | 0.02179 | 0 | 0 | 0 | 0.00885 | 0.258065 | 1 | 0.11828 | false | 0.010753 | 0.107527 | 0 | 0.236559 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5788e5ec5ad850f2ab5583d89148b93f0c9246d1 | 3,690 | py | Python | scripts/setup.py | fosterrath-mila/Benzina | cabea8ecbae5122682c76c0c6d19291305853838 | [
"MIT"
] | null | null | null | scripts/setup.py | fosterrath-mila/Benzina | cabea8ecbae5122682c76c0c6d19291305853838 | [
"MIT"
] | null | null | null | scripts/setup.py | fosterrath-mila/Benzina | cabea8ecbae5122682c76c0c6d19291305853838 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Imports and early Python version check.
#
packageName = "benzina"
githubURL = "https://github.com/obilaniu/Benzina"
import os, sys
if sys.version_info[:2] < (3, 5):
sys.stdout.write(packageName+" is Python 3.5+ only!\n")
sys.exit(1)
from setuptools import setup, find_packages, Extension
from . import git, versioning, utils
#
# Read long description
#
with open(os.path.join(git.getSrcRoot(),
"scripts",
"LONG_DESCRIPTION.txt"), "r", encoding="utf-8") as f:
long_description = f.read()
#
# Synthesize version.py file
#
with open(os.path.join(git.getSrcRoot(),
"src",
packageName,
"version.py"), "w") as f:
f.write(versioning.synthesizeVersionPy())
author = "Olexa Bilaniuk"
#
# Perform setup.
#
setup(
name = packageName,
version = versioning.verPublic,
author = author,
author_email = "anonymous@anonymous.com",
license = "MIT",
url = githubURL,
download_url = githubURL+"/archive/v{}.tar.gz".format(versioning.verRelease),
description = "A fast image-loading package to load images compressed with "
"video codecs onto GPU asynchronously.",
long_description = long_description,
classifiers = [
"Development Status :: 1 - Planning",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX",
"Operating System :: Unix",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Utilities",
],
python_requires = '>=3.5',
setup_requires = [
"meson>=0.50.0",
],
install_requires = [
"nauka>=0.0.11",
"numpy>=1.10",
],
packages = find_packages("src"),
package_dir = {'': 'src'},
ext_modules = [
Extension("benzina.native",
[os.path.join("src", "benzina", "native.c")],
include_dirs=[os.path.join(git.getSrcRoot(), "include")],
library_dirs=[os.path.join(git.getSrcRoot(),
utils.get_build_platlib(),
"benzina",
"lib")],
runtime_library_dirs=[os.path.join("$ORIGIN", "lib")],
libraries=["benzina"],)
],
cmdclass={
"build_configure": utils.build_configure,
"build_ext": utils.build_ext,
"clean": utils.clean,
},
command_options={
'build_sphinx': {
'project': ("setup.py", packageName),
'copyright': ("setup.py", "2019, {}".format(author)),
'version': ("setup.py", versioning.verRelease),
'release': ("setup.py", versioning.verPublic)
}
},
zip_safe = False,
)
| 33.853211 | 89 | 0.521138 | 335 | 3,690 | 5.659701 | 0.468657 | 0.022152 | 0.031646 | 0.068565 | 0.075949 | 0.061181 | 0.0327 | 0 | 0 | 0 | 0 | 0.013986 | 0.341192 | 3,690 | 108 | 90 | 34.166667 | 0.76594 | 0.033875 | 0 | 0.069767 | 0 | 0 | 0.32695 | 0.018868 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.034884 | 0 | 0.034884 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
578a298c3d3e77e4580c88dfc9e3396e12318474 | 13,547 | py | Python | bin/jh_install.py | lsst-camera-dh/release | 4a2f4b5f8622a1c9e2c4e094995aa22a890404e7 | [
"BSD-3-Clause-LBNL"
] | 2 | 2015-10-08T14:05:40.000Z | 2018-04-10T06:34:17.000Z | bin/jh_install.py | lsst-camera-dh/release | 4a2f4b5f8622a1c9e2c4e094995aa22a890404e7 | [
"BSD-3-Clause-LBNL"
] | 13 | 2015-10-11T20:48:11.000Z | 2018-07-31T15:36:53.000Z | bin/jh_install.py | lsst-camera-dh/release | 4a2f4b5f8622a1c9e2c4e094995aa22a890404e7 | [
"BSD-3-Clause-LBNL"
] | 3 | 2016-07-06T16:56:09.000Z | 2020-05-19T18:10:51.000Z | #!/usr/bin/env python
import os
import glob
import shutil
import subprocess
import warnings
import configparser
class Parfile(dict):
def __init__(self, infile, section):
super(Parfile, self).__init__()
parser = configparser.ConfigParser()
parser.optionxform = str
result = parser.read(infile)
if not result:
raise RuntimeError("invalid or empty config file: {f}".format(f=infile))
for key, value in parser.items(section):
self[key] = self._cast(value)
def _cast(self, value):
if value == 'None':
return None
try:
if value.find('.') == -1 and value.find('e') == -1:
return int(value)
else:
return float(value)
except ValueError:
# Cannot cast as either int or float so just return the
# value as-is (presumably a string).
return value
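# Illustrative usage (added; not part of the original script). Filename and section
# are hypothetical placeholders for a versions file with a [jh] section.
#
#     pars = Parfile("installed_versions.txt", "jh")
#     hj_version = pars["harnessed-jobs"]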
def get_package_name(package):
pattern = os.path.join(package + '*', 'ups', '*.table')
return os.path.basename(glob.glob(pattern)[0]).split('.')[0]
class Installer(object):
_executable = '/bin/bash'
_github_org = 'https://github.com/lsst-camera-dh'
_github_elec_org = 'https://github.com/lsst-camera-electronics'
def __init__(self, version_file, inst_dir='.', python_exec='python',
hj_folders=('BNL_T03',), site='BNL'):
self.version_file = os.path.abspath(version_file)
if inst_dir is not None:
self.inst_dir = os.path.abspath(inst_dir)
shutil.copy(self.version_file,
os.path.join(self.inst_dir, 'installed_versions.txt'))
self.python_exec = python_exec
self.hj_folders = hj_folders
self.site = site
self._package_dirs = None
self._stack_dir = None
self._third_party_pars = None
self.curdir = os.path.abspath('.')
try:
self.pars = Parfile(self.version_file, 'jh')
except configparser.NoSectionError:
pass
def modules_install(self):
url = 'http://sourceforge.net/projects/modules/files/Modules/modules-3.2.10/modules-3.2.10.tar.gz'
inst_dir = self.inst_dir
commands = ";".join(["curl -L -O %(url)s",
"tar xzf modules-3.2.10.tar.gz",
"cd modules-3.2.10",
"./configure --prefix=%(inst_dir)s",
"make",
"make install",
"cd %(inst_dir)s"]) % locals()
subprocess.check_call(commands, shell=True, executable=self._executable)
@staticmethod
def github_download(package_name, version):
if not package_name.startswith('REB_'):
url = '/'.join((Installer._github_org, package_name, 'archive',
version + '.tar.gz'))
else:
url = '/'.join((Installer._github_elec_org, package_name, 'archive',
version + '.tar.gz'))
commands = ["curl -L -O " + url,
"tar xzf %(version)s.tar.gz" % locals()]
for command in commands:
subprocess.check_call(command, shell=True,
executable=Installer._executable)
@staticmethod
def github_clone(package_name, version):
if not version:
version = 'master'
url = '/'.join((Installer._github_org, package_name + '.git'))
command = f'git clone {url}; cd {package_name}; git checkout {version}'
subprocess.check_call(command, shell=True,
executable=Installer._executable)
def lcatr_install(self, package_name):
version = self.pars[package_name]
self.github_download(package_name, version)
inst_dir = self.inst_dir
python_exec = self.python_exec
command = "cd %(package_name)s-%(version)s/; %(python_exec)s setup.py install --prefix=%(inst_dir)s" % locals()
subprocess.check_call(command, shell=True, executable=self._executable)
@property
def package_dirs(self):
if self._package_dirs is None:
self._package_dirs = {}
try:
pars = Parfile(self.version_file, 'packages')
for package, version in pars.items():
package_dir = "%(package)s-%(version)s" % locals()
self._package_dirs[package] = os.path.join(self.inst_dir,
package_dir)
except configparser.NoSectionError:
pass
return self._package_dirs
@property
def stack_dir(self):
if self._stack_dir is None:
try:
pars = Parfile(self.version_file, 'dmstack')
self._stack_dir = pars['stack_dir']
except configparser.NoSectionError:
pass
return self._stack_dir
@property
def third_party_pars(self):
if self._third_party_pars is None:
try:
self._third_party_pars = Parfile(self.version_file,
'third_party')
except configparser.NoSectionError:
self._third_party_pars = dict()
return self._third_party_pars
def write_setup(self):
contents = "export INST_DIR=%s\n" % self.inst_dir
if self.stack_dir is not None:
contents += """export STACK_DIR=%s
source ${STACK_DIR}/loadLSST.bash
export EUPS_PATH=${INST_DIR}/eups:${EUPS_PATH}
""" % self.stack_dir
contents += self._eups_config()
if 'eo_utilities_dir' in self.third_party_pars:
eo_utilities_dir = self.third_party_pars['eo_utilities_dir']
contents += 'setup -r %(eo_utilities_dir)s\n' % locals()
contents += self._jh_config()
contents += self._package_env_vars()
contents += self._schema_paths()
contents += self._python_configs()
contents += "export OMP_NUM_THREADS=1\n"
contents += "export MPLBACKEND=svg\n"
contents += 'PS1="[jh]$ "\n'
output = open(os.path.join(self.inst_dir, 'setup.sh'), 'w')
output.write(contents)
output.close()
def _eups_config(self):
try:
pars = Parfile(self.version_file, 'eups_packages')
except configparser.NoSectionError:
return ''
return '\n'.join(['setup lsst_distrib'] +
['setup %s' % get_package_name(package)
for package in pars]) + '\n'
def _jh_config(self):
bin_dirs = [os.path.join('${INST_DIR}', os.path.split(x)[-1], 'bin')
for x in self.package_dirs.values()
if os.path.isdir(os.path.join(x, 'bin'))]
bin_path = ":".join(bin_dirs + ['${INST_DIR}/bin', '${PATH}'])
hj_version = self.pars['harnessed-jobs']
site = self.site
modules_dir = self.third_party_pars['modules_dir']
return """export HARNESSEDJOBSDIR=${INST_DIR}/harnessed-jobs-%(hj_version)s
export VIRTUAL_ENV=${INST_DIR}
source %(modules_dir)s/3.2.10/init/bash
export PATH=%(bin_path)s
export SITENAME=%(site)s
""" % locals()
def _schema_paths(self):
paths = []
for package, package_dir in self.package_dirs.items():
if not os.path.isdir(os.path.join(package_dir, 'schemas')):
continue
paths.append("${%s}/schemas" % self._env_var(package))
paths.extend(['${HARNESSEDJOBSDIR}/schemas', '${LCATR_SCHEMA_PATH}'])
return 'export LCATR_SCHEMA_PATH=' + ':'.join(paths) + '\n'
def _package_env_vars(self):
contents = ""
for package, package_dir in self.package_dirs.items():
subdir = os.path.split(package_dir.rstrip(os.path.sep))[-1]
env_var = self._env_var(package)
contents += ("export %s=${INST_DIR}/%s\n" % (env_var, subdir))
return contents
@staticmethod
def _env_var(package_name):
return package_name.replace('-', '').upper() + 'DIR'
def _module_path(self):
try:
module_path = glob.glob('%s/lib/python*/site-packages'
% self.inst_dir)[0][len(self.inst_dir):]
return os.path.join('${INST_DIR}', module_path.lstrip(os.path.sep))
except IndexError:
message = "%s/lib/python*/site-packages not found." % self.inst_dir
warnings.warn(message)
return ''
def _python_configs(self):
python_dirs = [os.path.join('${'+self._env_var(x)+'}', 'python')
for x in self.package_dirs]
for package_dir, path in self.third_party_pars.items():
if package_dir in ('modules_dir', 'eo_utilities_dir'):
continue
python_dirs.append(path)
python_dirs.extend(['${HARNESSEDJOBSDIR}/python', self._module_path(),
'${PYTHONPATH}'])
python_configs = "export PYTHONPATH=%s\n" % ":".join(python_dirs)
return python_configs
def jh(self):
os.chdir(self.inst_dir)
#self.modules_install()
self.lcatr_install('lcatr-harness')
self.lcatr_install('lcatr-schema')
self.lcatr_install('lcatr-modulefiles')
inst_dir = self.inst_dir
subprocess.check_call('ln -sf %(inst_dir)s/share/modulefiles %(inst_dir)s/Modules' % locals(), shell=True, executable=self._executable)
subprocess.check_call('touch `ls -d %(inst_dir)s/lib/python*/site-packages/lcatr`/__init__.py' % locals(), shell=True, executable=self._executable)
hj_version = self.pars['harnessed-jobs']
self.github_download('harnessed-jobs', hj_version)
for folder in self.hj_folders:
subprocess.check_call('ln -sf %(inst_dir)s/harnessed-jobs-%(hj_version)s/%(folder)s/* %(inst_dir)s/share' % locals(), shell=True, executable=self._executable)
self.eups_package_installer()
self.package_installer()
self.write_setup()
os.chdir(self.curdir)
def eups_package_installer(self):
try:
pars = Parfile(self.version_file, 'eups_packages')
except configparser.NoSectionError:
return
inst_dir = self.inst_dir
stack_dir = self.stack_dir.rstrip(os.path.sep)
ups_db_dir = '%(inst_dir)s/eups/ups_db' % locals()
if not os.path.isdir(ups_db_dir):
os.makedirs(ups_db_dir)
for package, version in pars.items():
if package == 'obs_lsst':
scons_command = 'scons lib python shebang examples doc policy python/lsst/obs/lsst/version.py'
else:
scons_command = 'scons'
if version == 'master':
self.github_clone(package, version)
else:
self.github_download(package, version)
package_name = get_package_name(package)
commands = """source %(stack_dir)s/loadLSST.bash; export EUPS_PATH=%(inst_dir)s/eups:${EUPS_PATH}; cd %(package)s*; eups declare %(package_name)s %(version)s -r . -c; setup %(package_name)s; %(scons_command)s""" % locals()
subprocess.check_call(commands, shell=True,
executable=self._executable)
def package_installer(self):
try:
pars = Parfile(self.version_file, 'packages')
except configparser.NoSectionError:
return
inst_dir = self.inst_dir
for package, version in pars.items():
self.github_download(package, version)
package_dir = "%(package)s-%(version)s" % locals()
hj_dir = "%(inst_dir)s/%(package_dir)s/harnessed_jobs" % locals()
if os.path.isdir(hj_dir):
command = 'ln -sf %(hj_dir)s/* %(inst_dir)s/share' % locals()
subprocess.check_call(command, executable=self._executable,
shell=True)
def jh_test(self):
os.chdir(self.inst_dir)
try:
pars = Parfile(self.version_file, 'eups_packages')
pars['eotest']
hj_version = self.pars['harnessed-jobs']
command = 'source ./setup.sh; python harnessed-jobs-%(hj_version)s/tests/setup_test.py' % locals()
subprocess.check_call(command, shell=True, executable=self._executable)
os.chdir(self.curdir)
except (configparser.NoSectionError, KeyError):
pass
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description="Job Harness Installer",
fromfile_prefix_chars="@")
parser.add_argument("version_file", help='software version file')
parser.add_argument('--inst_dir', type=str, default=None,
help='installation directory')
parser.add_argument('--site', type=str, default='SLAC',
help='Site (SLAC, BNL, etc.)')
parser.add_argument('--hj_folders', type=str, default="SLAC")
parser.add_argument('--python_exec', type=str, default='python')
parser.add_argument('--dev', action='store_true')
args = parser.parse_args()
installer = Installer(args.version_file, inst_dir=args.inst_dir,
python_exec=args.python_exec,
hj_folders=args.hj_folders.split(), site=args.site)
if args.inst_dir is not None:
installer.jh()
| 42.334375 | 234 | 0.585296 | 1,591 | 13,547 | 4.756757 | 0.159648 | 0.043473 | 0.021802 | 0.021406 | 0.356501 | 0.272859 | 0.1875 | 0.117865 | 0.104255 | 0.067653 | 0 | 0.003219 | 0.289215 | 13,547 | 319 | 235 | 42.467085 | 0.78274 | 0.00967 | 0 | 0.25 | 0 | 0.017606 | 0.189084 | 0.061736 | 0 | 0 | 0 | 0 | 0 | 1 | 0.080986 | false | 0.014085 | 0.024648 | 0.003521 | 0.190141 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
578a3ba58c10184a74ccd1c56dfd9b88d7e11466 | 4,136 | py | Python | graph1.py | SandraCoburn/graphs_guided | 4a1c471e33d4d9aa4215d243c7e93214ba0c6205 | [
"MIT"
] | null | null | null | graph1.py | SandraCoburn/graphs_guided | 4a1c471e33d4d9aa4215d243c7e93214ba0c6205 | [
"MIT"
] | null | null | null | graph1.py | SandraCoburn/graphs_guided | 4a1c471e33d4d9aa4215d243c7e93214ba0c6205 | [
"MIT"
] | null | null | null |
class Queue():
def __init__(self):
self.queue = []
def enqueue(self, value):
self.queue.append(value)
def dequeue(self):
if self.size() > 0:
return self.queue.pop(0)
else:
return None
def size(self):
return len(self.queue)
class Graph:
def __init__(self):
self.vertices = {}
def add_vertex(self, vertex_id):
self.vertices[vertex_id] = set()
def add_edge(self, v1, v2):
if v1 in self.vertices and v2 in self.vertices:
self.vertices[v1].add(v2)
else:
raise IndexError("nonexistent vertex")
def get_neighbors(self, vertex_id):
return self.vertices[vertex_id]
def bft(self, starting_vertex_id):
# Create an empty queue
q = Queue()
# Add starting vertex ID
q.enqueue(starting_vertex_id)
# Create set for visited verts
visited = set()
# While queue is not empty
while q.size() > 0:
# Dequeue a vert
v = q.dequeue()
# If not visited
if v not in visited:
# Visit it!
print(v)
# Mark as visited
visited.add(v)
# Add all neighbors to the queue
for neighbor in self.get_neighbors(v):
q.enqueue(neighbor)
def bfs(self, starting_vertex_id, target_vertex_id):
# Create an empty queue and enqueue A PATH TO the starting vertex ID
q = Queue()
q.enqueue([starting_vertex_id])
# Create a Set to store visited vertices
visited = set()
# While the queue is not empty..
while q.size() > 0:
# Dequeue the first PATH
path = q.dequeue()
# Grab the last vertex from the PATH
last_node = path[-1]
# If that vertex has not been visited...
if last_node not in visited:
# CHECK IF IT'S THE TARGET
if last_node == target_vertex_id:
# IF SO, RETURN PATH
return path
# Mark it as visited...
visited.add(last_node)
# Then add A PATH TO its neighbors to the back of the queue
for next_nbr in self.get_neighbors(last_node):
new_path = list(path)
# COPY THE PATH
new_path.append(next_nbr)
print("new path:", new_path)
# APPEND THE NEIGHOR TO THE BACK
q.enqueue(new_path)
def dft_recursive(self, starting_vertex_id, visited=None):
if visited is None:
visited = set()
visited.add(starting_vertex_id)
print("dft recursive", starting_vertex_id)
for neighbor in self.vertices[starting_vertex_id]:
if neighbor not in visited:
self.dft_recursive(neighbor, visited)
def dfs_recursive(self, vertex, ending_ver, visited=None, path=None):
if visited is None:
visited = set()
if path is None:
path = []
visited.add(vertex)
path = path + [vertex] #subtly makes a copy of the path
'''
line above is equivalent to:
path = list(path) #make a copy
path.append(vertex)
'''
if vertex == ending_ver:
return path
for neighbor in self.get_neighbors(vertex):
if neighbor not in visited:
new_path = self.dfs_recursive(neighbor, ending_ver, visited, path)
if new_path is not None:
print("recursive dfs:", new_path)
return new_path
return None
g = Graph()
g.add_vertex(1)
g.add_vertex(2)
g.add_vertex(3)
g.add_vertex(4)
g.add_vertex(5)
g.add_vertex(6)
g.add_edge(1, 2)
g.add_edge(1, 4)
g.add_edge(2, 3)
g.add_edge(4, 3)
g.add_edge(3, 6)
g.add_edge(6, 5)
g.add_edge(5, 4)
print(g.vertices)
g.bft(3)
g.dft_recursive(1)
g.dfs_recursive(1,6)
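# Illustrative (added): bfs() returns the shortest path between two vertices;
# for this sample graph the expected result is [1, 2, 3, 6].
print(g.bfs(1, 6))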
# self.vertices = {
# 1: {2}
# 2: set(1)
# } | 28.328767 | 82 | 0.541344 | 543 | 4,136 | 3.985267 | 0.184162 | 0.05915 | 0.073937 | 0.027726 | 0.156192 | 0.13586 | 0.057301 | 0.030499 | 0.030499 | 0 | 0 | 0.014965 | 0.369923 | 4,136 | 146 | 83 | 28.328767 | 0.815426 | 0.160542 | 0 | 0.215054 | 0 | 0 | 0.016231 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.129032 | false | 0 | 0 | 0.021505 | 0.236559 | 0.053763 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
578ab9ea563fb55426586552716c0169c0c95722 | 1,395 | py | Python | QuestionGenerator/qg_dataset.py | Sabokou/NLP | 6441791038f83225b489a5a9e1067a6e7b2e4f39 | [
"MIT"
] | 2 | 2021-12-06T15:00:26.000Z | 2021-12-06T15:15:56.000Z | QuestionGenerator/qg_dataset.py | Sabokou/NLP | 6441791038f83225b489a5a9e1067a6e7b2e4f39 | [
"MIT"
] | 8 | 2021-11-19T12:35:24.000Z | 2022-01-21T15:40:01.000Z | QuestionGenerator/qg_dataset.py | Sabokou/NLP | 6441791038f83225b489a5a9e1067a6e7b2e4f39 | [
"MIT"
] | null | null | null | import pandas as pd
import random
import datasets
import torch
from transformers import AutoTokenizer
class CustomQGDataset(torch.utils.data.Dataset):
def __init__(self,
tokenizer: AutoTokenizer,
data: datasets.Dataset,
max_length: int,
pad_mask_id: int):
self.data = pd.DataFrame(data)
self.max_length = max_length
self.pad_mask_id = pad_mask_id
self.tokenizer = tokenizer
def __getitem__(self, item_index: int):
item = self.data.iloc[item_index]
input_ids, attention_mask = self._tokenize_text(item.kontext)
labels, _ = self._tokenize_text(item.frage)
masked_labels = self._mask_label_padding(labels)
return {"input_ids": input_ids,
"attention_mask": attention_mask,
"labels": labels}
def _tokenize_text(self, text: str):
tokenized_text = self.tokenizer(
text,
padding="max_length",
max_length=self.max_length,
truncation=True,
return_tensors="pt"
)
return (tokenized_text["input_ids"].squeeze(),
tokenized_text["attention_mask"].squeeze())
def _mask_label_padding(self, labels: torch.Tensor) -> torch.Tensor:
labels[labels == self.tokenizer.pad_token_id] = self.pad_mask_id
return labels
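# Sketch of intended usage (an assumption, not part of the original file): wrap the
# dataset in a DataLoader for fine-tuning a question-generation model. Note that
# DataLoader also expects a __len__ (e.g. "return len(self.data)") and that the
# underlying datasets.Dataset is assumed to provide "kontext"/"frage" columns;
# raw_dataset below is a placeholder for such a dataset.
#
# tokenizer = AutoTokenizer.from_pretrained("t5-small")  # hypothetical checkpoint
# train_set = CustomQGDataset(tokenizer=tokenizer, data=raw_dataset,
# max_length=512, pad_mask_id=-100)  # -100 is the usual ignore index
# loader = torch.utils.data.DataLoader(train_set, batch_size=8, shuffle=True)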
| 33.214286 | 72 | 0.626523 | 158 | 1,395 | 5.202532 | 0.316456 | 0.065693 | 0.043796 | 0.043796 | 0.053528 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.284588 | 1,395 | 41 | 73 | 34.02439 | 0.823647 | 0 | 0 | 0 | 0 | 0 | 0.045878 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.138889 | 0 | 0.361111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
578ff9aba8a24e0f7ad1574d84f38e131a766460 | 5,073 | py | Python | app/core/management/commands/update_data.py | finish06/django-drugs-api | 160f27d2c1e66e6c9267bc771904daf1a6bcd273 | [
"MIT"
] | null | null | null | app/core/management/commands/update_data.py | finish06/django-drugs-api | 160f27d2c1e66e6c9267bc771904daf1a6bcd273 | [
"MIT"
] | null | null | null | app/core/management/commands/update_data.py | finish06/django-drugs-api | 160f27d2c1e66e6c9267bc771904daf1a6bcd273 | [
"MIT"
] | null | null | null | import requests
from datetime import date, timedelta, datetime
from django.core.management.base import BaseCommand
from core.models import Drug, Route, MOA
class Command(BaseCommand):
"""Django command to load the database"""
def build_moa_table(self, data_list):
self.stdout.write(f'{str(datetime.now())} -- Building MOA table')
database_moa = set()
for data in data_list:
try:
for moa in data.get('pharm_class', []):
if 'MoA' in moa:
database_moa.add(moa[:-6])
except Exception as err:
self.stdout.write('Invalid MoA structure:' + str(err))
continue
for item in database_moa:
exists = MOA.objects.filter(moa=item).exists()
if not exists:
MOA.objects.create(moa=item)
def link_drug_moa(self, data_list):
self.stdout.write("Linking MOA to drug table")
for data in data_list:
d = Drug.objects.get(
product_id=data.get('product_id', ""))
if d:
try:
for moa in data.get('pharm_class', []):
if 'MoA' in moa:
m = MOA.objects.get(moa=moa[:-6])
d.moa.add(m.id)
except Exception as err:
self.stdout.write("Invalid MOA link:" + str(err))
def build_routes_table(self, data_list):
self.stdout.write('Building routes table')
database_routes = set()
for data in data_list:
try:
for route in data.get('route', []):
database_routes.add(route)
except Exception as err:
self.stdout.write("Invalid route structure:" + str(err))
pass
for item in database_routes:
exists = Route.objects.filter(route=item).exists()
if not exists:
Route.objects.create(route=item)
def link_drug_routes(self, data_list):
self.stdout.write('Linking routes to drug table')
for data in data_list:
d = Drug.objects.get(product_id=data.get('product_id', ""))
if d:
try:
for route in data.get('route', []):
r = Route.objects.get(route=route)
d.routes.add(r.id)
except Exception as err:
self.stdout.write("Invalid route link:" + str(err))
def handle(self, *args, **options):
self.stdout.write("Loading the database...")
now = str(date.today().strftime("%Y%m%d"))
previous = str((date.today() - timedelta(120)).strftime("%Y%m%d"))
url = f"https://api.fda.gov/drug/ndc.json?search=marketing_start_date:[{previous}+TO+{now}]&limit=100" # noqa: E501
while True:
r = requests.get(url)
link = r.links
if r.status_code != 200:
quit()
else:
data_list = r.json()
self.build_routes_table(data_list['results'])
self.build_moa_table(data_list['results'])
database_drugs = []
self.stdout.write("Building drugs table")
for data in data_list['results']:
try:
product_id = data.get('product_id', "").lower()[:254]
if Drug.objects.filter(product_id=product_id).exists():
self.stdout.write("Drug exists: " + product_id)
continue
product_ndc = data.get('product_ndc', "").lower()[:13]
start_date = data.get('marketing_start_date', "").lower()[:8] # noqa: E501
end_date = data.get('listing_expiration_date', "").lower()[:8] # noqa: E501
generic_name = data.get('generic_name', "").lower()[:254]
brand_name = data.get('brand_name', "").capitalize()[:254]
dea_schedule = data.get('dea_schedule', "Legend")
drug = Drug(product_id=product_id,
product_ndc=product_ndc,
start_date=start_date,
end_date=end_date,
generic_name=generic_name,
brand_name=brand_name,
dea_schedule=dea_schedule)
if product_id:
database_drugs.append(drug)
except Exception as err:
self.stdout.write('Invalid drug structure' + str(err))
Drug.objects.bulk_create(database_drugs)
self.link_drug_routes(data_list['results'])
self.link_drug_moa(data_list['results'])
if link:
url = link['next']['url']
else:
self.stdout.write("Completed entire list")
break
self.stdout.write("Database load complete")
| 41.581967 | 124 | 0.51232 | 566 | 5,073 | 4.44523 | 0.208481 | 0.044515 | 0.083466 | 0.025835 | 0.315978 | 0.284976 | 0.266296 | 0.198728 | 0.163752 | 0.08903 | 0 | 0.010387 | 0.373743 | 5,073 | 121 | 125 | 41.92562 | 0.781555 | 0.013601 | 0 | 0.264151 | 0 | 0.009434 | 0.125901 | 0.008807 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04717 | false | 0.009434 | 0.037736 | 0 | 0.09434 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5790e6f787368e205bfe81bb7cd75a890b4366c1 | 354 | py | Python | labs_web/run_tests.py | okorienev/labs_web | 9bfd69ffc2196832523d5809cf921debe530f886 | [
"BSD-3-Clause"
] | 2 | 2018-04-22T11:34:09.000Z | 2018-05-09T20:14:48.000Z | labs_web/run_tests.py | AlexPraefectus/labs_web | 9bfd69ffc2196832523d5809cf921debe530f886 | [
"BSD-3-Clause"
] | null | null | null | labs_web/run_tests.py | AlexPraefectus/labs_web | 9bfd69ffc2196832523d5809cf921debe530f886 | [
"BSD-3-Clause"
] | null | null | null | import unittest
from pyvirtualdisplay import Display
# noinspection PyUnresolvedReferences
from labs_web.test import (TestServiceIsUp,
TestLogin,
TestTutorMenu)
if __name__ == "__main__":
display = Display(visible=0, size=(1366, 768))
display.start()
unittest.main()
display.stop()
| 25.285714 | 50 | 0.638418 | 32 | 354 | 6.78125 | 0.71875 | 0.101382 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.031373 | 0.279661 | 354 | 13 | 51 | 27.230769 | 0.819608 | 0.09887 | 0 | 0 | 0 | 0 | 0.025237 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.3 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
579110f232adc684355b4c444635654fefd58354 | 4,880 | py | Python | plotting/plot_mario_pk.py | sjforeman/RadioFisher | fe25f969de9a700c5697168ba9e0d2645c55ed81 | [
"AFL-3.0"
] | 3 | 2020-12-05T11:28:47.000Z | 2021-07-09T02:42:21.000Z | plotting/plot_mario_pk.py | sjforeman/RadioFisher | fe25f969de9a700c5697168ba9e0d2645c55ed81 | [
"AFL-3.0"
] | null | null | null | plotting/plot_mario_pk.py | sjforeman/RadioFisher | fe25f969de9a700c5697168ba9e0d2645c55ed81 | [
"AFL-3.0"
] | 2 | 2021-07-09T02:42:23.000Z | 2021-11-30T06:37:47.000Z | #!/usr/bin/python
"""
Process EOS Fisher matrices and plot P(k).
"""
import numpy as np
import pylab as P
from rfwrapper import rf
import matplotlib.patches
import matplotlib.cm
from units import *
from mpi4py import MPI
import os
import euclid
cosmo = rf.experiments.cosmo
#names = ["GBT", "BINGO", "WSRT", "APERTIF", "JVLA", "ASKAP", "KAT7", "MeerKAT", "SKA1", "SKAMID", "SKAMID_COMP", "iSKAMID", "iSKAMID_COMP", "SKA1_CV"]
#names = ["SKA1", "SKAMID", "SKAMID_COMP", "iSKAMID", "iSKAMID_COMP"]
names = ["SKAMID", "iSKAMID"] #"iSKAMID_COMP_BIGZ", "iSKA_CORE"]
#ls = ['k-', 'r-', 'b--', 'm-', 'c--']
cols = ['b', 'r']
colours = ['#22AD1A', '#3399FF', '#ED7624']
# Get f_bao(k) function
cosmo_fns, cosmo = rf.precompute_for_fisher(rf.experiments.cosmo, "camb/rf_matterpower.dat")
fbao = cosmo['fbao']
# Fiducial value and plotting
fig = P.figure()
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
for k in range(len(names)):
root = "output/" + names[k]
# Load cosmo fns.
dat = np.atleast_2d( np.genfromtxt(root+"-cosmofns-zc.dat") ).T
zc, Hc, dAc, Dc, fc = dat
z, H, dA, D, f = np.genfromtxt(root+"-cosmofns-smooth.dat").T
kc = np.genfromtxt(root+"-fisher-kc.dat").T
# Load Fisher matrices as fn. of z
Nbins = zc.size
F_list = [np.genfromtxt(root+"-fisher-full-%d.dat" % i) for i in range(Nbins)]
# Save P(k) rebinning info
#np.savetxt(root+"-rebin-Fbase-%d.dat" % i, np.array(binning_info['F_base']) )
#np.savetxt(root+"-rebin-cumul-%d.dat" % i, np.array(binning_info['cumul']) )
#np.savetxt(root+"-rebin-kgrid-%d.dat" % i, np.array(binning_info['kgrid']) )
#np.savetxt(root+"-rebin-Vfac-%d.dat" % i, np.array([binning_info['Vfac'],]) )
# EOS FISHER MATRIX
# Actually, (aperp, apar) are (D_A, H)
pnames = ['A', 'b_HI', 'Tb', 'sigma_NL', 'sigma8', 'n_s', 'f', 'aperp', 'apar',
'omegak', 'omegaDE', 'w0', 'wa', 'h', 'gamma', 'fNL']
pnames += ["pk%d" % i for i in range(kc.size)]
zfns = [1,]
F, lbls = rf.combined_fisher_matrix( F_list,
expand=zfns, names=pnames,
exclude=[2,4,5,6,7,8 ] )
# Just do the simplest thing for P(k)
cov = np.sqrt(1. / np.diag(F)[-kc.size:])
pk = cosmo['pk_nobao'](kc) * (1. + fbao(kc))
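# Note: 1/sqrt(F_ii) is the *conditional* (unmarginalised) error on each P(k) bin;
# the marginalised error would instead be np.sqrt(np.diag(np.linalg.inv(F)))[-kc.size:].
# The conditional error is used here as the "simplest thing" noted above.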
"""
# Remove elements with zero diagonal (completely unconstrained)
zero_idxs = np.where(np.diag(F) == 0.)[0]
print "Zero idxs:", zero_idxs
F = rf.fisher_with_excluded_params(F, excl=zero_idxs)
lbls = lbls[:-zero_idxs.size]
"""
yup, ydn = rf.fix_log_plot(pk, cov) # cov*pk
if names[k][0] != 'i':
#ax1.errorbar(kc, pk, yerr=[ydn, yup], ls=ls[k], lw=1.5, label=names[k], ms='.')
ax1.errorbar(kc, fbao(kc), yerr=[ydn, yup], color=cols[k], ls='none',
lw=2.2, capthick=2.2, label=names[k], ms='.')
else:
#ax2.errorbar(kc, pk, yerr=[ydn, yup], ls=ls[k], lw=1.5, label=names[k], ms='.')
ax2.errorbar(kc, fbao(kc), yerr=[ydn, yup], color=cols[k], ls='none',
lw=2.2, capthick=2.2, label=names[k], ms='.')
#P.plot(kc, cov, ls[k])
# Print diags.
#for i in range(diags.size):
# #if i < diags2.size:
# # print "%2d %10s %3.4f %3.4f" % (i, lbls[i], diags[i], diags2[i])
# #else:
# print "%2d %10s %3.4f" % (i, lbls[i], diags[i])
#exit()
kk = np.logspace(-3., 1., 2000)
ax1.plot(kk, fbao(kk), 'k-', lw=2.5, alpha=0.6)
ax2.plot(kk, fbao(kk), 'k-', lw=2.5, alpha=0.6)
#ax1.plot(kk, cosmo['pk_nobao'](kk) * (1. + fbao(kk)), 'k-')
#ax2.plot(kk, cosmo['pk_nobao'](kk) * (1. + fbao(kk)), 'k-')
ax1.set_xscale('log')
#ax1.set_yscale('log')
ax1.set_xlim((4e-3, 1e0))
#ax1.set_ylim((1e1, 1e6))
ax1.set_ylim((-0.11, 0.11))
ax2.set_xscale('log')
#ax2.set_yscale('log')
ax2.set_xlim((4e-3, 1e0))
ax2.set_ylim((-0.11, 0.11))
#ax2.set_ylim((1e1, 1e6))
# Move subplots
# pos = [[x0, y0], [x1, y1]]
pos1 = ax1.get_position().get_points()
pos2 = ax2.get_position().get_points()
dy = pos1[0,1] - pos2[1,1]
l = pos1[0,0]
w = pos1[1,0] - pos1[0,0]
h = pos1[1,1] - pos1[0,1]
b = pos1[0,1]
ax1.set_position([l, b - 0.5*dy, w, h+0.5*dy])
ax2.set_position([l, b - h - dy, w, h+0.5*dy])
# Hide x labels in upper subplot
for tick in ax1.xaxis.get_major_ticks():
tick.label1.set_visible(False)
fontsize = 18
for tick in ax2.xaxis.get_major_ticks():
tick.label1.set_fontsize(fontsize)
for tick in ax1.yaxis.get_major_ticks():
tick.label1.set_fontsize(fontsize)
for tick in ax2.yaxis.get_major_ticks():
tick.label1.set_fontsize(fontsize)
ax2.set_xlabel(r"$k \,[\mathrm{Mpc}^{-1}]$", fontdict={'fontsize':'20'})
#ax.set_ylabel(r"$P(k)$", fontdict={'fontsize':'20'})
# Set size
P.gcf().set_size_inches(16.5,10.5)
P.savefig('mario-pk.png', dpi=100)
P.show()
| 31.483871 | 151 | 0.586885 | 804 | 4,880 | 3.471393 | 0.318408 | 0.015048 | 0.008957 | 0.025797 | 0.302759 | 0.276603 | 0.270871 | 0.18703 | 0.173415 | 0.154783 | 0 | 0.04903 | 0.197541 | 4,880 | 154 | 152 | 31.688312 | 0.663687 | 0.329303 | 0 | 0.068493 | 0 | 0 | 0.093845 | 0.015136 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.123288 | 0 | 0.123288 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5793b34f471dda596bba9a3c046bb983b62f46f9 | 26,185 | py | Python | maiconverter/simai/simai.py | donmai-me/MaiConverter | 709b44ba1410c3728a3349aff91dc7337e1d712c | [
"MIT"
] | 29 | 2020-10-19T08:26:11.000Z | 2022-03-30T04:45:48.000Z | maiconverter/simai/simai.py | donmai-me/MaiConverter | 709b44ba1410c3728a3349aff91dc7337e1d712c | [
"MIT"
] | 9 | 2020-11-23T11:58:53.000Z | 2021-12-17T17:55:32.000Z | maiconverter/simai/simai.py | donmai-me/MaiConverter | 709b44ba1410c3728a3349aff91dc7337e1d712c | [
"MIT"
] | 2 | 2021-03-03T14:30:52.000Z | 2021-03-07T02:46:14.000Z | from __future__ import annotations
import math
from typing import Optional, Tuple, List, Union
from lark import Lark
from .tools import (
get_measure_divisor,
convert_to_fragment,
get_rest,
parallel_parse_fragments,
)
from ..event import NoteType
from .simainote import TapNote, HoldNote, SlideNote, TouchTapNote, TouchHoldNote, BPM
from .simai_parser import SimaiTransformer
# I hate the simai format can we use bmson or stepmania chart format for
# community-made charts instead
from ..tool import measure_to_second, second_to_measure, offset_arg_to_measure
class SimaiChart:
"""A class that represents a simai chart. Contains notes and bpm
information. Does not include information such as
song name, chart difficulty, composer, chart maker, etc.
It only contains enough information to build a working simai
chart.
Attributes:
bpms: Contains bpm events of the chart.
notes: Contains notes of the chart.
"""
def __init__(self):
self.notes: List[
Union[TapNote, HoldNote, SlideNote, TouchTapNote, TouchHoldNote]
] = []
self.bpms: List[BPM] = []
self._divisor: Optional[float] = None
self._measure = 1.0
@classmethod
def from_str(cls, chart_text: str, message: Optional[str] = None) -> SimaiChart:
# TODO: Rewrite this
if message is None:
print("Parsing simai chart...", end="", flush=True)
else:
print(message, end="", flush=True)
simai_chart = cls()
chart_text = "".join(chart_text.split())
try:
events_list = parallel_parse_fragments(chart_text.split(","))
except:
print("ERROR")
raise
else:
print("Done")
for events in events_list:
star_positions = []
offset = 0
for event in events:
event_type = event["type"]
if event_type == "bpm":
simai_chart.set_bpm(simai_chart._measure, event["value"])
elif event_type == "divisor":
simai_chart._divisor = event["value"]
elif event_type == "tap":
is_break, is_ex, is_star = False, False, False
modifier = event["modifier"]
if "b" in modifier:
is_break = True
if "x" in modifier:
is_ex = True
if "$" in modifier:
is_star = True
if "`" in modifier:
# Equivalent to one tick in ma2 with resolution of 384
offset += 0.0027
else:
offset = 0
simai_chart.add_tap(
measure=simai_chart._measure + offset,
position=event["button"],
is_break=is_break,
is_star=is_star,
is_ex=is_ex,
)
elif event_type == "hold":
is_ex = False
modifier = event["modifier"]
if "x" in modifier:
is_ex = True
if "`" in modifier:
# Equivalent to one tick in ma2 with resolution of 384
offset += 0.0027
else:
offset = 0
simai_chart.add_hold(
measure=simai_chart._measure + offset,
position=event["button"],
duration=event["duration"],
is_ex=is_ex,
)
elif event_type == "slide":
is_break, is_ex, is_tapless = False, False, False
modifier = event["modifier"]
if "b" in modifier:
is_break = True
if "x" in modifier:
is_ex = True
if any([a in modifier for a in "?!$"]):
# Tapless slides
# ? means the slide has no tap
# ! produces a tapless slide with no path, just a moving star
# $ is a remnant of 2simai, it is equivalent to ?
is_tapless = True
if "*" in modifier:
# Chained slides should have the same offset
pass
elif "`" in modifier:
# Equivalent to one tick in ma2 with resolution of 384
offset += 0.0027
else:
offset = 0
if not (is_tapless or event["start_button"] in star_positions):
simai_chart.add_tap(
measure=simai_chart._measure + offset,
position=event["start_button"],
is_break=is_break,
is_star=True,
is_ex=is_ex,
)
star_positions.append(event["start_button"])
equivalent_bpm = event["equivalent_bpm"]
duration = event["duration"]
delay = 0.25
if equivalent_bpm is not None:
multiplier = (
simai_chart.get_bpm(simai_chart._measure) / equivalent_bpm
)
duration = multiplier * duration
delay = multiplier * delay
simai_chart.add_slide(
measure=simai_chart._measure + offset,
start_position=event["start_button"],
end_position=event["end_button"],
duration=duration,
pattern=event["pattern"],
delay=delay,
reflect_position=event["reflect_position"],
)
elif event_type == "touch_tap":
is_firework = False
modifier = event["modifier"]
if "f" in modifier:
is_firework = True
if "`" in modifier:
# Equivalent to one tick in ma2 with resolution of 384
offset += 0.0027
else:
offset = 0
simai_chart.add_touch_tap(
measure=simai_chart._measure + offset,
position=event["location"],
region=event["region"],
is_firework=is_firework,
)
elif event_type == "touch_hold":
is_firework = False
modifier = event["modifier"]
if "f" in modifier:
is_firework = True
if "`" in modifier:
# Equivalent to one tick in ma2 with resolution of 384
offset += 0.0027
else:
offset = 0
simai_chart.add_touch_hold(
measure=simai_chart._measure + offset,
position=event["location"],
region=event["region"],
duration=event["duration"],
is_firework=is_firework,
)
else:
raise Exception(f"Unknown event type: {event_type}")
simai_chart._measure += 1 / simai_chart._divisor
return simai_chart
@classmethod
def open(cls, file: str) -> SimaiChart:
"""Opens a text file containing only a Simai chart. Does NOT accept a regular Simai file which contains
metadata and multiple charts. Use `parse_file` to parse a normal Simai file.
Args:
file: The path of the Simai chart file.
Examples:
Open a Simai chart file named "example.txt" at current directory.
>>> simai = SimaiChart.open("./example.txt")
"""
with open(file, "r") as f:
chart = f.read()
return cls.from_str(chart)
def add_tap(
self,
measure: float,
position: int,
is_break: bool = False,
is_star: bool = False,
is_ex: bool = False,
) -> SimaiChart:
"""Adds a tap note to the list of notes.
Args:
measure: Time when the note starts, in terms of measures.
position: Button where the tap note happens.
is_break: Whether a tap note is a break note.
is_star: Whether a tap note is a star note.
is_ex: Whether a tap note is an ex note.
Examples:
Add a regular tap note at measure 1, break tap note at
measure 2, ex tap note at measure 2.5, star note at
measure 3, and a break star note at measure 5. All at
position 7.
>>> simai = SimaiChart()
>>> simai.add_tap(1, 7)
>>> simai.add_tap(2, 7, is_break=True)
>>> simai.add_tap(2.5, 7, is_ex=True)
>>> simai.add_tap(3, 7, is_star=True)
>>> simai.add_tap(5, 7, is_break=True, is_star=True)
"""
tap_note = TapNote(
measure=measure,
position=position,
is_break=is_break,
is_star=is_star,
is_ex=is_ex,
)
self.notes.append(tap_note)
return self
def del_tap(self, measure: float, position: int) -> SimaiChart:
"""Deletes a tap note from the list of notes.
Args:
measure: Time when the note starts, in terms of measures.
position: Button where the tap note happens.
Examples:
Remove tap note at measure 26.75 at button 4.
>>> simai = SimaiChart()
>>> simai.add_tap(26.5, 4)
>>> simai.del_tap(26.75, 4)
"""
tap_notes = [
x
for x in self.notes
if isinstance(x, TapNote)
and math.isclose(x.measure, measure, abs_tol=0.0001)
and x.position == position
]
for note in tap_notes:
self.notes.remove(note)
return self
def add_hold(
self,
measure: float,
position: int,
duration: float,
is_ex: bool = False,
) -> SimaiChart:
"""Adds a hold note to the list of notes.
Args:
measure: Time when the note starts, in terms of measures.
position: Button where the hold note happens.
duration: Total time duration of the hold note.
is_ex: Whether a hold note is an ex note.
Examples:
Add a regular hold note at button 2 at measure 1, with
duration of 5 measures. And an ex hold note at button
6 at measure 3, with duration of 0.5 measures.
>>> simai = SimaiChart()
>>> simai.add_hold(1, 2, 5)
>>> simai.add_hold(3, 6, 0.5, is_ex=True)
"""
hold_note = HoldNote(measure, position, duration, is_ex)
self.notes.append(hold_note)
return self
def del_hold(self, measure: float, position: int) -> SimaiChart:
"""Deletes the matching hold note in the list of notes. If there are multiple
matches, all matching notes are deleted. If there is no match, nothing happens.
Args:
measure: Time when the note starts, in terms of measures.
position: Button where the hold note happens.
Examples:
Add a regular hold note at button 0 at measure 3.25 with duration of 2 measures
and delete it.
>>> simai = SimaiChart()
>>> simai.add_hold(3.25, 0, 2)
>>> simai.del_hold(3.25, 0)
"""
hold_notes = [
x
for x in self.notes
if isinstance(x, HoldNote)
and math.isclose(x.measure, measure, abs_tol=0.0001)
and x.position == position
]
for note in hold_notes:
self.notes.remove(note)
return self
def add_slide(
self,
measure: float,
start_position: int,
end_position: int,
duration: float,
pattern: str,
delay: float = 0.25,
reflect_position: Optional[int] = None,
) -> SimaiChart:
"""Adds both a slide note to the list of notes.
Args:
measure: Time when the slide starts, in
terms of measures.
start_position: Button where the slide starts.
end_position: Button where the slide ends.
duration: Total duration of the slide, in terms of
measures. Includes slide delay.
pattern: The one or two character slide pattern used.
delay: Duration from when the slide appears and when it
starts to move, in terms of measures. Defaults to 0.25.
reflect_position: The button where the 'V' slide will first go to.
Optional, defaults to None.
Examples:
Add a '-' slide at measure 2.25 from button 1 to button 5 with
duration of 1.5 measures
>>> simai = SimaiChart()
>>> simai.add_slide(2.25, 1, 5, 1.5, "-")
>>> simai.add_slide(3, 2, 7, 0.5, "V", reflect_position=4)
"""
slide_note = SlideNote(
measure,
start_position,
end_position,
duration,
pattern,
delay,
reflect_position,
)
self.notes.append(slide_note)
return self
def del_slide(
self,
measure: float,
start_position: int,
end_position: int,
) -> SimaiChart:
slide_notes = [
x
for x in self.notes
if isinstance(x, SlideNote)
and math.isclose(x.measure, measure, abs_tol=0.0001)
and x.position == start_position
and x.end_position == end_position
]
for note in slide_notes:
self.notes.remove(note)
return self
def add_touch_tap(
self,
measure: float,
position: int,
region: str,
is_firework: bool = False,
) -> SimaiChart:
touch_tap_note = TouchTapNote(measure, position, region, is_firework)
self.notes.append(touch_tap_note)
return self
def del_touch_tap(
self,
measure: float,
position: int,
region: str,
) -> SimaiChart:
touch_taps = [
x
for x in self.notes
if isinstance(x, TouchTapNote)
and math.isclose(x.measure, measure, abs_tol=0.0001)
and x.position == position
and x.region == region
]
for note in touch_taps:
self.notes.remove(note)
return self
def add_touch_hold(
self,
measure: float,
position: int,
region: str,
duration: float,
is_firework: bool = False,
) -> SimaiChart:
touch_hold_note = TouchHoldNote(
measure, position, region, duration, is_firework
)
self.notes.append(touch_hold_note)
return self
def del_touch_hold(
self,
measure: float,
position: int,
region: str,
) -> SimaiChart:
touch_holds = [
x
for x in self.notes
if isinstance(x, TouchHoldNote)
and math.isclose(x.measure, measure, abs_tol=0.0001)
and x.position == position
and x.region == region
]
for note in touch_holds:
self.notes.remove(note)
return self
def set_bpm(self, measure: float, bpm: float) -> SimaiChart:
"""Sets the bpm at given measure.
Note:
If BPM event is already defined at given measure,
the method will overwrite it.
Args:
measure: Time, in measures, where the bpm is defined.
bpm: The tempo in beat per minutes.
Examples:
In a chart, the initial bpm is 180 then changes
to 250 in measure 12.
>>> simai = SimaiChart()
>>> simai.set_bpm(0, 180)
>>> simai.set_bpm(12, 250)
"""
self.del_bpm(measure)
bpm_event = BPM(measure, bpm)
self.bpms.append(bpm_event)
return self
def get_bpm(self, measure: float) -> float:
"""Gets the bpm at given measure.
Args:
measure: Time, in measures.
Returns:
Returns the bpm defined at given measure or None.
Raises:
ValueError: When measure is negative, there are no BPMs
defined, or there are no starting BPM defined.
Examples:
In a chart, the initial bpm is 180 then changes
to 250 in measure 12.
>>> simai = SimaiChart()
>>> simai.get_bpm(0)
180.0
>>> simai.get_bpm(11.99)
180.0
>>> simai.get_bpm(12)
250.0
"""
if len(self.bpms) == 0:
raise ValueError("No BPMs defined")
if not any([0.0 <= x.measure <= 1.0 for x in self.bpms]):
raise ValueError("No starting BPM defined")
self.bpms.sort(key=lambda x: x.measure)
previous_bpm = self.bpms[0].bpm
for bpm in self.bpms:
if math.isclose(measure, bpm.measure, abs_tol=0.0001):
return bpm.bpm
if bpm.measure > measure:
break
previous_bpm = bpm.bpm
return previous_bpm
def del_bpm(self, measure: float) -> SimaiChart:
"""Deletes the bpm at given measure.
Note:
If there are no BPM defined for that measure, nothing happens.
Args:
measure: Time, in measures, where the bpm is defined.
Examples:
Delete the BPM change defined at measure 24.
>>> simai = SimaiChart()
>>> simai.del_bpm(24)
"""
bpms = [
x for x in self.bpms if math.isclose(x.measure, measure, abs_tol=0.0001)
]
for x in bpms:
self.bpms.remove(x)
return self
def offset(self, offset: Union[float, str]) -> SimaiChart:
offset = offset_arg_to_measure(offset, self.second_to_measure)
for note in self.notes:
note.measure = round(note.measure + offset, 4)
for bpm in self.bpms:
if 0 <= bpm.measure <= 1:
continue
bpm.measure = round(bpm.measure + offset, 4)
return self
def measure_to_second(self, measure: float) -> float:
bpms = [(bpm.measure, bpm.bpm) for bpm in self.bpms]
return measure_to_second(measure, bpms)
def second_to_measure(self, seconds: float) -> float:
bpms = [(bpm.measure, bpm.bpm) for bpm in self.bpms]
measure = second_to_measure(seconds, bpms)
return measure
def export(self, max_den: int = 1000) -> str:
# TODO: Rewrite this
measures = [event.measure for event in self.notes + self.bpms]
measures += [int(i) for i in measures]
measures.append(0.0)
measures = list(set(measures))
last_whole_measure = max([int(measure) for measure in measures])
measures.sort()
# whole_divisors contains divisors that fit perfectly all notes in one measure.
# It either contains an integer or None.
whole_divisors: List[Union[int, None]] = []
for whole_measure in range(last_whole_measure + 1):
note_measures = [
note_measure
for note_measure in measures
if int(note_measure) == whole_measure
]
whole_divisors.append(get_measure_divisor(note_measures))
# last_measure takes into account slide and hold notes' end measure
last_measure = 0.0
# measure_tick is our time-tracking variable. Used to know what measure
# are we in-between rests ","
measure_tick = 0.0
# previous_divisor is used for comparing to current_divisor
# to know if we should add a "{}" indicator
previous_divisor: Optional[int] = None
# previous_measure_int is used for comparing to current measure.
# If we are in a new whole measure, add a new line and add the divisor.
previous_measure_int = 0
# Our resulting chart in text form. Assuming that string fits in memory
result = ""
for (i, current_measure) in enumerate(measures):
bpm = [bpm for bpm in self.bpms if bpm.measure == current_measure]
notes = [note for note in self.notes if note.measure == current_measure]
hold_slides = [
note
for note in notes
if note.note_type
in [
NoteType.hold,
NoteType.ex_hold,
NoteType.touch_hold,
NoteType.complete_slide,
]
]
for hold_slide in hold_slides:
# Get hold and slide end measure and compare with last_measure
if hold_slide.note_type == NoteType.complete_slide:
last_measure = max(
current_measure + hold_slide.delay + hold_slide.duration,
last_measure,
)
else:
last_measure = max(
current_measure + hold_slide.duration, last_measure
)
whole_divisor = whole_divisors[int(current_measure)]
if i == len(measures) - 1:
# We are at the end so let's check if there are any
# active holds or slides
if last_measure > current_measure:
(whole, current_divisor, rest_amount) = get_rest(
current_measure,
last_measure,
current_divisor=(
previous_divisor if whole_divisor is None else whole_divisor
),
max_den=max_den,
)
else:
# Nothing to do
current_divisor = (
previous_divisor if whole_divisor is None else whole_divisor
)
whole, rest_amount = 0, 0
else:
# Why doesn't Python have a safe list 'get' method
next_measure: Optional[float] = (
measures[i + 1] if i + 1 < len(measures) else None
)
after_next_measure: Optional[float] = (
measures[i + 2] if i + 2 < len(measures) else None
)
(whole, current_divisor, rest_amount) = get_rest(
current_measure,
next_measure,
after_next_measure=after_next_measure,
current_divisor=(
previous_divisor if whole_divisor is None else whole_divisor
),
max_den=max_den,
)
if (
previous_divisor != current_divisor
or int(measure_tick) > previous_measure_int
):
result += "\n"
result += convert_to_fragment(
notes + bpm,
self.get_bpm(current_measure + 1),
current_divisor,
max_den=max_den,
)
previous_divisor = current_divisor
previous_measure_int = int(measure_tick)
else:
result += convert_to_fragment(
notes + bpm, self.get_bpm(current_measure + 1), max_den=max_den
)
measure_tick = current_measure
for _ in range(rest_amount):
result += ","
measure_tick += 1 / current_divisor
if whole > 0:
if current_divisor != 1:
result += "{1}"
previous_divisor = 1
for _ in range(whole):
result += ","
measure_tick += 1
measure_tick = round(measure_tick * 10000) / 10000
result += ",\nE\n"
return result
def parse_file_str(
file: str, lark_file: str = "simai.lark"
) -> Tuple[str, List[Tuple[int, SimaiChart]]]:
parser = Lark.open(lark_file, rel_to=__file__, parser="lalr")
dicts: List[dict] = SimaiTransformer().transform(parser.parse(file))
title = ""
charts: List[Tuple[int, SimaiChart]] = []
for element in dicts:
if element["type"] == "title":
title: str = element["value"]
elif element["type"] == "chart":
num, chart = element["value"]
simai_chart = SimaiChart.from_str(chart, message=f"Parsing chart #{num}...")
charts.append((num, simai_chart))
return title, charts
def parse_file(
path: str,
encoding: str = "UTF-8",
lark_file: str = "simai.lark",
) -> Tuple[str, List[Tuple[int, SimaiChart]]]:
with open(path, encoding=encoding) as f:
simai = f.read()
print(f"Parsing Simai file at {path}")
try:
result = parse_file_str(simai, lark_file=lark_file)
except:
print(f"Error parsing Simai file at {path}")
raise
else:
print(f"Done parsing Simai file at {path}")
return result
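# Minimal usage sketch (illustrative; the path below is hypothetical):
#
# title, charts = parse_file("./example_song/maidata.txt")
# for difficulty, chart in charts:
#     print(title, difficulty)
#     print(chart.export())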
| 33.962387 | 111 | 0.512011 | 2,903 | 26,185 | 4.477093 | 0.118843 | 0.022313 | 0.017235 | 0.01308 | 0.418096 | 0.352466 | 0.310072 | 0.278757 | 0.269755 | 0.221743 | 0 | 0.01782 | 0.410655 | 26,185 | 770 | 112 | 34.006494 | 0.824391 | 0.2367 | 0 | 0.398357 | 0 | 0 | 0.029688 | 0 | 0 | 0 | 0 | 0.002597 | 0 | 1 | 0.045175 | false | 0.002053 | 0.01848 | 0 | 0.110883 | 0.014374 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
579551794296fb2f74daf8b0bf94f211a131c99c | 5,504 | py | Python | zipline/data/loader.py | hebpmo/zipline | 396469b29e7e0daea4fe1e8a1c18f6c7eeb92780 | [
"Apache-2.0"
] | 4 | 2018-11-17T20:04:53.000Z | 2021-12-10T14:47:30.000Z | zipline/data/loader.py | t330883522/zipline | 396469b29e7e0daea4fe1e8a1c18f6c7eeb92780 | [
"Apache-2.0"
] | null | null | null | zipline/data/loader.py | t330883522/zipline | 396469b29e7e0daea4fe1e8a1c18f6c7eeb92780 | [
"Apache-2.0"
] | 3 | 2018-11-17T20:04:50.000Z | 2020-03-01T11:11:41.000Z | #
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
"""
This module was originally designed to fetch data from the network and store it in
local files. It has been changed to read directly from a local SQL database, and the
corresponding helper functions have been removed.
"""
import os
import logbook
import pandas as pd
# # Read the local index as the benchmark return series
#from .benchmarks import get_benchmark_returns
from .benchmarks_cn import get_benchmark_returns
# #
from . import treasuries, treasuries_can, treasuries_cn
from zipline.utils.calendars import get_calendar
logger = logbook.Logger('Loader')
# All index symbols map to treasuries_cn
# # Mapping from index symbol to appropriate bond data
INDEX_MAPPING = {
'000300':
(treasuries_cn,'treasury_curves_cn.csv',''),
'SPY':
(treasuries, 'treasury_curves.csv', 'www.federalreserve.gov'),
'^GSPTSE':
(treasuries_can, 'treasury_curves_can.csv', 'bankofcanada.ca'),
'^FTSE': # use US treasuries until UK bonds implemented
(treasuries, 'treasury_curves.csv', 'www.federalreserve.gov'),
}
# For testing
def get_benchmark_filename(symbol):
return "%s_benchmark.csv" % symbol
def load_market_data(trading_day=None, trading_days=None, bm_symbol='000300',
environ=None):
"""
Load benchmark returns and treasury yield curves for the given calendar and
benchmark symbol.
Benchmarks are downloaded as a Series from Google Finance. Treasury curves
are US Treasury Bond rates and are downloaded from 'www.federalreserve.gov'
by default. For Canadian exchanges, a loader for Canadian bonds from the
Bank of Canada is also available.
Results downloaded from the internet are cached in
~/.zipline/data. Subsequent loads will attempt to read from the cached
files before falling back to redownload.
Parameters
----------
trading_day : pandas.CustomBusinessDay, optional
A trading_day used to determine the latest day for which we
expect to have data. Defaults to an NYSE trading day.
trading_days : pd.DatetimeIndex, optional
A calendar of trading days. Also used for determining what cached
dates we should expect to have cached. Defaults to the NYSE calendar.
bm_symbol : str, optional
Symbol for the benchmark index to load. Defaults to '000300', the CSI 300 index.
Returns
-------
(benchmark_returns, treasury_curves) : (pd.Series, pd.DataFrame)
Notes
-----
Both return values are DatetimeIndexed with values dated to midnight in UTC
of each stored date. The columns of `treasury_curves` are:
'1month', '3month', '6month',
'1year','2year','3year','5year','7year','10year','20year','30year'
"""
calendar = get_calendar('SZSH')
#if trading_day is None:
# trading_day = get_calendar().trading_day
#if trading_days is None:
# trading_days = get_calendar().all_sessions
if trading_day is None:
# Changed to calendar.day
trading_day = calendar.day
if trading_days is None:
trading_days = calendar.all_sessions
first_date = trading_days[0]
now = pd.Timestamp.utcnow()
# We expect to have benchmark and treasury data that's current up until
# **two** full trading days prior to the most recently completed trading
# day.
# Example:
# On Thu Oct 22 2015, the previous completed trading day is Wed Oct 21.
# However, data for Oct 21 doesn't become available until the early morning
# hours of Oct 22. This means that there are times on the 22nd at which we
# cannot reasonably expect to have data for the 21st available. To be
# conservative, we instead expect that at any time on the 22nd, we can
# download data for Tuesday the 20th, which is two full trading days prior
# to the date on which we're running a test.
# We'll attempt to download new data if the latest entry in our cache is
# before this date.
#last_date = trading_days[trading_days.get_loc(now, method='ffill') - 2]
# # Adjust the offset (in days) according to the actual situation
local_now = pd.Timestamp('now')
offset = 1
refresh_time = local_now.normalize().replace(hour=18)
actual_end = calendar.actual_last_session
if local_now.date() > actual_end.date():
offset = 0
elif local_now > refresh_time:
offset = 0
last_date = trading_days[trading_days.get_loc(now, method='ffill') - offset]
br = get_benchmark_returns(bm_symbol, first_date, last_date)
tc = treasuries_cn.get_treasury_data(first_date, last_date)
# combine dt indices and reindex using ffill then bfill
all_dt = br.index.union(tc.index)
br = br.reindex(all_dt, method='ffill').fillna(method='bfill')
tc = tc.reindex(all_dt, method='ffill').fillna(method='bfill')
benchmark_returns = br[br.index.slice_indexer(first_date, last_date)]
treasury_curves = tc[tc.index.slice_indexer(first_date, last_date)]
return benchmark_returns, treasury_curves
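# Usage sketch (illustrative, not part of the original module):
#
# benchmark_returns, treasury_curves = load_market_data(bm_symbol='000300')
# print(benchmark_returns.tail())
# print(treasury_curves.columns)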
def load_prices_from_csv(filepath, identifier_col, tz='UTC'):
data = pd.read_csv(filepath, index_col=identifier_col)
data.index = pd.DatetimeIndex(data.index, tz=tz)
data.sort_index(inplace=True)
return data
def load_prices_from_csv_folder(folderpath, identifier_col, tz='UTC'):
data = None
for file in os.listdir(folderpath):
if '.csv' not in file:
continue
raw = load_prices_from_csv(os.path.join(folderpath, file),
identifier_col, tz)
if data is None:
data = raw
else:
data = pd.concat([data, raw], axis=1)
return data
| 35.057325 | 80 | 0.695131 | 760 | 5,504 | 4.893421 | 0.35 | 0.041409 | 0.016133 | 0.018284 | 0.172627 | 0.124765 | 0.124765 | 0.066147 | 0.026889 | 0.026889 | 0 | 0.014617 | 0.216933 | 5,504 | 156 | 81 | 35.282051 | 0.84826 | 0.490916 | 0 | 0.096774 | 0 | 0 | 0.088526 | 0.033815 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0.096774 | 0.016129 | 0.225806 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
57963381f1dc1d4f4dd6d34752d9c532853f4edc | 4,333 | py | Python | invenio_communities/invitations/services/permissions.py | max-moser/invenio-communities | cfdd6a8b36bf6856db4fd55123c832700eb32376 | [
"MIT"
] | null | null | null | invenio_communities/invitations/services/permissions.py | max-moser/invenio-communities | cfdd6a8b36bf6856db4fd55123c832700eb32376 | [
"MIT"
] | null | null | null | invenio_communities/invitations/services/permissions.py | max-moser/invenio-communities | cfdd6a8b36bf6856db4fd55123c832700eb32376 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2022 Northwestern University.
#
# Invenio-Communities is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Invitation request type permission policy."""
from elasticsearch_dsl.query import Q
from flask_principal import UserNeed
from invenio_records_permissions.generators import Generator, SystemProcess
from invenio_requests.customizations.base import BaseRequestPermissionPolicy
from ...permissions import CommunityOwner, CommunityManager, \
CommunityRoleManager, create_community_role_need
from .common import REQUEST_TYPE_ID
# Invitation Generators
class InvitationCommunityOwner(Generator):
"""Allows owner of the community of the invitation."""
def needs(self, request=None, **kwargs):
"""Enabling Needs.
:param request: an invitation
"""
invitation = request
# Symptom of how api.py objects are falsey if dict empty
if invitation is None:
return []
# Topic contains the community
community_uuid = invitation["topic"]["community"]
return [create_community_role_need(community_uuid, "owner")]
def query_filter(self, identity=None, **kwargs):
"""Filters for current identity as owner."""
community_uuids = [
CommunityRoleManager.from_need(n).community_uuid
for n in identity.provides
if (
n.method == "role" and
CommunityRoleManager.check_string(n.value) and
CommunityRoleManager.from_need(n).role == "owner"
)
]
return (
Q("term", type=REQUEST_TYPE_ID) &
Q("terms", **{"topic.community": community_uuids})
)
class InvitationCommunityManager(Generator):
"""Allows manager of the community of the invitation."""
def needs(self, request=None, **kwargs):
"""Enabling Needs.
:param request: an invitation
"""
invitation = request
# Symptom of how api.py objects are falsey if dict empty
if invitation is None:
return []
community_uuid = invitation["topic"]["community"]
return [create_community_role_need(community_uuid, "manager")]
def query_filter(self, identity=None, **kwargs):
"""Filters for current identity as owner."""
community_uuids = [
CommunityRoleManager.from_need(n).community_uuid
for n in identity.provides
if (
CommunityRoleManager.check_need(n) and
CommunityRoleManager.from_need(n).role == "manager"
)
]
return (
Q("term", type=REQUEST_TYPE_ID) &
Q("terms", **{"topic.community": community_uuids})
)
class InvitationInvitee(Generator):
"""Allows invited entity of the invitation."""
def needs(self, request=None, **kwargs):
"""Enabling Needs.
:param request: an invitation
"""
invitation = request
# Symptom of how api.py objects are falsey if dict empty
if invitation is None:
return []
receiver = invitation["receiver"]
# TODO: add group
user_id = int(receiver.get("user"))
return [UserNeed(user_id)]
class InvitationPermissionPolicy(BaseRequestPermissionPolicy):
"""Invitation permission policy."""
# Passed record is a community
can_create = [
CommunityOwner(), CommunityManager(), SystemProcess()
]
# Passed record is an invitation
can_read = [
InvitationCommunityOwner(), InvitationCommunityManager(),
InvitationInvitee(), SystemProcess()
]
can_update = [
InvitationCommunityOwner(), InvitationCommunityManager(),
SystemProcess()
]
can_action_cancel = [
InvitationCommunityOwner(), InvitationCommunityManager(),
SystemProcess()
]
can_action_accept = [InvitationInvitee(), SystemProcess()]
can_action_decline = [InvitationInvitee(), SystemProcess()]
# Comments (passed record is a request event)
# TODO
# can_comment = [
# InvitationCommunityOwner(), InvitationCommunityManager(),
# InvitationInvitee(), SystemProcess()
# ]
| 30.730496 | 77 | 0.641588 | 419 | 4,333 | 6.515513 | 0.305489 | 0.010989 | 0.041026 | 0.042491 | 0.484615 | 0.431868 | 0.405495 | 0.405495 | 0.405495 | 0.405495 | 0 | 0.001566 | 0.263097 | 4,333 | 140 | 78 | 30.95 | 0.853429 | 0.256174 | 0 | 0.472973 | 0 | 0 | 0.037395 | 0 | 0 | 0 | 0 | 0.007143 | 0 | 1 | 0.067568 | false | 0 | 0.081081 | 0 | 0.391892 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
579682cdad3d82b199ae188f5472ed06803934c3 | 2,072 | py | Python | sims/cb/cytofpy/attempt1/test.py | luiarthur/cytof5 | 6b4df5e9fd94bfd586e96579b8c618fdf6f913ed | [
"MIT"
] | 1 | 2020-01-30T21:56:52.000Z | 2020-01-30T21:56:52.000Z | sims/cb/cytofpy/attempt3/test.py | luiarthur/cytof5 | 6b4df5e9fd94bfd586e96579b8c618fdf6f913ed | [
"MIT"
] | 27 | 2018-12-20T18:22:25.000Z | 2021-02-24T03:13:32.000Z | sims/cb/cytofpy/attempt3/test.py | luiarthur/cytof5 | 6b4df5e9fd94bfd586e96579b8c618fdf6f913ed | [
"MIT"
] | null | null | null | import torch
from torch.distributions import Normal
from Cytof import Cytof
from simdata import simdata
def mixLz(i, n, j, etaz, muz, sig, y):
Lz = muz.size(2)
assert(Lz == etaz.size(2))
out = torch.empty(Lz)
for l in range(Lz):
out[l] = torch.log(etaz[i, j, l, 0]) + Normal(muz[0, 0, l, 0], sig[i]).log_prob(y[i][n, j, 0, 0])
return out.logsumexp(0)
def mixJ(i, n, k, eta0, eta1, mu0, mu1, sig, Z, y):
J = y[0].size(1)
out = 0.0
for j in range(J):
zjk = Z[0, j, k]
etaz = eta1 if zjk == 1 else eta0
muz = mu1.cumsum(2) if zjk == 1 else -mu0.cumsum(2)
out += mixLz(i, n, j, etaz, muz, sig, y)
return out
def mixK(i, n, eta0, eta1, mu0, mu1, sig, Z, W, y):
K = Z.size(2)
out = torch.empty(K)
for k in range(K):
out[k] = W[i, k].log() + mixJ(i, n, k, eta0, eta1, mu0, mu1, sig, Z, y)
return out.logsumexp(0)
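# The naive log-likelihood below evaluates, term by term,
# (1 / sum_i N_i) * sum_i sum_n log( sum_k W[i,k] *
# prod_j sum_l eta_z[i,j,l] * Normal(mu_z[l], sig[i]).pdf(y[i][n,j]) )
# where z = Z[j,k] selects the (eta1, +cumsum(mu1)) or (eta0, -cumsum(mu0)) mixture.
# It is deliberately slow and is used in __main__ to cross-check the vectorised
# (broadcasting) implementation in Cytof.loglike.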
def naive_loglike(params, data, K, L, N):
I = len(N)
J = data['y'][0].size(1)
y = data['y']
Z = params['Z']
W = params['W']
eta0 = params['eta0']
eta1 = params['eta1']
mu0 = params['mu0']
mu1 = params['mu1']
sig = params['sig']
Nsum = sum(N)
ll = 0.0
for i in range(I):
for n in range(N[i]):
ll += mixK(i, n, eta0, eta1, mu0, mu1, sig, Z, W, y) / Nsum
return ll
if __name__ == '__main__':
print('testing to see if I am broadcasting correctly...')
torch.manual_seed(0)
L0 = 3
L1 = 4
N = [100, 50, 80]
J = 8
K = 4
L = [L0, L1]
eps = 1e-6
data = simdata(N=N, L0=L0, L1=L1, J=J, a_W=[2., 3., 4., 3.])
model = Cytof(data=data['data'], K=K, L=L)
vp = model.init_vp()
real_params = model.sample_real_params(vp)
params = model.to_param_space(real_params)
ll_model = model.loglike(real_params, data['data'])
ll_naive = naive_loglike(params, data['data'], K, L, N)
diff = ll_model - ll_naive
print('ll_model: {} | ll_naive: {} | diff: {}'.format(ll_model, ll_naive, diff))
assert(abs(diff / ll_model) < eps)
| 26.227848 | 105 | 0.545367 | 367 | 2,072 | 2.997275 | 0.234332 | 0.012727 | 0.04 | 0.050909 | 0.234545 | 0.129091 | 0.129091 | 0.129091 | 0.094545 | 0.094545 | 0 | 0.051965 | 0.275579 | 2,072 | 78 | 106 | 26.564103 | 0.680879 | 0 | 0 | 0.031746 | 0 | 0 | 0.061293 | 0 | 0 | 0 | 0 | 0 | 0.031746 | 1 | 0.063492 | false | 0 | 0.063492 | 0 | 0.190476 | 0.031746 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
57985d4c3986c9219d0352042ff6e9f1c6d559ec | 1,347 | py | Python | dalib/translation/cyclegan/transform.py | ashok-arjun/Transfer-Learning-Library | e13bb3e37bc7ca74a382cfdc85a640deaf0333b3 | [
"MIT"
] | 20 | 2021-10-22T01:31:39.000Z | 2022-03-11T10:00:36.000Z | dalib/translation/cyclegan/transform.py | ashok-arjun/Transfer-Learning-Library | e13bb3e37bc7ca74a382cfdc85a640deaf0333b3 | [
"MIT"
] | 2 | 2021-11-11T02:31:39.000Z | 2022-02-23T12:11:22.000Z | dalib/translation/cyclegan/transform.py | ashok-arjun/Transfer-Learning-Library | e13bb3e37bc7ca74a382cfdc85a640deaf0333b3 | [
"MIT"
] | 2 | 2021-11-15T05:30:01.000Z | 2021-12-19T23:18:30.000Z | import torch
import torch.nn as nn
import torchvision.transforms as T
from common.vision.transforms import Denormalize
class Translation(nn.Module):
"""
Image Translation Transform Module
Args:
generator (torch.nn.Module): An image generator, e.g. :meth:`~dalib.translation.cyclegan.resnet_9_generator`
device (torch.device): device to put the generator. Default: 'cpu'
mean (tuple): the normalized mean for image
std (tuple): the normalized std for image
Input:
- image (PIL.Image): raw image in shape H x W x C
Output:
raw image in shape H x W x 3
"""
def __init__(self, generator, device=torch.device("cpu"), mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)):
super(Translation, self).__init__()
self.generator = generator.to(device)
self.device = device
self.pre_process = T.Compose([
T.ToTensor(),
T.Normalize(mean, std)
])
self.post_process = T.Compose([
Denormalize(mean, std),
T.ToPILImage()
])
def forward(self, image):
image = self.pre_process(image.copy()) # C x H x W
image = image.to(self.device)
generated_image = self.generator(image.unsqueeze(dim=0)).squeeze(dim=0).cpu()
return self.post_process(generated_image)
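# Example usage (illustrative sketch; assumes a trained CycleGAN generator and that
# the factory below matches the one referenced in the docstring -- check its actual
# signature before use):
#
# from PIL import Image
# from dalib.translation.cyclegan import resnet_9_generator
# generator = resnet_9_generator()  # hypothetical call signature
# translate = Translation(generator, device=torch.device("cuda"))
# translated = translate(Image.open("source.png"))  # returns a PIL.Image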
| 32.071429 | 116 | 0.621381 | 183 | 1,347 | 4.486339 | 0.371585 | 0.014616 | 0.014616 | 0.019488 | 0.060901 | 0.060901 | 0.046285 | 0.046285 | 0 | 0 | 0 | 0.01608 | 0.261321 | 1,347 | 41 | 117 | 32.853659 | 0.809045 | 0.321455 | 0 | 0.090909 | 0 | 0 | 0.003484 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.181818 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
579aa6226b9cc748668794f8d6319fc5e7ee7817 | 3,188 | py | Python | day07/python/thomg/config_amps.py | clssn/aoc-2019 | a978e5235855be937e60a1e7f88d1ef9b541be15 | [
"MIT"
] | 22 | 2019-11-27T08:28:46.000Z | 2021-04-27T05:37:08.000Z | day07/python/thomg/config_amps.py | clssn/aoc-2019 | a978e5235855be937e60a1e7f88d1ef9b541be15 | [
"MIT"
] | 77 | 2019-11-16T17:22:42.000Z | 2021-05-10T20:36:36.000Z | day07/python/thomg/config_amps.py | clssn/aoc-2019 | a978e5235855be937e60a1e7f88d1ef9b541be15 | [
"MIT"
] | 43 | 2019-11-27T06:36:51.000Z | 2021-11-03T20:56:48.000Z | import intcomp
def quinaryRepAsList(integer):
quinary = []
if(integer >= 3125):
print("error in quinary representation: only numbers up to 3124 are supported (you tried "+str(integer)+")")
# 5^4
digit = int(integer/625)
quinary.append(digit)
integer -= digit*625
# 5^3
digit = int(integer/125)
quinary.append(digit)
integer -= digit*125
# 5^2
digit = int(integer/25)
quinary.append(digit)
integer -= digit*25
# 5^1
digit = int(integer/5)
quinary.append(digit)
integer -= digit*5
# 5^0 (should just be the rest)
quinary.append(integer)
return quinary
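# Worked example: quinaryRepAsList(123) == [0, 0, 4, 4, 3],
# since 123 = 0*625 + 0*125 + 4*25 + 4*5 + 3.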
maxOutput = 0
for n in range(3125):
quinary = quinaryRepAsList(n)
# skip if one phase setting would be used multiple times:
if len(quinary) > len(set(quinary)):
continue
inputA = [quinary[0], 0]
outputA = intcomp.executeProgramFromFile("program", inputA, 1, 0)[0]
inputB = [quinary[1], outputA[0]]
outputB = intcomp.executeProgramFromFile("program", inputB, 1, 0)[0]
inputC = [quinary[2], outputB[0]]
outputC = intcomp.executeProgramFromFile("program", inputC, 1, 0)[0]
inputD = [quinary[3], outputC[0]]
outputD = intcomp.executeProgramFromFile("program", inputD, 1, 0)[0]
inputE = [quinary[4], outputD[0]]
outputE = intcomp.executeProgramFromFile("program", inputE, 1, 0)[0]
if maxOutput < outputE[0]:
maxOutput = outputE[0]
print("output for star 1: "+str(maxOutput))
maxOutput = 0
for n in range(3125):
quinary = quinaryRepAsList(n)
# skip if one phase setting would be used multiple times:
if len(quinary) > len(set(quinary)):
continue
quinary = [d+5 for d in quinary]
# initialize all programs with their phase settings
inputA = [quinary[0], 0]
(outputA, programA, pointerA) = intcomp.executeProgramFromFile("program", inputA, 0, 1)
inputB = [quinary[1], outputA[0]]
(outputB, programB, pointerB) = intcomp.executeProgramFromFile("program", inputB, 0, 1)
inputC = [quinary[2], outputB[0]]
(outputC, programC, pointerC) = intcomp.executeProgramFromFile("program", inputC, 0, 1)
inputD = [quinary[3], outputC[0]]
(outputD, programD, pointerD) = intcomp.executeProgramFromFile("program", inputD, 0, 1)
inputE = [quinary[4], outputD[0]]
(outputE, programE, pointerE) = intcomp.executeProgramFromFile("program", inputE, 0, 1)
while len(outputE) < 2:
inputA = [outputE[0]]
(outputA, programA, pointerA) = intcomp.executeProgram(programA, pointerA, inputA, 0, 1)
inputB = [outputA[0]]
(outputB, programB, pointerB) = intcomp.executeProgram(programB, pointerB, inputB, 0, 1)
inputC = [outputB[0]]
(outputC, programC, pointerC) = intcomp.executeProgram(programC, pointerC, inputC, 0, 1)
inputD = [outputC[0]]
(outputD, programD, pointerD) = intcomp.executeProgram(programD, pointerD, inputD, 0, 1)
inputE = [outputD[0]]
(outputE, programE, pointerE) = intcomp.executeProgram(programE, pointerE, inputE, 0, 1)
if maxOutput < outputE[0]:
maxOutput = outputE[0]
print("output for star 2: "+str(maxOutput))
| 39.358025 | 116 | 0.65276 | 383 | 3,188 | 5.43342 | 0.229765 | 0.139356 | 0.172994 | 0.048054 | 0.41951 | 0.314753 | 0.173955 | 0.173955 | 0.173955 | 0.173955 | 0 | 0.045346 | 0.211418 | 3,188 | 80 | 117 | 39.85 | 0.782418 | 0.064931 | 0 | 0.41791 | 0 | 0 | 0.064266 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014925 | false | 0 | 0.014925 | 0 | 0.044776 | 0.044776 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
579eb36fcae30024d36d774ac693fb5ed1c3e24a | 24,443 | py | Python | model.py | smearle/pytorch-a2c-ppo-acktr-micropolis | 699a6f6e65e8bab5533074945cc9aa7827919a59 | [
"MIT"
] | 3 | 2020-07-13T08:44:36.000Z | 2022-03-18T01:17:59.000Z | model.py | smearle/pytorch-baselines-micropolis | 699a6f6e65e8bab5533074945cc9aa7827919a59 | [
"MIT"
] | null | null | null | model.py | smearle/pytorch-baselines-micropolis | 699a6f6e65e8bab5533074945cc9aa7827919a59 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from distributions import Categorical2D
from utils import init, init_normc_
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class Policy(nn.Module):
def __init__(self, obs_shape, action_space, base_kwargs=None, curiosity=False, algo='A2C', model='MicropolisBase', args=None):
super(Policy, self).__init__()
self.curiosity = curiosity
self.args = args
if base_kwargs is None:
base_kwargs = {}
if len(obs_shape) == 3:
if curiosity:
self.base = MicropolisBase_ICM(obs_shape[0], **base_kwargs)
elif args.model == 'squeeze':
self.base = MicropolisBase(obs_shape[0], **base_kwargs, map_width=args.map_width)
else:
self.base = MicropolisBase_fixedmap(obs_shape[0], **base_kwargs, map_width=args.map_width)
elif len(obs_shape) == 1:
self.base = MLPBase(obs_shape[0], **base_kwargs)
else:
raise NotImplementedError
if action_space.__class__.__name__ == "Discrete":
num_outputs = action_space.n
self.dist = Categorical2D(self.base.output_size, num_outputs)
elif action_space.__class__.__name__ == "Box":
num_outputs = action_space.shape[0]
if self.args.env_name == 'MicropolisPaintEnv-v0':
self.dist = None
else:
# self.dist = DiagGaussian(self.base.output_size, num_outputs)
self.dist = Categorical2D(self.base.output_size, num_outputs)
else:
raise NotImplementedError
@property
def is_recurrent(self):
return self.base.is_recurrent
@property
def recurrent_hidden_state_size(self):
"""Size of rnn_hx."""
return self.base.recurrent_hidden_state_size
def forward(self, inputs, rnn_hxs, masks):
raise NotImplementedError
def act(self, inputs, rnn_hxs, masks, deterministic=False,
player_act=None, icm_enabled=False):
''' assumes player actions can only occur on env rank 0'''
value, actor_features, rnn_hxs = self.base(inputs, rnn_hxs, masks)
action_bin = None
if 'paint' in self.args.env_name.lower():
dist = torch.distributions.binomial.Binomial(1, actor_features)
action = dist.sample()
action_log_probs = dist.log_prob(action)
else:
dist = self.dist(actor_features)
if player_act:
# force the model to sample the player-selected action
play_features = actor_features
play_features = play_features.view(actor_features.size(0), -1)
play_features.fill_(-99999)
play_features[:1, player_act] = 99999
play_features = play_features.view(actor_features.shape)
play_dist = self.dist(play_features)
action = play_dist.sample()
# backprop is sent through the original distribution
action_log_probs = dist.log_probs(action)
else:
if deterministic:
action = dist.mode()
else:
action = dist.sample()
action_log_probs = dist.log_probs(action)
if icm_enabled:
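# Build a one-hot (batch, num_actions) encoding of the sampled action so the
# ICM forward model can condition on the action taken.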
action_bin = torch.zeros(dist.probs.shape)
action_ixs = torch.Tensor(list(range(dist.probs.size(0)))).unsqueeze(1).long()
action_i = torch.cat((action_ixs.cuda(), action.cuda()), 1)
action_bin[action_i[:,0], action_i[:,1]] = 1
if torch.cuda.current_device() > 0:
action_bin = action_bin.cuda()
return value, action, action_log_probs, action_bin, rnn_hxs
def icm_act(self, inputs):
s1, pred_s1, pred_a = self.base(inputs, None, None, icm=True)
return s1, pred_s1, self.dist(pred_a).probs
def get_value(self, inputs, rnn_hxs, masks):
value, _, _ = self.base(inputs, rnn_hxs, masks)
return value
def evaluate_icm(self, inputs):
s1, pred_s1, pred_a = self.base(inputs, None, None, icm=True)
return s1, pred_s1, self.dist(pred_a).probs
def evaluate_actions(self, inputs, rnn_hxs, masks, action):
value, actor_features, rnn_hxs = self.base(inputs, rnn_hxs, masks)
if 'paint' in self.args.env_name.lower():
dist = torch.distributions.binomial.Binomial(1, actor_features)
action_log_probs = dist.log_prob(action)
dist_entropy = None
#dist_entropy = (dist.logits * dist.probs).mean()
else:
dist = self.dist(actor_features)
action_log_probs = dist.log_probs(action)
dist_entropy = dist.entropy().mean()
return value, action_log_probs, dist_entropy, rnn_hxs
class NNBase(nn.Module):
def __init__(self, recurrent, recurrent_input_size, hidden_size):
super(NNBase, self).__init__()
self._hidden_size = hidden_size
self._recurrent = recurrent
if recurrent:
self.gru = nn.GRUCell(recurrent_input_size, hidden_size)
nn.init.orthogonal_(self.gru.weight_ih.data)
nn.init.orthogonal_(self.gru.weight_hh.data)
self.gru.bias_ih.data.fill_(0)
self.gru.bias_hh.data.fill_(0)
@property
def is_recurrent(self):
return self._recurrent
@property
def recurrent_hidden_state_size(self):
if self._recurrent:
return self._hidden_size
return 1
@property
def output_size(self):
return self._hidden_size
def _forward_gru(self, x, hxs, masks):
if x.size(0) == hxs.size(0):
x = hxs = self.gru(x, hxs * masks)
else:
# x is a (T, N, -1) tensor that has been flattened to (T * N, -1)
N = hxs.size(0)
T = int(x.size(0) / N)
# unflatten
x = x.view(T, N, x.size(1))
# Same deal with masks
masks = masks.view(T, N, 1)
outputs = []
for i in range(T):
hx = hxs = self.gru(x[i], hxs * masks[i])
outputs.append(hx)
# assert len(outputs) == T
# x is a (T, N, -1) tensor
x = torch.stack(outputs, dim=0)
# flatten
x = x.view(T * N, -1)
return x, hxs
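# Note: `_forward_gru` multiplies the hidden state by `masks` before every GRU
# step, so a mask of 0 at an episode boundary resets the recurrent state while a
# mask of 1 carries it over. When the rollout arrives flattened as (T * N, -1),
# the loop above unrolls it back into T sequential steps over N parallel
# environments before re-flattening the outputs.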
class MicropolisBase_fixedmap(NNBase):
def __init__(self, num_inputs, recurrent=False, hidden_size=512, map_width=20):
super(MicropolisBase_fixedmap, self).__init__(recurrent, hidden_size, hidden_size)
self.map_width = map_width
init_ = lambda m: init(m,
nn.init.dirac_,
lambda x: nn.init.constant_(x, 0.1),
nn.init.calculate_gain('relu'))
self.skip_compress = init_(nn.Conv2d(num_inputs, 15, 1, stride=1))
self.conv_0 = nn.Conv2d(num_inputs, 64, 1, 1, 0)
init_(self.conv_0)
self.conv_1 = nn.Conv2d(64, 64, 5, 1, 2)
init_(self.conv_1)
self.conv_2 = nn.Conv2d(64, 64, 3, 1, 1)
init_(self.conv_2)
self.critic_compress = init_(nn.Conv2d(79, 8, 1, 1, 1))
init_ = lambda m: init(m,
nn.init.dirac_,
lambda x: nn.init.constant_(x, 0))
self.actor_compress = init_(nn.Conv2d(79, 19, 3, 1, 1))
self.critic_conv_1 = init_(nn.Conv2d(8, 1, self.map_width, self.map_width, 0))
# self.critic_conv_2 = init_(nn.Conv2d(1, 1, 2, 1, 0)) # for 40x40 map
self.train()
def forward(self, inputs, rnn_hxs, masks):
x = inputs
x = F.relu(self.conv_0(x))
skip_input = F.relu(self.skip_compress(inputs))
x = F.relu(self.conv_1(x))
for i in range(self.map_width):
#print(self.conv_2.weight)
x = self.conv_2(x)
x = F.relu(x)
x = torch.cat((x, skip_input), 1)
values = F.relu(self.critic_compress(x))
values = self.critic_conv_1(values)
values = values.view(values.size(0), -1)
actions = self.actor_compress(x)
return values, actions, rnn_hxs
class MicropolisBase(NNBase):
def __init__(self, num_inputs, recurrent=False, hidden_size=512, map_width=20):
super(MicropolisBase, self).__init__(recurrent, hidden_size, hidden_size)
self.map_width = map_width
init_ = lambda m: init(m,
nn.init.dirac_,
lambda x: nn.init.constant_(x, 0.1),
nn.init.calculate_gain('relu'))
linit_ = lambda m: init(m,
nn.init.orthogonal_,
lambda x: nn.init.constant_(x, 0))
self.conv_00 = init_(nn.Conv2d(num_inputs, 64, 1, 1, 0))
self.conv_0 = init_(nn.Conv2d(64, 64, 3, 3, 0))
self.conv_1 = init_(nn.Conv2d(64, 64, 3, 1, 1))
#self.lin_0 = linit_(nn.Linear(1024, 1024))
self.val_cmprs = init_(nn.Conv2d(64, 64, 9, 1, 4))
self.val_conv_0 = init_(nn.Conv2d(64, 64, 3, 1, 1))
self.num_maps = 3 # how many different sizes
for i in range(self.num_maps):
setattr(self, 'upsample_{}'.format(i), nn.Upsample(size=(1+2*(i+1))))
self.act_convt = init_(nn.ConvTranspose2d(64 + 64, 64, 3, 3, 0))
init_ = lambda m: init(m,
nn.init.dirac_,
lambda x: nn.init.constant_(x, 0))
self.act_tomap = init_(nn.Conv2d(64, 19, 5, 1, 2))
self.val_conv = init_(nn.Conv2d(64, 1, 1, 1, 0))
self.train()
def forward(self, inputs, rnn_hxs, masks):
x = inputs
x = x_0 = F.relu(self.conv_00(x))
x_cmps = []
for i in range(self.num_maps):
x = F.relu(self.conv_0(x))
x_cmps += [x]
for i in range(1):
x = F.relu(self.conv_1(x))
cmprs_shape = x.shape
#x = x.view(x.size(0), -1)
#x = torch.tanh(self.lin_0(x))
#x = x.view(*cmprs_shape)
vals = F.relu(self.val_cmprs(x))
vals = F.relu(self.val_conv_0(vals))
vals = (self.val_conv(vals))
acts = x
for i in range(self.num_maps):
#upsample = getattr(self, 'upsample_{}'.format(i))
#acts = upsample(acts)
#acts = F.pad(acts, (1, 1, 1, 1))
x_0 = x_cmps[self.num_maps-1-i]
acts = torch.cat((acts, x_0), 1)
acts = F.relu(self.act_convt(acts))
acts = F.relu(self.act_tomap(acts))
return vals.view(vals.size(0), -1), acts, rnn_hxs
class MicropolisBase_ICM(MicropolisBase_fixedmap):
def __init__(self, num_inputs, recurrent=False, hidden_size=512):
super(MicropolisBase_ICM, self).__init__(num_inputs, recurrent, hidden_size)
### ICM feature encoder
init_ = lambda m: init(m,
nn.init.dirac_,
lambda x: nn.init.constant_(x, 0),
nn.init.calculate_gain('relu'))
num_skip_inputs=15
self.num_action_channels=19
self.icm_state_in = init_(nn.Conv2d(num_inputs, 64, 3, 1, 1))
self.icm_state_conv_0 = init_(nn.Conv2d(64, 64, 3, 1, 1))
self.icm_state_out = init_(nn.Conv2d(64, 64, 3, 1, 1))
self.icm_pred_a_in = init_(nn.Conv2d((num_inputs) * 2, 128, 3, 1, 1))
self.icm_pred_a_conv_0 = init_(nn.Conv2d(128, 128, 3, 1, 1))
self.icm_pred_s_in = init_(nn.Conv2d((num_inputs) + self.num_action_channels, 64, 1, 1, 0))
self.icm_pred_s_conv_0 = init_(nn.Conv2d(64, 64, 3, 1, 1))
self.icm_pred_s_conv_1 = init_(nn.Conv2d(64, 64, 3, 1, 1))
self.icm_pred_s_conv_2 = init_(nn.Conv2d(64, 64, 3, 1, 1))
self.icm_pred_s_conv_3 = init_(nn.Conv2d(64, 64, 3, 1, 1))
self.icm_pred_s_conv_4 = init_(nn.Conv2d(64, 64, 3, 1, 1))
self.icm_pred_s_conv_5 = init_(nn.Conv2d(64, 64, 3, 1, 1))
self.icm_pred_s_conv_6 = init_(nn.Conv2d(64, 64, 3, 1, 1))
self.icm_pred_s_conv_7 = init_(nn.Conv2d(64, 64, 3, 1, 1))
self.icm_pred_s_conv_8 = init_(nn.Conv2d(64, 64, 3, 1, 1))
self.icm_pred_s_conv_9 = init_(nn.Conv2d(64, 64, 3, 1, 1))
self.icm_pred_s_conv_10 = init_(nn.Conv2d(64, 64, 3, 1, 1))
#self.icm_skip_compress = init_(nn.Conv2d(num_inputs, 15, 1, stride=1))
init_ = lambda m: init(m,
nn.init.dirac_,
lambda x: nn.init.constant_(x, 0))
self.icm_pred_a_out = init_(nn.Conv2d(128, self.num_action_channels, 7, 1, 3))
self.icm_pred_s_out = init_(nn.Conv2d(64 + 64, num_inputs, 1, 1, 0))
self.train()
def forward(self, inputs, rnn_hxs, masks, icm=False):
if not icm:
return super().forward(inputs, rnn_hxs, masks)
else:
# Encode state feature-maps
s0_in, s1_in, a1 = inputs
a1 = a1.view(a1.size(0), self.num_action_channels, 20, 20)
s0 = s0_in
# s0 = F.relu(self.icm_state_in(s0))
# for i in range(1):
# s0 = F.relu(self.icm_state_conv_0(s0))
# s0 = F.relu(self.icm_state_out(s0))
##s0_skip = F.relu(self.icm_skip_compress(s0))
s1 = s1_in
# s1 = F.relu(self.icm_state_in(s1))
# for i in range(1):
# s1 = F.relu(self.icm_state_conv_0(s1))
# s1 = F.relu(self.icm_state_out(s1))
##s1_skip = F.relu(self.icm_skip_compress(s1_in))
# Predict outcome state feature-map and action dist.
a1 = a1.cuda()
s0 = s0.cuda()
#print(a1.is_cuda, s0.is_cuda)
pred_s1 = pred_s1_0 = F.relu(self.icm_pred_s_in(torch.cat((s0, a1), 1)))
for i in range(2):
pred_s1 = F.relu(self.icm_pred_s_conv_0(pred_s1))
for i in range(2):
pred_s1 = F.relu(self.icm_pred_s_conv_1(pred_s1))
for i in range(2):
pred_s1 = F.relu(self.icm_pred_s_conv_2(pred_s1))
for i in range(2):
pred_s1 = F.relu(self.icm_pred_s_conv_3(pred_s1))
for i in range(2):
pred_s1 = F.relu(self.icm_pred_s_conv_4(pred_s1))
for i in range(2):
pred_s1 = F.relu(self.icm_pred_s_conv_5(pred_s1))
for i in range(2):
pred_s1 = F.relu(self.icm_pred_s_conv_6(pred_s1))
for i in range(2):
pred_s1 = F.relu(self.icm_pred_s_conv_7(pred_s1))
for i in range(2):
pred_s1 = F.relu(self.icm_pred_s_conv_8(pred_s1))
for i in range(2):
pred_s1 = F.relu(self.icm_pred_s_conv_9(pred_s1))
for i in range(2):
pred_s1 = F.relu(self.icm_pred_s_conv_10(pred_s1))
pred_s1 = torch.cat((pred_s1, pred_s1_0), 1)
pred_s1 = self.icm_pred_s_out(pred_s1)
pred_a = F.relu(self.icm_pred_a_in(torch.cat((s0, s1), 1)))
for i in range(1):
pred_a = F.relu(self.icm_pred_a_conv_0(pred_a))
pred_a = self.icm_pred_a_out(pred_a)
pred_a = pred_a.view(pred_a.size(0), -1)
return s1, pred_s1, pred_a
def feature_state_size(self):
return (32, 20, 20)
class MicropolisBase_acktr(NNBase):
def __init__(self, num_inputs, recurrent=False, hidden_size=512):
super(MicropolisBase_acktr, self).__init__(recurrent, hidden_size, hidden_size)
init_ = lambda m: init(m,
nn.init.dirac_,
lambda x: nn.init.constant_(x, 0),
nn.init.calculate_gain('relu'))
import sys
#self.skip_compress = init_(nn.Conv2d(num_inputs, 15, 1, stride=1))
self.conv_0 = nn.Conv2d(num_inputs, 64, 1, 1, 0)
init_(self.conv_0)
self.conv_1 = nn.Conv2d(64, 64, 5, 1, 2)
init_(self.conv_1)
#self.conv_2 = nn.Conv2d(64, 64, 3, 1, 0)
#init_(self.conv_2)
#self.conv_3 = nn.ConvTranspose2d(64, 64, 3, 1, 0)
#init_(self.conv_3)
self.actor_compress = init_(nn.Conv2d(64, 20, 3, 1, 1))
self.critic_compress = init_(nn.Conv2d(64, 8, 1, 1, 1))
init_ = lambda m: init(m,
nn.init.dirac_,
lambda x: nn.init.constant_(x, 0))
self.critic_conv_1 = init_(nn.Conv2d(8, 1, 20, 20, 0))
self.train()
def forward(self, inputs, rnn_hxs, masks):
x = inputs
x = F.relu(self.conv_0(x))
#skip_input = F.relu(self.skip_compress(inputs))
x = F.relu(self.conv_1(x))
#for i in range(5):
# x = F.relu(self.conv_2(x))
#for j in range(5):
# x = F.relu(self.conv_3(x))
#x = torch.cat((x, skip_input), 1)
values = F.relu(self.critic_compress(x))
values = self.critic_conv_1(values)
values = values.view(values.size(0), -1)
actions = F.relu(self.actor_compress(x))
return values, actions, rnn_hxs
class MicropolisBase_1d(NNBase):
def __init__(self, num_inputs, recurrent=False, hidden_size=512):
super(MicropolisBase_1d, self).__init__(recurrent, hidden_size, hidden_size)
init_ = lambda m: init(m,
nn.init.dirac_,
lambda x: nn.init.constant_(x, 0),
nn.init.calculate_gain('relu'))
import sys
self.skip_compress = init_(nn.Conv2d(num_inputs, 15, 1, stride=1))
self.conv_0 = nn.Conv2d(num_inputs, 64, 1, 1, 0)
init_(self.conv_0)
self.conv_1 = nn.Conv2d(64, 64, 5, 1, 2)
init_(self.conv_1)
self.conv_2 = nn.Conv2d(1, 1, 3, 1, 0)
init_(self.conv_2)
self.conv_2_chan = nn.ConvTranspose2d(1, 1, (1, 3), 1, 0)
init_(self.conv_2_chan)
self.conv_3 = nn.ConvTranspose2d(1, 1, 3, 1, 0)
init_(self.conv_3)
self.conv_3_chan = nn.Conv2d(1, 1, (1, 3), 1, 0)
init_(self.conv_3_chan)
self.actor_compress = init_(nn.Conv2d(79, 20, 3, 1, 1))
self.critic_compress = init_(nn.Conv2d(79, 8, 1, 1, 1))
init_ = lambda m: init(m,
nn.init.dirac_,
lambda x: nn.init.constant_(x, 0))
self.critic_conv_1 = init_(nn.Conv2d(8, 1, 20, 20, 0))
self.train()
def forward(self, inputs, rnn_hxs, masks):
x = inputs
x = F.relu(self.conv_0(x))
skip_input = F.relu(self.skip_compress(inputs))
x = F.relu(self.conv_1(x))
num_batch = x.size(0)
for i in range(5):
w, h = x.size(2), x.size(3)
num_chan = x.size(1)
x = x.view(num_batch * num_chan, 1, w, h)
x = F.relu(self.conv_2(x))
w, h = x.size(2), x.size(3)
x = x.view(num_batch, num_chan, w, h)
x = x.permute(0, 2, 3, 1)
x = x.view(num_batch, 1, w * h, num_chan)
x = F.relu(self.conv_2_chan(x))
num_chan = x.size(3)
x = x.view(num_batch, num_chan, w, h)
for j in range(5):
w, h = x.size(2), x.size(3)
num_chan = x.size(1)
x = x.view(num_batch * num_chan, 1, w, h)
x = F.relu(self.conv_3(x))
w, h = x.size(2), x.size(3)
x = x.view(num_batch, num_chan, w, h)
x = x.permute(0, 2, 3, 1)
x = x.view(num_batch, 1, w * h, num_chan)
x = F.relu(self.conv_3_chan(x))
num_chan = x.size(3)
x = x.view(num_batch, num_chan, w, h)
x = torch.cat((x, skip_input), 1)
values = F.relu(self.critic_compress(x))
values = self.critic_conv_1(values)
values = values.view(values.size(0), -1)
actions = F.relu(self.actor_compress(x))
return values, actions, rnn_hxs
class MicropolisBase_0(NNBase):
def __init__(self, num_inputs, recurrent=False, hidden_size=512):
super(MicropolisBase_0, self).__init__(recurrent, hidden_size, hidden_size)
init_ = lambda m: init(m,
nn.init.dirac_,
lambda x: nn.init.constant_(x, 0),
nn.init.calculate_gain('relu'))
import sys
if sys.version[0] == '2':
num_inputs=104
# assert num_inputs / 4 == 25
self.conv_A_0 = nn.Conv2d(num_inputs, 64, 5, 1, 2)
init_(self.conv_A_0)
self.conv_B_0 = nn.Conv2d(num_inputs, 64, 3, 1, 2)
init_(self.conv_B_0)
self.conv_A_1 = nn.Conv2d(64, 64, 5, 1, 2)
init_(self.conv_A_1)
self.conv_B_1 = nn.Conv2d(64, 64, 3, 1, 1)
init_(self.conv_B_1)
self.input_compress = nn.Conv2d(num_inputs, 15, 1, stride=1)
init_(self.input_compress)
self.actor_compress = nn.Conv2d(79, 18, 3, 1, 1)
init_(self.actor_compress)
self.critic_compress = init_(nn.Conv2d(79, 8, 1, 1, 0))
# self.critic_conv_0 = init_(nn.Conv2d(16, 1, 20, 1, 0))
init_ = lambda m: init(m,
nn.init.dirac_,
lambda x: nn.init.constant_(x, 0))
self.critic_conv_1 = init_(nn.Conv2d(8, 1, 20, 1, 0))
self.train()
def forward(self, inputs, rnn_hxs, masks):
# inputs = torch.Tensor(inputs)
# inputs =inputs.view((1,) + inputs.shape)
x = inputs
x_A = self.conv_A_0(x)
x_A = F.relu(x_A)
x_B = self.conv_B_0(x)
x_B = F.relu(x_B)
for i in range(2):
# x = torch.cat((x, inputs[:,-26:]), 1)
x_A = F.relu(self.conv_A_1(x_A))
for i in range(5):
x_B = F.relu(self.conv_B_1(x_B))
x = torch.mul(x_A, x_B)
skip_input = F.relu(self.input_compress(inputs))
x = torch.cat((x, skip_input), 1)
values = F.relu(self.critic_compress(x))
# values = F.relu(self.critic_conv_0(values))
values = self.critic_conv_1(values).view(values.size(0), -1)
actions = F.relu(self.actor_compress(x))
return values, actions, rnn_hxs
class CNNBase(NNBase):
def __init__(self, num_inputs, recurrent=False, hidden_size=512):
super(CNNBase, self).__init__(recurrent, hidden_size, hidden_size)
init_ = lambda m: init(m,
nn.init.orthogonal_,
lambda x: nn.init.constant_(x, 0),
nn.init.calculate_gain('relu'))
self.main = nn.Sequential(
init_(nn.Conv2d(num_inputs, 32, 8, stride=4)),
nn.ReLU(),
init_(nn.Conv2d(32, 64, 4, stride=2)),
nn.ReLU(),
init_(nn.Conv2d(64, 32, 3, stride=1)),
nn.ReLU(),
Flatten(),
init_(nn.Linear(32 * 7 * 7, hidden_size)),
nn.ReLU()
)
init_ = lambda m: init(m,
nn.init.orthogonal_,
lambda x: nn.init.constant_(x, 0))
self.critic_linear = init_(nn.Linear(hidden_size, 1))
self.train()
def forward(self, inputs, rnn_hxs, masks):
x = self.main(inputs / 255.0)
if self.is_recurrent:
x, rnn_hxs = self._forward_gru(x, rnn_hxs, masks)
return self.critic_linear(x), x, rnn_hxs
class MLPBase(NNBase):
def __init__(self, num_inputs, recurrent=False, hidden_size=64):
super(MLPBase, self).__init__(recurrent, num_inputs, hidden_size)
if recurrent:
num_inputs = hidden_size
init_ = lambda m: init(m,
init_normc_,
lambda x: nn.init.constant_(x, 0))
self.actor = nn.Sequential(
init_(nn.Linear(num_inputs, hidden_size)),
nn.Tanh(),
init_(nn.Linear(hidden_size, hidden_size)),
nn.Tanh()
)
self.critic = nn.Sequential(
init_(nn.Linear(num_inputs, hidden_size)),
nn.Tanh(),
init_(nn.Linear(hidden_size, hidden_size)),
nn.Tanh()
)
self.critic_linear = init_(nn.Linear(hidden_size, 1))
self.train()
def forward(self, inputs, rnn_hxs, masks):
x = inputs
if self.is_recurrent:
x, rnn_hxs = self._forward_gru(x, rnn_hxs, masks)
hidden_critic = self.critic(x)
hidden_actor = self.actor(x)
return self.critic_linear(hidden_critic), hidden_actor, rnn_hxs
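# Minimal usage sketch (illustrative only). It assumes the `init`/`init_normc_`
# helpers referenced by the bases above are importable at the top of this file,
# and it uses made-up shapes; the guard keeps it inert on import.
if __name__ == "__main__":
    num_inputs = 8                                   # hypothetical observation size
    base = MLPBase(num_inputs, recurrent=False, hidden_size=64)
    obs = torch.zeros(4, num_inputs)                 # batch of 4 dummy observations
    rnn_hxs = torch.zeros(4, base.recurrent_hidden_state_size)
    masks = torch.ones(4, 1)
    value, actor_features, rnn_hxs = base(obs, rnn_hxs, masks)
    # value: (4, 1) critic estimates; actor_features: (4, 64) actor hidden state
    print(value.shape, actor_features.shape)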
| 34.670922 | 130 | 0.567524 | 3,617 | 24,443 | 3.583633 | 0.068012 | 0.038266 | 0.038189 | 0.02407 | 0.683459 | 0.623592 | 0.585172 | 0.530165 | 0.504243 | 0.488736 | 0 | 0.052148 | 0.304913 | 24,443 | 704 | 131 | 34.72017 | 0.710771 | 0.080677 | 0 | 0.505092 | 0 | 0 | 0.004732 | 0.000937 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065173 | false | 0 | 0.016293 | 0.010183 | 0.150713 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
579ed3db39142cf3ce1e69da8a371c415dd9bdb8 | 1,213 | py | Python | src/classes/inference/SamplerUtils.py | jvarela-zenika/sketch-code-fork | a185364325d67818452e5c9ca8e85ef4d6324295 | [
"MIT",
"Unlicense"
] | null | null | null | src/classes/inference/SamplerUtils.py | jvarela-zenika/sketch-code-fork | a185364325d67818452e5c9ca8e85ef4d6324295 | [
"MIT",
"Unlicense"
] | 4 | 2020-09-26T00:43:07.000Z | 2022-02-10T01:07:34.000Z | src/classes/inference/SamplerUtils.py | jvarela-zenika/sketch-code-fork | a185364325d67818452e5c9ca8e85ef4d6324295 | [
"MIT",
"Unlicense"
] | null | null | null | from __future__ import print_function
from __future__ import absolute_import
from lorem_text import lorem
import string
import random
class SamplerUtils:
@staticmethod
def get_random_title():
return lorem.words(random.randrange(1, 4, 1)).upper()
@staticmethod
def get_random_link():
return lorem.words(random.randrange(2, 5, 1))
@staticmethod
def get_random_paragraph():
return lorem.paragraph()
@staticmethod
def get_random_text(length_text=10, space_number=1, with_upper_case=True):
results = []
while len(results) < length_text:
char = random.choice(string.ascii_letters[:26])
results.append(char)
if with_upper_case:
results[0] = results[0].upper()
current_spaces = []
while len(current_spaces) < space_number:
space_pos = random.randint(2, length_text - 3)
if space_pos in current_spaces:
break
results[space_pos] = " "
if with_upper_case:
results[space_pos + 1] = results[space_pos - 1].upper()
current_spaces.append(space_pos)
return ''.join(results)
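# Small usage sketch (illustrative): the helpers are random, so outputs differ
# between runs; `lorem_text` must be installed as imported above.
if __name__ == "__main__":
    print(SamplerUtils.get_random_title())        # e.g. "LOREM IPSUM"
    print(SamplerUtils.get_random_link())         # e.g. "dolor sit amet"
    print(SamplerUtils.get_random_paragraph()[:60])
    print(SamplerUtils.get_random_text(12, 2))    # 12 letters with 2 spaces inserted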
| 25.808511 | 78 | 0.625721 | 144 | 1,213 | 4.986111 | 0.368056 | 0.066852 | 0.100279 | 0.133705 | 0.147632 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019585 | 0.284419 | 1,213 | 46 | 79 | 26.369565 | 0.807604 | 0 | 0 | 0.176471 | 0 | 0 | 0.000824 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.147059 | 0.088235 | 0.411765 | 0.029412 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
57a05d458ecebc90692b509abec1c0bdec5f402e | 3,553 | py | Python | reinforced_scinet/data.py | HendrikPN/reinforced_scinet | b57c9d1d997cc56647db4faa0690364e7039a5ee | [
"Apache-2.0"
] | 4 | 2020-01-11T17:29:27.000Z | 2021-03-05T15:17:22.000Z | reinforced_scinet/data.py | aswanthkrishna/reinforced_scinet | b520f0c73bb1cdf0d0595f0df32372c96946d963 | [
"Apache-2.0"
] | 9 | 2020-01-13T17:26:50.000Z | 2020-10-28T09:52:42.000Z | reinforced_scinet/data.py | aswanthkrishna/reinforced_scinet | b520f0c73bb1cdf0d0595f0df32372c96946d963 | [
"Apache-2.0"
] | 2 | 2020-03-26T00:39:33.000Z | 2020-11-13T20:29:28.000Z | # Copyright 2020 reinforced_scinet (https://github.com/hendrikpn/reinforced_scinet)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.multiprocessing import Process
import sys, os
from config import Config
class DataProcess(Process):
def __init__(self, experience_q, env_id, data_id):
"""
This process generates the data from a trained RL agent to train selection neurons
in an environment.
Args:
experience_q (mp.Queue): Shared memory queue containing experiences across workers of the same type.
env_id (str): The id of the environment instance this data process is creating data for.
data_id (int): The id of the data process.
"""
super(DataProcess, self).__init__()
self.experience_q = experience_q
self.env_id = env_id
self.id = data_id
# torch.Tensor: The training data for selection.
self.o_train_data = torch.empty((Config.DATA_TRAIN_SIZE, Config.INPUT_SIZE))
# torch.Tensor: The training data for actions.
self.a_train_data = torch.empty((Config.DATA_TRAIN_SIZE, Config.NUM_ACTIONS * Config.NUM_ACTIONS_PREDICT))
# torch.Tensor: The training data for selection.
self.t_train_data = torch.empty((Config.DATA_TRAIN_SIZE, 1), dtype=torch.double)
def run(self):
"""
Here we generate the data which is used for training of selection neurons.
The process is as follows,
(i) Generates data that lies within the policy of an agent. Collects data from the experience queue.
(ii) Save data to the file system.
"""
print(f'Starting training data generation #{self.id}.')
sys.stdout.flush()
# (i) Get data for training from optimal policy of agents.
self._get_data()
# (ii) Save data
torch.save(self.o_train_data, 'data/'+Config.ENV_NAME+'_o_'+self.env_id+'.pth')
torch.save(self.a_train_data, 'data/'+Config.ENV_NAME+'_a_'+self.env_id+'.pth')
torch.save(self.t_train_data, 'data/'+Config.ENV_NAME+'_t_'+self.env_id+'.pth')
print(f'Finished data generation #{self.id}')
sys.stdout.flush()
# ----------------- helper methods ---------------------------------------------------------------------
def _get_data(self):
"""
Creates data which lies within the optimal policy of a learned agent.
To this end, we just collect the data from the `experience_q` of the workers.
"""
index = 0
batch_cut = Config.DATA_TRAIN_SIZE
while index < Config.DATA_TRAIN_SIZE:
o_batch, a_batch, t_batch = self.experience_q.get()
batch_size = len(o_batch)
self.o_train_data[index:index+batch_size] = o_batch[:batch_cut]
self.a_train_data[index:index+batch_size] = a_batch[:batch_cut]
self.t_train_data[index:index+batch_size] = t_batch[:batch_cut]
index += batch_size
batch_cut -= batch_size
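# Illustrative smoke test (a sketch, not part of the training pipeline). It
# assumes the Config fields used by DataProcess above and a writable ./data
# directory; normally the queue is fed by separately running worker processes.
if __name__ == "__main__":
    from torch.multiprocessing import Queue

    q = Queue()
    n = Config.DATA_TRAIN_SIZE
    # A single dummy batch large enough to satisfy _get_data in one get().
    o_batch = torch.zeros(n, Config.INPUT_SIZE)
    a_batch = torch.zeros(n, Config.NUM_ACTIONS * Config.NUM_ACTIONS_PREDICT)
    t_batch = torch.zeros(n, 1, dtype=torch.double)
    q.put((o_batch, a_batch, t_batch))

    proc = DataProcess(q, env_id="demo", data_id=0)
    proc.start()
    proc.join()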
| 43.329268 | 114 | 0.652969 | 494 | 3,553 | 4.516194 | 0.327935 | 0.036307 | 0.033617 | 0.042582 | 0.232631 | 0.232631 | 0.147019 | 0.094128 | 0.039444 | 0 | 0 | 0.003686 | 0.23642 | 3,553 | 81 | 115 | 43.864198 | 0.818651 | 0.471714 | 0 | 0.060606 | 0 | 0 | 0.06756 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.121212 | 0 | 0.242424 | 0.060606 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
57a135adde7d70a83367b36c529a7887d6feee35 | 4,309 | py | Python | lib/vapi/base.py | bsmita/vision-tools | 90f034bf7494a2d90883f2b4cbc7c4fe619df5a6 | [
"Apache-2.0"
] | null | null | null | lib/vapi/base.py | bsmita/vision-tools | 90f034bf7494a2d90883f2b4cbc7c4fe619df5a6 | [
"Apache-2.0"
] | null | null | null | lib/vapi/base.py | bsmita/vision-tools | 90f034bf7494a2d90883f2b4cbc7c4fe619df5a6 | [
"Apache-2.0"
] | null | null | null | # IBM_PROLOG_BEGIN_TAG
#
# Copyright 2019,2020 IBM International Business Machines Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# IBM_PROLOG_END_TAG
import os
import logging as logger
import sys
from vapi.server import Server
from vapi.projects import Projects
from vapi.Datasets import Datasets
from vapi.Files import Files
from vapi.FileUserKeys import FileUserKeys
from vapi.FileUserMetadata import FileUserMetadata
from vapi.Categories import Categories
from vapi.ObjectTags import ObjectTags
from vapi.Objectlabels import ObjectLabels
from vapi.ActionTags import ActionTags
from vapi.ActionLabels import ActionLabels
from vapi.Dltasks import DlTasks
from vapi.DnnScripts import DnnScripts
from vapi.TrainedModels import TrainedModels
from vapi.DeployedModels import DeployedModels
from vapi.InferenceResults import InferenceResults
from vapi.Users import Users
class Base:
def __init__(self, host=None, token=None, instance=None, log_http_traffic=False):
# Get required parameters from ENV if not provided on input
env_var = ""
try:
env_var = "VAPI_HOST"
if host is None:
host = os.environ[env_var]
env_var = "VAPI_TOKEN"
if token is None:
token = os.environ[env_var]
except KeyError:
msg = F"Could not find '{env_var}' information in environment or input parameters"
logger.error(" PAIV:" + msg)
raise
# Get optional parameters from ENV if not provided -- fallback to defaults if not present in ENV
env_var = "VAPI_INSTANCE"
if instance is None:
if env_var in os.environ:
instance = os.environ[env_var]
else:
instance = "visual-insights"
logger.info(F"PAIV: setting up server with host={host}, instance={instance}")
self.server = Server(host, token, instance, log_http_traffic)
if self.server is not None:
self.projects = Projects(self.server)
self.datasets = Datasets(self.server)
self.files = Files(self.server)
self.file_keys = FileUserKeys(self.server)
self.file_metadata = FileUserMetadata(self.server)
self.categories = Categories(self.server)
self.object_tags = ObjectTags(self.server)
self.object_labels = ObjectLabels(self.server)
self.action_tags = ActionTags(self.server)
self.action_labels = ActionLabels(self.server)
self.inference_results = InferenceResults(self.server)
self.dl_tasks = DlTasks(self.server)
self.trained_models = TrainedModels(self.server)
self.deployed_models = DeployedModels(self.server)
self.dnnscripts = DnnScripts(self.server)
self.users = Users(self.server)
def raw_http_request(self):
""" Gets the raw HTTP request for the last request that was sent"""
return self.server.get_raw_req()
def raw_http_response(self):
""" Gets the raw response object for the last request that was sent"""
return self.server.raw_rsp()
def status_code(self):
""" Get the status code from the last server request"""
return self.server.status_code()
def rsp_ok(self):
""" Check for OK status from last server request"""
return self.server.rsp_ok()
def http_request_str(self):
""" Gets the HTTP request that generated the current response"""
return self.server.http_request_str()
def json(self):
""" Get the json data from the last server response"""
return self.server.json()
def text(self):
""" Get response body as a string """
return self.server.text()
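# Usage sketch (illustrative; the host and token below are placeholders, and it
# is assumed that Server() only records the connection settings at construction
# time). Base() can also pull VAPI_HOST/VAPI_TOKEN/VAPI_INSTANCE from the
# environment when no arguments are given.
if __name__ == "__main__":
    server = Base(host="visual-insights.example.com",   # placeholder hostname
                  token="REPLACE_WITH_API_TOKEN")        # placeholder token
    # The per-resource wrappers hang off attributes such as server.datasets,
    # server.files and server.dl_tasks; after any call, the helpers above
    # (rsp_ok, status_code, json, text) inspect the last HTTP exchange.
    print(type(server.datasets).__name__)                # -> "Datasets"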
| 36.82906 | 104 | 0.677419 | 554 | 4,309 | 5.176895 | 0.310469 | 0.087169 | 0.073222 | 0.01569 | 0.074616 | 0.074616 | 0.030683 | 0.030683 | 0.030683 | 0.030683 | 0 | 0.003705 | 0.248317 | 4,309 | 116 | 105 | 37.146552 | 0.881754 | 0.266651 | 0 | 0 | 0 | 0 | 0.060362 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.109589 | false | 0 | 0.273973 | 0 | 0.493151 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
57a1ba7cef774b8bf412a3562c88fe82b9c50ec6 | 4,422 | py | Python | src/mnist/mnist_file_loader.py | daengdaengLee/mnist-python-numpy | 909771bfc802e7031ab84af5776251e66fbc9a63 | [
"MIT"
] | null | null | null | src/mnist/mnist_file_loader.py | daengdaengLee/mnist-python-numpy | 909771bfc802e7031ab84af5776251e66fbc9a63 | [
"MIT"
] | 23 | 2019-10-26T08:56:28.000Z | 2021-06-02T00:37:18.000Z | src/mnist/mnist_file_loader.py | daengdaengLee/mnist-python-numpy | 909771bfc802e7031ab84af5776251e66fbc9a63 | [
"MIT"
] | null | null | null | import os
import requests
import gzip
class MNISTFileLoader():
__base_url = "http://yann.lecun.com/exdb/mnist"
__filenames = {
"train": {
"image": {
"file": "train-images.idx3-ubyte",
"gz": "train-images-idx3-ubyte.gz",
},
"label": {
"file": "train-labels.idx1-ubyte",
"gz": "train-labels-idx1-ubyte.gz",
},
},
"test": {
"image": {
"file": "t10k-images.idx3-ubyte",
"gz": "t10k-images-idx3-ubyte.gz",
},
"label": {
"file": "t10k-labels.idx1-ubyte",
"gz": "t10k-labels-idx1-ubyte.gz",
},
}
}
def __init__(self, path="./MNIST_Data"):
self.__path = path
self.__filepaths = {
"train": {
"image": {
"file": os.path.join(
path, self.__filenames["train"]["image"]["file"]),
"gz": os.path.join(
path, self.__filenames["train"]["image"]["gz"]),
},
"label": {
"file": os.path.join(
path, self.__filenames["train"]["label"]["file"]),
"gz": os.path.join(
path, self.__filenames["train"]["label"]["gz"]),
},
},
"test": {
"image": {
"file": os.path.join(
path, self.__filenames["test"]["image"]["file"]),
"gz": os.path.join(
path, self.__filenames["test"]["image"]["gz"]),
},
"label": {
"file": os.path.join(
path, self.__filenames["test"]["label"]["file"]),
"gz": os.path.join(
path, self.__filenames["test"]["label"]["gz"]),
},
},
}
if not os.path.exists(self.__path):
os.makedirs(self.__path)
def get_train_image(self):
self.__prepare_file("train", "image")
return self.__read_file("train", "image", "file")
def get_train_label(self):
self.__prepare_file("train", "label")
return self.__read_file("train", "label", "file")
def get_test_image(self):
self.__prepare_file("test", "image")
return self.__read_file("test", "image", "file")
def get_test_label(self):
self.__prepare_file("test", "label")
return self.__read_file("test", "label", "file")
def __check_file_exist(self, *keys):
path = self.__filepaths
for key in keys:
path = path[key]
file_path = path["file"]
gz_path = path["gz"]
print(f"check file exist :\n[{file_path}]\n[{gz_path}]")
is_file_exist = os.path.exists(file_path)
is_gz_exist = os.path.exists(gz_path)
return is_file_exist, is_gz_exist
def __fetch_file(self, *keys):
filepath = self.__filepaths
for key in keys:
filepath = filepath[key]
filename = self.__filenames
for key in keys:
filename = filename[key]
print(f"fetch file :\n[{filename}]")
url = f"{self.__base_url}/{filename}"
with open(filepath, "wb") as f:
response = requests.get(url)
f.write(response.content)
def __unzip_file(self, *keys):
filepath = self.__filepaths
for key in keys:
filepath = filepath[key]
gz_path = filepath["gz"]
file_path = filepath["file"]
print(f"unzip file :\n[{gz_path}]")
with gzip.open(gz_path, 'rb') as gz_file:
with open(file_path, "wb") as unzip_file:
unzip_file.write(gz_file.read())
def __prepare_file(self, *keys):
is_file_exist, is_gz_exist = self.__check_file_exist(*keys)
if not is_file_exist and not is_gz_exist:
self.__fetch_file(*keys, "gz")
self.__unzip_file(*keys)
return
if not is_file_exist:
self.__unzip_file(*keys)
return
def __read_file(self, *keys):
filepath = self.__filepaths
for key in keys:
filepath = filepath[key]
return open(filepath, "rb")
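# Usage sketch (illustrative; the first run downloads the MNIST archives from
# yann.lecun.com into ./MNIST_Data, later runs reuse the unpacked files).
if __name__ == "__main__":
    loader = MNISTFileLoader("./MNIST_Data")
    with loader.get_train_label() as f:
        header = f.read(8)                            # idx1 header: magic + item count
        n_items = int.from_bytes(header[4:8], "big")  # big-endian item count
        print("training labels:", n_items)            # expected: 60000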
| 31.361702 | 74 | 0.483944 | 470 | 4,422 | 4.253191 | 0.138298 | 0.033017 | 0.04002 | 0.056028 | 0.552776 | 0.332666 | 0.274137 | 0.274137 | 0.224112 | 0.13907 | 0 | 0.005749 | 0.370647 | 4,422 | 140 | 75 | 31.585714 | 0.71254 | 0 | 0 | 0.294118 | 0 | 0 | 0.154003 | 0.056309 | 0 | 0 | 0 | 0 | 0 | 1 | 0.084034 | false | 0 | 0.02521 | 0 | 0.201681 | 0.02521 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
57aa0db30ff124614822051a84b8bac83800beba | 2,037 | py | Python | dpctl/tests/test_utils.py | diptorupd/dpctl | 99aba3bd4a1f4d963c97a15815160085241ffbed | [
"Apache-2.0"
] | 39 | 2020-09-29T16:00:59.000Z | 2022-03-30T19:08:05.000Z | dpctl/tests/test_utils.py | diptorupd/dpctl | 99aba3bd4a1f4d963c97a15815160085241ffbed | [
"Apache-2.0"
] | 600 | 2020-09-22T22:47:43.000Z | 2022-03-31T16:20:13.000Z | dpctl/tests/test_utils.py | 1e-to/dpctl | 29c2cbc34a82f7007f8e170d9b2548ab3e2b48d4 | [
"Apache-2.0"
] | 14 | 2020-10-02T09:45:56.000Z | 2022-02-08T09:20:25.000Z | # Data Parallel Control (dpctl)
#
# Copyright 2020-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Defines unit test cases for utility functions.
"""
import pytest
import dpctl
import dpctl.utils
def test_get_execution_queue_input_validation():
with pytest.raises(TypeError):
dpctl.utils.get_execution_queue(dict())
def test_get_execution_queue():
try:
q = dpctl.SyclQueue()
q2 = dpctl.SyclQueue()
except dpctl.SyclQueueCreationError:
pytest.skip("Queue could not be create for default device")
exec_q = dpctl.utils.get_execution_queue(())
assert exec_q is None
exec_q = dpctl.utils.get_execution_queue([q])
assert exec_q is q
exec_q = dpctl.utils.get_execution_queue([q, q, q, q])
assert exec_q is q
exec_q = dpctl.utils.get_execution_queue((q, q, None, q))
assert exec_q is None
exec_q = dpctl.utils.get_execution_queue(
(
q,
q2,
q,
)
)
assert exec_q is q
def test_get_execution_queue_nonequiv():
try:
q = dpctl.SyclQueue("cpu")
d1, d2 = q.sycl_device.create_sub_devices(partition=[1, 1])
ctx = dpctl.SyclContext([q.sycl_device, d1, d2])
q1 = dpctl.SyclQueue(ctx, d1)
q2 = dpctl.SyclQueue(ctx, d2)
except dpctl.SyclQueueCreationError:
pytest.skip("Queue could not be create for default device")
exec_q = dpctl.utils.get_execution_queue((q, q1, q2))
assert exec_q is None
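# Note: the helper under test implements the "compute follows data" rule -- given
# the queues associated with a set of array arguments it returns their common
# queue, or None when they disagree. A minimal sketch of the intended usage:
#
#     q = dpctl.SyclQueue()
#     exec_q = dpctl.utils.get_execution_queue([q, q])   # -> q
#     exec_q = dpctl.utils.get_execution_queue([q, dpctl.SyclQueue()])
#     # -> None unless both queues are equivalent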
| 28.291667 | 74 | 0.67894 | 291 | 2,037 | 4.608247 | 0.38488 | 0.044743 | 0.126771 | 0.11484 | 0.413125 | 0.325876 | 0.314691 | 0.314691 | 0.313945 | 0.313945 | 0 | 0.016624 | 0.232204 | 2,037 | 71 | 75 | 28.690141 | 0.840793 | 0.322533 | 0 | 0.358974 | 0 | 0 | 0.06701 | 0 | 0 | 0 | 0 | 0 | 0.153846 | 1 | 0.076923 | false | 0 | 0.076923 | 0 | 0.153846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
57aaf44d33bcd375ef1fd44d6fbfe6f11b7dfc56 | 102,386 | py | Python | numba/parfor.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1 | 2021-08-14T13:48:12.000Z | 2021-08-14T13:48:12.000Z | numba/parfor.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | numba/parfor.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | #
# Copyright (c) 2017 Intel Corporation
# SPDX-License-Identifier: BSD-2-Clause
#
"""
This module transforms data-parallel operations such as Numpy calls into
'Parfor' nodes, which are nested loops that can be parallelized.
It also implements optimizations such as loop fusion, and extends the rest of
compiler analysis and optimizations to support Parfors.
This is similar to ParallelAccelerator package in Julia:
https://github.com/IntelLabs/ParallelAccelerator.jl
'Parallelizing Julia with a Non-invasive DSL', T. Anderson et al., ECOOP'17.
"""
from __future__ import print_function, division, absolute_import
import types as pytypes # avoid confusion with numba.types
import sys
from functools import reduce
from collections import defaultdict
import numba
from numba import ir, ir_utils, types, typing, rewrites, config, analysis
from numba import array_analysis, postproc, typeinfer
from numba.typing.templates import infer_global, AbstractTemplate
from numba import stencilparfor
from numba.stencilparfor import StencilPass
from numba.ir_utils import (
mk_unique_var,
next_label,
mk_alloc,
get_np_ufunc_typ,
mk_range_block,
mk_loop_header,
find_op_typ,
get_name_var_table,
replace_vars,
replace_vars_inner,
visit_vars,
visit_vars_inner,
remove_dels,
remove_dead,
copy_propagate,
get_block_copies,
apply_copy_propagate,
dprint_func_ir,
find_topo_order,
get_stmt_writes,
rename_labels,
get_call_table,
simplify,
simplify_CFG,
has_no_side_effect,
canonicalize_array_math,
add_offset_to_labels,
find_callname,
guard,
require,
compile_to_numba_ir,
get_definition,
replace_arg_nodes,
replace_returns)
from numba.analysis import (compute_use_defs, compute_live_map,
compute_dead_maps, compute_cfg_from_blocks)
from numba.controlflow import CFGraph
from numba.typing import npydecl, signature
from numba.types.functions import Function
from numba.array_analysis import (random_int_args, random_1arg_size,
random_2arg_sizelast, random_3arg_sizelast,
random_calls)
import copy
import numpy
# circular dependency: import numba.npyufunc.dufunc.DUFunc
sequential_parfor_lowering = False
class prange(object):
def __new__(cls, *args):
return range(*args)
class internal_prange(object):
def __new__(cls, *args):
return range(*args)
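# Usage note: `prange` is a drop-in replacement for `range` that marks a loop as
# parallelizable when the enclosing function is compiled with parallel=True;
# `internal_prange` is the variant emitted by the compiler itself (e.g. by the
# parallel reduction implementations below). A minimal sketch:
#
#     from numba import njit, prange
#
#     @njit(parallel=True)
#     def total(a):
#         s = 0.0
#         for i in prange(a.size):   # iterations may execute concurrently
#             s += a[i]              # recognized and handled as a reduction
#         return s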
_reduction_ops = {
'sum': ('+=', '+', 0),
'dot': ('+=', '+', 0),
'prod': ('*=', '*', 1),
}
def min_parallel_impl(in_arr):
A = in_arr.ravel()
val = numba.targets.builtins.get_type_max_value(A.dtype)
for i in numba.parfor.internal_prange(len(A)):
val = min(val, A[i])
return val
def max_parallel_impl(in_arr):
A = in_arr.ravel()
val = numba.targets.builtins.get_type_min_value(A.dtype)
for i in numba.parfor.internal_prange(len(A)):
val = max(val, A[i])
return val
def argmin_parallel_impl(in_arr):
A = in_arr.ravel()
init_val = numba.targets.builtins.get_type_max_value(A.dtype)
ival = numba.typing.builtins.IndexValue(0, init_val)
for i in numba.parfor.internal_prange(len(A)):
curr_ival = numba.typing.builtins.IndexValue(i, A[i])
ival = min(ival, curr_ival)
return ival.index
def argmax_parallel_impl(in_arr):
A = in_arr.ravel()
init_val = numba.targets.builtins.get_type_min_value(A.dtype)
ival = numba.typing.builtins.IndexValue(0, init_val)
for i in numba.parfor.internal_prange(len(A)):
curr_ival = numba.typing.builtins.IndexValue(i, A[i])
ival = max(ival, curr_ival)
return ival.index
replace_functions_map = {('argmin', 'numpy'): argmin_parallel_impl,
('argmax', 'numpy'): argmax_parallel_impl,
('min', 'numpy'): min_parallel_impl,
('max', 'numpy'): max_parallel_impl,}
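# Note: under parallel=True, a call such as numpy.argmin(A) is matched against
# this map and the body of argmin_parallel_impl is inlined in its place (see
# _replace_parallel_functions below), so the reduction becomes an internal_prange
# loop instead of a library call. Sketch:
#
#     @numba.njit(parallel=True)
#     def first_min(a):
#         return numpy.argmin(a)    # rewritten via argmin_parallel_impl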
class LoopNest(object):
'''The LoopNest class holds information of a single loop including
the index variable (of a non-negative integer value), and the
range variable, e.g. range(r) is 0 to r-1 with step size 1.
'''
def __init__(self, index_variable, start, stop, step):
self.index_variable = index_variable
self.start = start
self.stop = stop
self.step = step
def __repr__(self):
return ("LoopNest(index_variable = {}, range = ({}, {}, {}))".
format(self.index_variable, self.start, self.stop, self.step))
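# Note: one LoopNest describes a single level of the generated loop, so
# LoopNest(i, 0, n, 1) corresponds to the sequential form
#
#     for i in range(0, n, 1):
#         ...loop body...
#
# and a Parfor carries a list of LoopNests for multi-dimensional iteration spaces.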
class Parfor(ir.Expr, ir.Stmt):
id_counter = 0
def __init__(
self,
loop_nests,
init_block,
loop_body,
loc,
index_var,
equiv_set,
pattern,
no_sequential_lowering=False):
super(Parfor, self).__init__(
op='parfor',
loc=loc
)
self.id = type(self).id_counter
type(self).id_counter += 1
#self.input_info = input_info
#self.output_info = output_info
self.loop_nests = loop_nests
self.init_block = init_block
self.loop_body = loop_body
self.index_var = index_var
self.params = None # filled right before parallel lowering
self.equiv_set = equiv_set
# The parallel patterns this parfor was generated from and their options
# for example, a parfor could be from the stencil pattern with
# the neighborhood option
self.patterns = [pattern]
# if True, this parfor shouldn't be lowered sequentially even with the
# sequential lowering option
self.no_sequential_lowering = no_sequential_lowering
if config.DEBUG_ARRAY_OPT_STATS:
print('Parallel for-loop #{} is produced from {} at {}'.format(
self.id, pattern[0], loc))
def __repr__(self):
return repr(self.loop_nests) + \
repr(self.loop_body) + repr(self.index_var)
def list_vars(self):
"""list variables used (read/written) in this parfor by
traversing the body and combining block uses.
"""
all_uses = []
for l, b in self.loop_body.items():
for stmt in b.body:
all_uses += stmt.list_vars()
for loop in self.loop_nests:
all_uses.append(loop.index_variable)
if isinstance(loop.start, ir.Var):
all_uses.append(loop.start)
if isinstance(loop.stop, ir.Var):
all_uses.append(loop.stop)
if isinstance(loop.step, ir.Var):
all_uses.append(loop.step)
for stmt in self.init_block.body:
all_uses += stmt.list_vars()
return all_uses
def get_shape_classes(self, var):
return self.equiv_set.get_shape_classes(var)
def dump(self, file=None):
file = file or sys.stdout
print(("begin parfor {}".format(self.id)).center(20, '-'), file=file)
print("index_var = ", self.index_var)
for loopnest in self.loop_nests:
print(loopnest, file=file)
print("init block:", file=file)
self.init_block.dump()
for offset, block in sorted(self.loop_body.items()):
print('label %s:' % (offset,), file=file)
block.dump(file)
print(("end parfor {}".format(self.id)).center(20, '-'), file=file)
def _analyze_parfor(parfor, equiv_set, typemap, array_analysis):
block = parfor.init_block
scope = block.scope
new_body = []
for inst in block.body:
pre, post = array_analysis._analyze_inst(None, scope, equiv_set, inst)
for instr in pre:
new_body.append(instr)
new_body.append(inst)
for instr in post:
new_body.append(instr)
block.body = new_body
return [], []
array_analysis.array_analysis_extensions[Parfor] = _analyze_parfor
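# Note: registering _analyze_parfor in array_analysis_extensions lets the generic
# array analysis recurse into Parfor nodes it would not otherwise understand; the
# hook only instruments the parfor's init_block and contributes no pre/post
# statements of its own, hence the empty ([], []) return value.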
class ParforPass(object):
"""ParforPass class is responsible for converting Numpy
calls in Numba intermediate representation to Parfors, which
will lower into either sequential or parallel loops during lowering
stage.
"""
def __init__(self, func_ir, typemap, calltypes, return_type, typingctx, options):
self.func_ir = func_ir
self.typemap = typemap
self.calltypes = calltypes
self.typingctx = typingctx
self.return_type = return_type
self.options = options
self.array_analysis = array_analysis.ArrayAnalysis(typingctx, func_ir, typemap,
calltypes)
ir_utils._max_label = max(func_ir.blocks.keys())
def run(self):
"""run parfor conversion pass: replace Numpy calls
with Parfors when possible and optimize the IR."""
self.func_ir.blocks = simplify_CFG(self.func_ir.blocks)
# remove Del statements for easier optimization
remove_dels(self.func_ir.blocks)
# e.g. convert A.sum() to np.sum(A) for easier match and optimization
canonicalize_array_math(self.func_ir, self.typemap,
self.calltypes, self.typingctx)
# some numpy functions are given parallel implementations
if self.options.numpy:
self._replace_parallel_functions(self.func_ir.blocks)
# run array analysis, a pre-requisite for parfor translation
self.array_analysis.run(self.func_ir.blocks)
# run stencil translation to parfor
if self.options.stencil:
stencil_pass = StencilPass(self.func_ir, self.typemap, self.calltypes,
self.array_analysis, self.typingctx)
stencil_pass.run()
# prange is always parallelized
if self.options.prange:
self._convert_prange(self.func_ir.blocks)
if self.options.setitem:
self._convert_setitem(self.func_ir.blocks)
if self.options.numpy:
self._convert_numpy(self.func_ir.blocks)
if self.options.reduction:
self._convert_reduce(self.func_ir.blocks)
dprint_func_ir(self.func_ir, "after parfor pass")
simplify(self.func_ir, self.typemap, self.calltypes)
if self.options.fusion:
# try fuse before maximize
self.fuse_parfors(self.array_analysis, self.func_ir.blocks)
# reorder statements to maximize fusion
maximize_fusion(self.func_ir, self.func_ir.blocks)
dprint_func_ir(self.func_ir, "after maximize fusion")
# try fuse again after maximize
self.fuse_parfors(self.array_analysis, self.func_ir.blocks)
dprint_func_ir(self.func_ir, "after fusion")
# simplify again
simplify(self.func_ir, self.typemap, self.calltypes)
# push function call variables inside parfors so gufunc function
# wouldn't need function variables as argument
push_call_vars(self.func_ir.blocks, {}, {})
# simplify again
simplify(self.func_ir, self.typemap, self.calltypes)
dprint_func_ir(self.func_ir, "after optimization")
if config.DEBUG_ARRAY_OPT == 1:
print("variable types: ", sorted(self.typemap.items()))
print("call types: ", self.calltypes)
# run post processor again to generate Del nodes
post_proc = postproc.PostProcessor(self.func_ir)
post_proc.run()
if self.func_ir.is_generator:
fix_generator_types(self.func_ir.generator_info, self.return_type,
self.typemap)
if sequential_parfor_lowering:
lower_parfor_sequential(
self.typingctx, self.func_ir, self.typemap, self.calltypes)
else:
# prepare for parallel lowering
# add parfor params to parfors here since lowering is destructive
# changing the IR after this is not allowed
parfor_ids = get_parfor_params(self.func_ir.blocks)
if config.DEBUG_ARRAY_OPT_STATS:
name = self.func_ir.func_id.func_qualname
n_parfors = len(parfor_ids)
if n_parfors > 0:
after_fusion = ("After fusion" if self.options.fusion
else "With fusion disabled")
print(('{}, function {} has '
'{} parallel for-loop(s) #{}.').format(
after_fusion, name, n_parfors, parfor_ids))
else:
print('Function {} has no Parfor.'.format(name))
return
def _replace_parallel_functions(self, blocks):
"""
Replace functions with their parallel implementation in
replace_functions_map if available.
The implementation code is inlined to enable more optimization.
"""
from numba.inline_closurecall import inline_closure_call
modified = False
work_list = list(blocks.items())
while work_list:
label, block = work_list.pop()
for i, instr in enumerate(block.body):
if isinstance(instr, ir.Assign):
lhs = instr.target
expr = instr.value
if isinstance(expr, ir.Expr) and expr.op == 'call':
func_def = guard(get_definition, self.func_ir, expr.func)
callname = guard(find_callname, self.func_ir, expr)
if callname in replace_functions_map:
new_func = replace_functions_map[callname]
g = copy.copy(self.func_ir.func_id.func.__globals__)
g['numba'] = numba
# inline the parallel implementation
inline_closure_call(self.func_ir, g,
block, i, new_func, self.typingctx,
(self.typemap[expr.args[0].name],),
self.typemap, self.calltypes, work_list)
modified = True
# current block is modified, skip the rest
break
def _convert_numpy(self, blocks):
"""
Convert supported Numpy functions, as well as arrayexpr nodes, to
parfor nodes.
"""
topo_order = find_topo_order(blocks)
# variables available in the program so far (used for finding map
# functions in array_expr lowering)
avail_vars = []
for label in topo_order:
block = blocks[label]
new_body = []
equiv_set = self.array_analysis.get_equiv_set(label)
for instr in block.body:
if isinstance(instr, ir.Assign):
expr = instr.value
lhs = instr.target
if self._is_C_order(lhs.name):
# only translate C order since we can't allocate F
if guard(self._is_supported_npycall, expr):
instr = self._numpy_to_parfor(equiv_set, lhs, expr)
elif isinstance(expr, ir.Expr) and expr.op == 'arrayexpr':
instr = self._arrayexpr_to_parfor(
equiv_set, lhs, expr, avail_vars)
elif guard(self._is_supported_npyreduction, expr):
instr = self._reduction_to_parfor(equiv_set, lhs, expr)
avail_vars.append(lhs.name)
new_body.append(instr)
block.body = new_body
def _convert_reduce(self, blocks):
"""
Find reduce() calls and convert them to parfors.
"""
topo_order = find_topo_order(blocks)
for label in topo_order:
block = blocks[label]
new_body = []
equiv_set = self.array_analysis.get_equiv_set(label)
for instr in block.body:
if guard(self._is_reduce_assign, instr):
expr = instr.value
lhs = instr.target
parfor = guard(self._reduce_to_parfor, equiv_set, lhs, expr)
if parfor:
instr = parfor
new_body.append(instr)
block.body = new_body
return
def _convert_setitem(self, blocks):
# convert setitem expressions like A[C] = c or A[C] = B[C] to parfor,
# where C is a boolean array.
topo_order = find_topo_order(blocks)
# variables available in the program so far (used for finding map
# functions in array_expr lowering)
avail_vars = []
for label in topo_order:
block = blocks[label]
new_body = []
equiv_set = self.array_analysis.get_equiv_set(label)
for instr in block.body:
if isinstance(instr, ir.StaticSetItem) or isinstance(instr, ir.SetItem):
loc = instr.loc
target = instr.target
index = instr.index if isinstance(instr, ir.SetItem) else instr.index_var
value = instr.value
target_typ = self.typemap[target.name]
index_typ = self.typemap[index.name]
value_typ = self.typemap[value.name]
if isinstance(target_typ, types.npytypes.Array):
if (isinstance(index_typ, types.npytypes.Array) and
isinstance(index_typ.dtype, types.Boolean) and
target_typ.ndim == index_typ.ndim):
if isinstance(value_typ, types.Number):
instr = self._setitem_to_parfor(equiv_set,
loc, target, index, value)
elif isinstance(value_typ, types.npytypes.Array):
val_def = guard(get_definition, self.func_ir,
value.name)
if (isinstance(val_def, ir.Expr) and
val_def.op == 'getitem' and
val_def.index.name == index.name):
instr = self._setitem_to_parfor(equiv_set,
loc, target, index, val_def.value)
else:
shape = equiv_set.get_shape(instr)
if shape is not None:
instr = self._setitem_to_parfor(equiv_set,
loc, target, index, value, shape=shape)
new_body.append(instr)
block.body = new_body
def _convert_prange(self, blocks):
call_table, _ = get_call_table(blocks)
cfg = compute_cfg_from_blocks(blocks)
for loop in cfg.loops().values():
if len(loop.entries) != 1 or len(loop.exits) != 1:
continue
entry = list(loop.entries)[0]
for inst in blocks[entry].body:
# if prange call
if (isinstance(inst, ir.Assign) and isinstance(inst.value, ir.Expr)
and inst.value.op == 'call'
and self._is_prange(inst.value.func.name, call_table)):
body_labels = list(loop.body - {loop.header})
args = inst.value.args
prange_kind = self._get_prange_kind(inst.value.func.name,
call_table)
# find loop index variable (pair_first in header block)
for stmt in blocks[loop.header].body:
if (isinstance(stmt, ir.Assign)
and isinstance(stmt.value, ir.Expr)
and stmt.value.op == 'pair_first'):
loop_index = stmt.target.name
break
# loop_index may be assigned to other vars
# get header copies to find all of them
cps, _ = get_block_copies({0: blocks[loop.header]},
self.typemap)
cps = cps[0]
loop_index_vars = set(t for t, v in cps if v == loop_index)
loop_index_vars.add(loop_index)
start = 0
step = 1
size_var = args[0]
if len(args) == 2:
start = args[0]
size_var = args[1]
if len(args) == 3:
start = args[0]
size_var = args[1]
try:
step = self.func_ir.get_definition(args[2])
except KeyError:
raise NotImplementedError(
"Only known step size is supported for prange")
if not isinstance(step, ir.Const):
raise NotImplementedError(
"Only constant step size is supported for prange")
step = step.value
if step != 1:
raise NotImplementedError(
"Only constant step size of 1 is supported for prange")
# set l=l for dead remove
inst.value = inst.target
scope = blocks[entry].scope
loc = inst.loc
init_block = ir.Block(scope, loc)
body = {l: blocks[l] for l in body_labels}
index_var = ir.Var(
scope, mk_unique_var("parfor_index"), loc)
self.typemap[index_var.name] = types.intp
index_var_map = {v: index_var for v in loop_index_vars}
replace_vars(body, index_var_map)
parfor_loop = LoopNest(index_var, start, size_var, step)
parfor = Parfor([parfor_loop], init_block, body, loc, index_var,
self.array_analysis.get_equiv_set(entry),
("prange", prange_kind))
# add parfor to entry block, change jump target to exit
jump = blocks[entry].body.pop()
blocks[entry].body.append(parfor)
jump.target = list(loop.exits)[0]
blocks[entry].body.append(jump)
# remove jumps back to header block
for l in body_labels:
last_inst = body[l].body[-1]
if isinstance(
last_inst,
ir.Jump) and last_inst.target == loop.header:
body[l].body.pop()
# remove loop blocks from top level dict
blocks.pop(loop.header)
for l in body_labels:
blocks.pop(l)
# run on parfor body
parfor_blocks = wrap_parfor_blocks(parfor)
# but we also need to make up equiv_set for init_block
backup_equivset = self.array_analysis.equiv_sets.get(0, None)
self.array_analysis.equiv_sets[0] = parfor.equiv_set
self._convert_prange(parfor_blocks)
if self.options.setitem:
self._convert_setitem(parfor_blocks)
if self.options.numpy:
self._convert_numpy(parfor_blocks)
if self.options.reduction:
self._convert_reduce(parfor_blocks)
parfor_blocks = rename_labels(parfor_blocks)
unwrap_parfor_blocks(parfor, parfor_blocks)
# restore equiv_set for real block 0
if backup_equivset:
self.array_analysis.equiv_sets[0] = backup_equivset
# run convert again to handle other prange loops
return self._convert_prange(blocks)
def _is_prange(self, func_var, call_table):
# prange can be either getattr (numba.prange) or global (prange)
if func_var not in call_table:
return False
call = call_table[func_var]
return len(call) > 0 and (call[0] == 'prange' or call[0] == prange
or call[0] == 'internal_prange' or call[0] == internal_prange)
def _get_prange_kind(self, func_var, call_table):
"""see if prange is user prange or internal"""
# prange can be either getattr (numba.prange) or global (prange)
assert func_var in call_table
call = call_table[func_var]
assert len(call) > 0
kind = 'user'
if call[0] == 'internal_prange' or call[0] == internal_prange:
kind = 'internal'
return kind
def _is_reduce_assign(self, inst):
"""
See if inst is an assignment with a reduce() call as value.
"""
require(isinstance(inst, ir.Assign))
rhs = inst.value
require(isinstance(rhs, ir.Expr))
require(rhs.op == 'call')
callname = find_callname(self.func_ir, rhs)
return (callname == ('reduce', 'builtins')
or callname == ('reduce', '_functools'))
def _is_C_order(self, arr_name):
typ = self.typemap[arr_name]
return isinstance(typ, types.npytypes.Array) and typ.layout == 'C' and typ.ndim > 0
def _make_index_var(self, scope, index_vars, body_block):
ndims = len(index_vars)
loc = body_block.loc
if ndims > 1:
tuple_var = ir.Var(scope, mk_unique_var(
"$parfor_index_tuple_var"), loc)
self.typemap[tuple_var.name] = types.containers.UniTuple(
types.intp, ndims)
tuple_call = ir.Expr.build_tuple(list(index_vars), loc)
tuple_assign = ir.Assign(tuple_call, tuple_var, loc)
body_block.body.append(tuple_assign)
return tuple_var, types.containers.UniTuple(types.intp, ndims)
elif ndims == 1:
return index_vars[0], types.intp
else:
raise NotImplementedError(
"Parfor does not handle arrays of dimension 0")
def _mk_parfor_loops(self, size_vars, scope, loc):
"""
Create loop index variables and build LoopNest objects for a parfor.
"""
loopnests = []
index_vars = []
for size_var in size_vars:
index_var = ir.Var(scope, mk_unique_var("parfor_index"), loc)
index_vars.append(index_var)
self.typemap[index_var.name] = types.intp
loopnests.append(LoopNest(index_var, 0, size_var, 1))
return index_vars, loopnests
def _arrayexpr_to_parfor(self, equiv_set, lhs, arrayexpr, avail_vars):
"""generate parfor from arrayexpr node, which is essentially a
map with recursive tree.
"""
scope = lhs.scope
loc = lhs.loc
expr = arrayexpr.expr
arr_typ = self.typemap[lhs.name]
el_typ = arr_typ.dtype
# generate loopnests and size variables from lhs correlations
size_vars = equiv_set.get_shape(lhs)
index_vars, loopnests = self._mk_parfor_loops(size_vars, scope, loc)
# generate init block and body
init_block = ir.Block(scope, loc)
init_block.body = mk_alloc(self.typemap, self.calltypes, lhs,
tuple(size_vars), el_typ, scope, loc)
body_label = next_label()
body_block = ir.Block(scope, loc)
expr_out_var = ir.Var(scope, mk_unique_var("$expr_out_var"), loc)
self.typemap[expr_out_var.name] = el_typ
index_var, index_var_typ = self._make_index_var(
scope, index_vars, body_block)
body_block.body.extend(
_arrayexpr_tree_to_ir(
self.func_ir,
self.typingctx,
self.typemap,
self.calltypes,
equiv_set,
init_block,
expr_out_var,
expr,
index_var,
index_vars,
avail_vars))
parfor = Parfor(loopnests, init_block, {}, loc, index_var, equiv_set,
('arrayexpr {}'.format(repr_arrayexpr(arrayexpr.expr)),))
setitem_node = ir.SetItem(lhs, index_var, expr_out_var, loc)
self.calltypes[setitem_node] = signature(
types.none, self.typemap[lhs.name], index_var_typ, el_typ)
body_block.body.append(setitem_node)
parfor.loop_body = {body_label: body_block}
if config.DEBUG_ARRAY_OPT == 1:
parfor.dump()
return parfor
def _setitem_to_parfor(self, equiv_set, loc, target, index, value, shape=None):
"""generate parfor from setitem node with a boolean or slice array indices.
The value can be either a scalar or an array variable, and if a boolean index
is used for the latter case, the same index must be used for the value too.
"""
scope = target.scope
arr_typ = self.typemap[target.name]
el_typ = arr_typ.dtype
index_typ = self.typemap[index.name]
init_block = ir.Block(scope, loc)
if shape:
# Slice index is being used on the target array, we'll have to create
# a sub-array so that the target dimension matches the given shape.
assert(isinstance(index_typ, types.BaseTuple) or
isinstance(index_typ, types.SliceType))
# setitem has a custom target shape
size_vars = shape
# create a new target array via getitem
subarr_var = ir.Var(scope, mk_unique_var("$subarr"), loc)
getitem_call = ir.Expr.getitem(target, index, loc)
subarr_typ = typing.arraydecl.get_array_index_type(arr_typ, index_typ).result
self.typemap[subarr_var.name] = subarr_typ
self.calltypes[getitem_call] = signature(subarr_typ, arr_typ,
index_typ)
init_block.append(ir.Assign(getitem_call, subarr_var, loc))
target = subarr_var
else:
# Otherwise it is a boolean array that is used as index.
assert(isinstance(index_typ, types.ArrayCompatible))
size_vars = equiv_set.get_shape(target)
bool_typ = index_typ.dtype
# generate loopnests and size variables from lhs correlations
loopnests = []
index_vars = []
for size_var in size_vars:
index_var = ir.Var(scope, mk_unique_var("parfor_index"), loc)
index_vars.append(index_var)
self.typemap[index_var.name] = types.intp
loopnests.append(LoopNest(index_var, 0, size_var, 1))
# generate body
body_label = next_label()
body_block = ir.Block(scope, loc)
index_var, index_var_typ = self._make_index_var(
scope, index_vars, body_block)
parfor = Parfor(loopnests, init_block, {}, loc, index_var, equiv_set,
('setitem',))
if shape:
# slice subarray
parfor.loop_body = {body_label: body_block}
true_block = body_block
end_label = None
else:
# boolean mask
true_label = next_label()
true_block = ir.Block(scope, loc)
end_label = next_label()
end_block = ir.Block(scope, loc)
parfor.loop_body = {body_label: body_block,
true_label: true_block,
end_label: end_block,
}
mask_var = ir.Var(scope, mk_unique_var("$mask_var"), loc)
self.typemap[mask_var.name] = bool_typ
mask_val = ir.Expr.getitem(index, index_var, loc)
body_block.body.extend([
ir.Assign(mask_val, mask_var, loc),
ir.Branch(mask_var, true_label, end_label, loc)
])
value_typ = self.typemap[value.name]
if isinstance(value_typ, types.npytypes.Array):
value_var = ir.Var(scope, mk_unique_var("$value_var"), loc)
self.typemap[value_var.name] = value_typ.dtype
getitem_call = ir.Expr.getitem(value, index_var, loc)
self.calltypes[getitem_call] = signature(
value_typ.dtype, value_typ, index_var_typ)
true_block.body.append(ir.Assign(getitem_call, value_var, loc))
else:
value_var = value
setitem_node = ir.SetItem(target, index_var, value_var, loc)
self.calltypes[setitem_node] = signature(
types.none, self.typemap[target.name], index_var_typ, el_typ)
true_block.body.append(setitem_node)
if end_label:
true_block.body.append(ir.Jump(end_label, loc))
if config.DEBUG_ARRAY_OPT == 1:
parfor.dump()
return parfor
def _is_supported_npycall(self, expr):
"""check if we support parfor translation for
this Numpy call.
"""
call_name, mod_name = find_callname(self.func_ir, expr)
if not mod_name.startswith('numpy'):
return False
if call_name in ['zeros', 'ones']:
return True
if mod_name == 'numpy.random' and call_name in random_calls:
return True
# TODO: add more calls
if call_name == 'dot':
# only translate matrix/vector and vector/vector multiply to parfor
# (don't translate matrix/matrix multiply)
if (self._get_ndims(expr.args[0].name) <= 2 and
self._get_ndims(expr.args[1].name) == 1):
return True
return False
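# Illustrative note (not from the original source): under the checks above,
# a call like np.dot(X, v) with a 2D matrix X and a 1D vector v (or two 1D
# vectors) is accepted for parfor translation, while np.dot(X, Y) with two
# 2D matrices is rejected and left to the regular matrix-multiply lowering.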
def _is_supported_npyreduction(self, expr):
"""check if we support parfor translation for
this Numpy reduce call.
"""
func_name, mod_name = find_callname(self.func_ir, expr)
return mod_name == 'numpy' and (func_name in _reduction_ops)
def _get_ndims(self, arr):
# return len(self.array_analysis.array_shape_classes[arr])
return self.typemap[arr].ndim
def _numpy_to_parfor(self, equiv_set, lhs, expr):
call_name, mod_name = find_callname(self.func_ir, expr)
args = expr.args
kws = dict(expr.kws)
if call_name in ['zeros', 'ones'] or mod_name == 'numpy.random':
return self._numpy_map_to_parfor(equiv_set, call_name, lhs, args, kws, expr)
if call_name == 'dot':
assert len(args) == 2 or len(args) == 3
# if 3 args, output is allocated already
out = None
if len(args) == 3:
out = args[2]
if 'out' in kws:
out = kws['out']
in1 = args[0]
in2 = args[1]
el_typ = self.typemap[lhs.name].dtype
assert self._get_ndims(
in1.name) <= 2 and self._get_ndims(
in2.name) == 1
# loop range correlation is the same as the first dimension of the 1st input
size_vars = equiv_set.get_shape(in1)
size_var = size_vars[0]
scope = lhs.scope
loc = expr.loc
index_var = ir.Var(scope, mk_unique_var("parfor_index"), lhs.loc)
self.typemap[index_var.name] = types.intp
loopnests = [LoopNest(index_var, 0, size_var, 1)]
init_block = ir.Block(scope, loc)
parfor = Parfor(loopnests, init_block, {}, loc, index_var, equiv_set,
('{} function'.format(call_name),))
if self._get_ndims(in1.name) == 2:
# for 2D input, there is an inner loop
# correlation of inner dimension
inner_size_var = size_vars[1]
# loop structure: range block, header block, body
range_label = next_label()
header_label = next_label()
body_label = next_label()
out_label = next_label()
if out is None:
alloc_nodes = mk_alloc(self.typemap, self.calltypes, lhs,
size_var, el_typ, scope, loc)
init_block.body = alloc_nodes
else:
out_assign = ir.Assign(out, lhs, loc)
init_block.body = [out_assign]
init_block.body.extend(
_gen_dotmv_check(
self.typemap,
self.calltypes,
in1,
in2,
lhs,
scope,
loc))
# sum_var = 0
const_node = ir.Const(0, loc)
const_var = ir.Var(scope, mk_unique_var("$const"), loc)
self.typemap[const_var.name] = el_typ
const_assign = ir.Assign(const_node, const_var, loc)
sum_var = ir.Var(scope, mk_unique_var("$sum_var"), loc)
self.typemap[sum_var.name] = el_typ
sum_assign = ir.Assign(const_var, sum_var, loc)
range_block = mk_range_block(
self.typemap, 0, inner_size_var, 1, self.calltypes, scope, loc)
range_block.body = [
const_assign, sum_assign] + range_block.body
range_block.body[-1].target = header_label # fix jump target
phi_var = range_block.body[-2].target
header_block = mk_loop_header(self.typemap, phi_var,
self.calltypes, scope, loc)
header_block.body[-1].truebr = body_label
header_block.body[-1].falsebr = out_label
phi_b_var = header_block.body[-2].target
body_block = _mk_mvdot_body(self.typemap, self.calltypes,
phi_b_var, index_var, in1, in2,
sum_var, scope, loc, el_typ)
body_block.body[-1].target = header_label
out_block = ir.Block(scope, loc)
# lhs[parfor_index] = sum_var
setitem_node = ir.SetItem(lhs, index_var, sum_var, loc)
self.calltypes[setitem_node] = signature(
types.none, self.typemap[lhs.name], types.intp, el_typ)
out_block.body = [setitem_node]
parfor.loop_body = {
range_label: range_block,
header_label: header_block,
body_label: body_block,
out_label: out_block}
else: # self._get_ndims(in1.name)==1 (reduction)
NotImplementedError("no reduction for dot() " + expr)
if config.DEBUG_ARRAY_OPT == 1:
print("generated parfor for numpy call:")
parfor.dump()
return parfor
# raise an error if we couldn't handle it (avoid infinite loop in the rewrite pass)
raise NotImplementedError("parfor translation failed for ", expr)
def _numpy_map_to_parfor(self, equiv_set, call_name, lhs, args, kws, expr):
"""generate parfor from Numpy calls that are maps.
"""
scope = lhs.scope
loc = lhs.loc
arr_typ = self.typemap[lhs.name]
el_typ = arr_typ.dtype
# generate loopnests and size variables from lhs correlations
size_vars = equiv_set.get_shape(lhs)
index_vars, loopnests = self._mk_parfor_loops(size_vars, scope, loc)
# generate init block and body
init_block = ir.Block(scope, loc)
init_block.body = mk_alloc(self.typemap, self.calltypes, lhs,
tuple(size_vars), el_typ, scope, loc)
body_label = next_label()
body_block = ir.Block(scope, loc)
expr_out_var = ir.Var(scope, mk_unique_var("$expr_out_var"), loc)
self.typemap[expr_out_var.name] = el_typ
index_var, index_var_typ = self._make_index_var(
scope, index_vars, body_block)
if call_name == 'zeros':
value = ir.Const(0, loc)
elif call_name == 'ones':
value = ir.Const(1, loc)
elif call_name in random_calls:
# remove size arg to reuse the call expr for single value
_remove_size_arg(call_name, expr)
# update expr type
new_arg_typs, new_kw_types = _get_call_arg_types(
expr, self.typemap)
self.calltypes.pop(expr)
self.calltypes[expr] = self.typemap[expr.func.name].get_call_type(
typing.Context(), new_arg_typs, new_kw_types)
value = expr
else:
raise NotImplementedError(
"Map of numpy.{} to parfor is not implemented".format(call_name))
value_assign = ir.Assign(value, expr_out_var, loc)
body_block.body.append(value_assign)
parfor = Parfor(loopnests, init_block, {}, loc, index_var, equiv_set,
('{} function'.format(call_name),))
setitem_node = ir.SetItem(lhs, index_var, expr_out_var, loc)
self.calltypes[setitem_node] = signature(
types.none, self.typemap[lhs.name], index_var_typ, el_typ)
body_block.body.append(setitem_node)
parfor.loop_body = {body_label: body_block}
if config.DEBUG_ARRAY_OPT == 1:
print("generated parfor for numpy map:")
parfor.dump()
return parfor
def _reduction_to_parfor(self, equiv_set, lhs, expr):
from numba.targets.builtins import get_type_max_value, get_type_min_value
call_name, mod_name = find_callname(self.func_ir, expr)
args = expr.args
if call_name in _reduction_ops:
acc_op, im_op, init_val = _reduction_ops[call_name]
assert len(args) in [1, 2] # vector dot has 2 args
in1 = args[0]
arr_typ = self.typemap[in1.name]
in_typ = arr_typ.dtype
im_op_func_typ = find_op_typ(im_op, [in_typ, in_typ])
el_typ = im_op_func_typ.return_type
ndims = arr_typ.ndim
# For full reduction, loop range correlation is same as 1st input
size_vars = equiv_set.get_shape(in1)
assert ndims == len(size_vars)
scope = lhs.scope
loc = expr.loc
index_vars, loopnests = self._mk_parfor_loops(size_vars, scope, loc)
acc_var = lhs
# init block has to init the reduction variable
init_const = ir.Const(el_typ(init_val), loc)
init_block = ir.Block(scope, loc)
init_block.body.append(ir.Assign(init_const, acc_var, loc))
# loop body accumulates acc_var
acc_block = ir.Block(scope, loc)
tmp_var = ir.Var(scope, mk_unique_var("$val"), loc)
self.typemap[tmp_var.name] = in_typ
index_var, index_var_type = self._make_index_var(
scope, index_vars, acc_block)
getitem_call = ir.Expr.getitem(in1, index_var, loc)
self.calltypes[getitem_call] = signature(
in_typ, arr_typ, index_var_type)
acc_block.body.append(ir.Assign(getitem_call, tmp_var, loc))
if call_name == 'dot':
# dot has two inputs
tmp_var1 = tmp_var
in2 = args[1]
tmp_var2 = ir.Var(scope, mk_unique_var("$val"), loc)
self.typemap[tmp_var2.name] = in_typ
getitem_call2 = ir.Expr.getitem(in2, index_var, loc)
self.calltypes[getitem_call2] = signature(
in_typ, arr_typ, index_var_type)
acc_block.body.append(ir.Assign(getitem_call2, tmp_var2, loc))
mult_call = ir.Expr.binop('*', tmp_var1, tmp_var2, loc)
mult_func_typ = find_op_typ('*', [in_typ, in_typ])
self.calltypes[mult_call] = mult_func_typ
tmp_var = ir.Var(scope, mk_unique_var("$val"), loc)
self.typemap[tmp_var.name] = mult_func_typ
acc_block.body.append(ir.Assign(mult_call, tmp_var, loc))
acc_call = ir.Expr.inplace_binop(
acc_op, im_op, acc_var, tmp_var, loc)
# for some reason, type template of += returns None,
# so type template of + should be used
self.calltypes[acc_call] = im_op_func_typ
# FIXME: we had to break assignment: acc += ... acc ...
# into two assignments: acc_tmp = ... acc ...; x = acc_tmp
# in order to avoid an issue in copy propagation.
acc_tmp_var = ir.Var(scope, mk_unique_var("$acc"), loc)
self.typemap[acc_tmp_var.name] = el_typ
acc_block.body.append(ir.Assign(acc_call, acc_tmp_var, loc))
acc_block.body.append(ir.Assign(acc_tmp_var, acc_var, loc))
loop_body = {next_label(): acc_block}
# parfor
parfor = Parfor(loopnests, init_block, loop_body, loc, index_var,
equiv_set, ('{} function'.format(call_name),))
return parfor
# raise an error if we couldn't handle it (avoid infinite loop in the rewrite pass)
raise NotImplementedError("parfor translation failed for ", expr)
def _reduce_to_parfor(self, equiv_set, lhs, expr):
"""
Convert a reduce() call to a parfor.
The call arguments should be (func, array, init_value).
"""
from numba.inline_closurecall import check_reduce_func
args = expr.args
scope = lhs.scope
loc = expr.loc
call_name = args[0]
reduce_func = get_definition(self.func_ir, call_name)
check_reduce_func(self.func_ir, reduce_func)
in_arr = args[1]
init_val = args[2]
arr_typ = self.typemap[in_arr.name]
in_typ = arr_typ.dtype
size_vars = equiv_set.get_shape(in_arr)
assert len(size_vars) == 1, "only parallel reduce() on 1D arrays is supported"
index_vars, loopnests = self._mk_parfor_loops([size_vars[0]], scope, loc)
acc_var = lhs
# init block has to init the reduction variable
init_block = ir.Block(scope, loc)
init_block.body.append(ir.Assign(init_val, acc_var, loc))
# loop body accumulates acc_var
acc_block = ir.Block(scope, loc)
# make getitem
tmp_var = ir.Var(scope, mk_unique_var("$val"), loc)
self.typemap[tmp_var.name] = in_typ
index_var = index_vars[0]
index_var_type = types.intp
getitem_call = ir.Expr.getitem(in_arr, index_var, loc)
self.calltypes[getitem_call] = signature(
in_typ, arr_typ, index_var_type)
reduce_f_ir = compile_to_numba_ir(reduce_func,
self.func_ir.func_id.func.__globals__,
self.typingctx,
(in_typ, in_typ),
self.typemap,
self.calltypes)
loop_body = reduce_f_ir.blocks
acc_label = next_label()
loop_body[acc_label] = acc_block
first_reduce_block = reduce_f_ir.blocks[min(reduce_f_ir.blocks.keys())]
first_reduce_block.body.insert(0, ir.Assign(getitem_call, tmp_var, loc))
replace_arg_nodes(first_reduce_block, [acc_var, tmp_var])
replace_returns(loop_body, acc_var, acc_label)
parfor = Parfor(loopnests, init_block, loop_body, loc, index_var,
equiv_set, ('{} function'.format(call_name),))
return parfor
def fuse_parfors(self, array_analysis, blocks):
for label, block in blocks.items():
equiv_set = array_analysis.get_equiv_set(label)
fusion_happened = True
while fusion_happened:
fusion_happened = False
new_body = []
i = 0
while i < len(block.body) - 1:
stmt = block.body[i]
next_stmt = block.body[i + 1]
if isinstance(stmt, Parfor) and isinstance(next_stmt, Parfor):
fused_node = try_fuse(equiv_set, stmt, next_stmt)
if fused_node is not None:
fusion_happened = True
new_body.append(fused_node)
self.fuse_recursive_parfor(fused_node)
i += 2
continue
new_body.append(stmt)
if isinstance(stmt, Parfor):
self.fuse_recursive_parfor(stmt)
i += 1
new_body.append(block.body[-1])
block.body = new_body
return
def fuse_recursive_parfor(self, parfor):
blocks = wrap_parfor_blocks(parfor)
maximize_fusion(self.func_ir, blocks)
arr_analysis = array_analysis.ArrayAnalysis(self.typingctx, self.func_ir,
self.typemap, self.calltypes)
arr_analysis.run(blocks)
self.fuse_parfors(arr_analysis, blocks)
unwrap_parfor_blocks(parfor)
def _remove_size_arg(call_name, expr):
"remove size argument from args or kws"
# remove size kwarg
kws = dict(expr.kws)
kws.pop('size', '')
expr.kws = tuple(kws.items())
# remove size arg if available
if call_name in random_1arg_size + random_int_args:
# these calls have only a "size" argument or list of ints
# so remove all args
expr.args = []
if call_name in random_3arg_sizelast:
# normal, uniform, ... have 3 args, last one is size
if len(expr.args) == 3:
expr.args.pop()
if call_name in random_2arg_sizelast:
# have 2 args, last one is size
if len(expr.args) == 2:
expr.args.pop()
if call_name == 'randint':
# has 4 args, 3rd one is size
if len(expr.args) == 3:
expr.args.pop()
if len(expr.args) == 4:
dt_arg = expr.args.pop()
expr.args.pop() # remove size
expr.args.append(dt_arg)
if call_name == 'triangular':
# has 4 args, last one is size
if len(expr.args) == 4:
expr.args.pop()
return
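# Illustrative sketch (editor's addition, not from the original source): the
# intent of _remove_size_arg is to turn an array-producing random call into
# its scalar form so it can be evaluated once per parfor iteration, e.g.
# conceptually:
#   np.random.normal(0.0, 1.0, size=(n, m))  ->  np.random.normal(0.0, 1.0)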
def _get_call_arg_types(expr, typemap):
new_arg_typs = []
for arg in expr.args:
new_arg_typs.append(typemap[arg.name])
new_kw_types = {}
for name, arg in expr.kws:
new_kw_types[name] = typemap[arg.name]
return tuple(new_arg_typs), new_kw_types
def _gen_dotmv_check(typemap, calltypes, in1, in2, out, scope, loc):
"""compile dot() check from linalg module and insert a call to it"""
# save max_label since pipeline is called recursively
saved_max_label = ir_utils._max_label
from numba import njit
from numba.targets.linalg import dot_3_mv_check_args
check_func = njit(dot_3_mv_check_args)
# g_var = Global(dot_3_mv_check_args)
g_var = ir.Var(scope, mk_unique_var("$check_mv"), loc)
func_typ = types.functions.Dispatcher(check_func)
typemap[g_var.name] = func_typ
g_obj = ir.Global("dot_3_mv_check_args", check_func, loc)
g_assign = ir.Assign(g_obj, g_var, loc)
# dummy_var = call g_var(in1, in2, out)
call_node = ir.Expr.call(g_var, [in1, in2, out], (), loc)
calltypes[call_node] = func_typ.get_call_type(
typing.Context(), [typemap[in1.name], typemap[in2.name], typemap[out.name]], {})
dummy_var = ir.Var(scope, mk_unique_var("$call_out_dummy"), loc)
typemap[dummy_var.name] = types.none
call_assign = ir.Assign(call_node, dummy_var, loc)
ir_utils._max_label = saved_max_label
return [g_assign, call_assign]
def _mk_mvdot_body(typemap, calltypes, phi_b_var, index_var, in1, in2, sum_var,
scope, loc, el_typ):
"""generate array inner product (X[p,:], v[:]) for parfor of np.dot(X,v)"""
body_block = ir.Block(scope, loc)
# inner_index = phi_b_var
inner_index = ir.Var(scope, mk_unique_var("$inner_index"), loc)
typemap[inner_index.name] = types.intp
inner_index_assign = ir.Assign(phi_b_var, inner_index, loc)
# tuple_var = build_tuple(index_var, inner_index)
tuple_var = ir.Var(scope, mk_unique_var("$tuple_var"), loc)
typemap[tuple_var.name] = types.containers.UniTuple(types.intp, 2)
tuple_call = ir.Expr.build_tuple([index_var, inner_index], loc)
tuple_assign = ir.Assign(tuple_call, tuple_var, loc)
# X_val = getitem(X, tuple_var)
X_val = ir.Var(scope, mk_unique_var("$" + in1.name + "_val"), loc)
typemap[X_val.name] = el_typ
getitem_call = ir.Expr.getitem(in1, tuple_var, loc)
calltypes[getitem_call] = signature(el_typ, typemap[in1.name],
typemap[tuple_var.name])
getitem_assign = ir.Assign(getitem_call, X_val, loc)
# v_val = getitem(V, inner_index)
v_val = ir.Var(scope, mk_unique_var("$" + in2.name + "_val"), loc)
typemap[v_val.name] = el_typ
v_getitem_call = ir.Expr.getitem(in2, inner_index, loc)
calltypes[v_getitem_call] = signature(
el_typ, typemap[in2.name], types.intp)
v_getitem_assign = ir.Assign(v_getitem_call, v_val, loc)
# add_var = X_val * v_val
add_var = ir.Var(scope, mk_unique_var("$add_var"), loc)
typemap[add_var.name] = el_typ
add_call = ir.Expr.binop('*', X_val, v_val, loc)
calltypes[add_call] = signature(el_typ, el_typ, el_typ)
add_assign = ir.Assign(add_call, add_var, loc)
# acc_var = sum_var + add_var
acc_var = ir.Var(scope, mk_unique_var("$acc_var"), loc)
typemap[acc_var.name] = el_typ
acc_call = ir.Expr.inplace_binop('+=', '+', sum_var, add_var, loc)
calltypes[acc_call] = signature(el_typ, el_typ, el_typ)
acc_assign = ir.Assign(acc_call, acc_var, loc)
# sum_var = acc_var
final_assign = ir.Assign(acc_var, sum_var, loc)
# jump to header
b_jump_header = ir.Jump(-1, loc)
body_block.body = [
inner_index_assign,
tuple_assign,
getitem_assign,
v_getitem_assign,
add_assign,
acc_assign,
final_assign,
b_jump_header]
return body_block
def _arrayexpr_tree_to_ir(
func_ir,
typingctx,
typemap,
calltypes,
equiv_set,
init_block,
expr_out_var,
expr,
parfor_index_tuple_var,
all_parfor_indices,
avail_vars):
"""generate IR from array_expr's expr tree recursively. Assign output to
expr_out_var and returns the whole IR as a list of Assign nodes.
"""
el_typ = typemap[expr_out_var.name]
scope = expr_out_var.scope
loc = expr_out_var.loc
out_ir = []
if isinstance(expr, tuple):
op, arr_expr_args = expr
arg_vars = []
for arg in arr_expr_args:
arg_out_var = ir.Var(scope, mk_unique_var("$arg_out_var"), loc)
typemap[arg_out_var.name] = el_typ
out_ir += _arrayexpr_tree_to_ir(func_ir,
typingctx,
typemap,
calltypes,
equiv_set,
init_block,
arg_out_var,
arg,
parfor_index_tuple_var,
all_parfor_indices,
avail_vars)
arg_vars.append(arg_out_var)
if op in npydecl.supported_array_operators:
el_typ1 = typemap[arg_vars[0].name]
if len(arg_vars) == 2:
el_typ2 = typemap[arg_vars[1].name]
func_typ = find_op_typ(op, [el_typ1, el_typ2])
ir_expr = ir.Expr.binop(op, arg_vars[0], arg_vars[1], loc)
if op == '/':
func_typ, ir_expr = _gen_np_divide(
arg_vars[0], arg_vars[1], out_ir, typemap)
else:
func_typ = find_op_typ(op, [el_typ1])
ir_expr = ir.Expr.unary(op, arg_vars[0], loc)
calltypes[ir_expr] = func_typ
el_typ = func_typ.return_type
out_ir.append(ir.Assign(ir_expr, expr_out_var, loc))
for T in array_analysis.MAP_TYPES:
if isinstance(op, T):
# elif isinstance(op, (np.ufunc, DUFunc)):
# function calls are stored in variables which are not removed
# op is the typing_key of the variable's type
func_var_name = _find_func_var(typemap, op, avail_vars)
func_var = ir.Var(scope, mk_unique_var(func_var_name), loc)
typemap[func_var.name] = typemap[func_var_name]
func_var_def = func_ir.get_definition(func_var_name)
ir_expr = ir.Expr.call(func_var, arg_vars, (), loc)
call_typ = typemap[func_var.name].get_call_type(
typing.Context(), [el_typ] * len(arg_vars), {})
calltypes[ir_expr] = call_typ
el_typ = call_typ.return_type
#signature(el_typ, el_typ)
out_ir.append(ir.Assign(func_var_def, func_var, loc))
out_ir.append(ir.Assign(ir_expr, expr_out_var, loc))
elif isinstance(expr, ir.Var):
var_typ = typemap[expr.name]
if isinstance(var_typ, types.Array):
el_typ = var_typ.dtype
ir_expr = _gen_arrayexpr_getitem(
equiv_set,
expr,
parfor_index_tuple_var,
all_parfor_indices,
el_typ,
calltypes,
typingctx,
typemap,
init_block,
out_ir)
else:
# assert typemap[expr.name]==el_typ
el_typ = var_typ
ir_expr = expr
out_ir.append(ir.Assign(ir_expr, expr_out_var, loc))
elif isinstance(expr, ir.Const):
el_typ = typing.Context().resolve_value_type(expr.value)
out_ir.append(ir.Assign(expr, expr_out_var, loc))
if len(out_ir) == 0:
raise NotImplementedError(
"Don't know how to translate array expression '%r'" % (expr,))
typemap.pop(expr_out_var.name, None)
typemap[expr_out_var.name] = el_typ
return out_ir
def _gen_np_divide(arg1, arg2, out_ir, typemap):
"""generate np.divide() instead of / for array_expr to get numpy error model
like inf for division by zero (test_division_by_zero).
"""
scope = arg1.scope
loc = arg1.loc
# g_np_var = Global(numpy)
g_np_var = ir.Var(scope, mk_unique_var("$np_g_var"), loc)
typemap[g_np_var.name] = types.misc.Module(numpy)
g_np = ir.Global('np', numpy, loc)
g_np_assign = ir.Assign(g_np, g_np_var, loc)
# attr call: div_attr = getattr(g_np_var, divide)
div_attr_call = ir.Expr.getattr(g_np_var, "divide", loc)
attr_var = ir.Var(scope, mk_unique_var("$div_attr"), loc)
func_var_typ = get_np_ufunc_typ(numpy.divide)
typemap[attr_var.name] = func_var_typ
attr_assign = ir.Assign(div_attr_call, attr_var, loc)
# divide call: div_attr(arg1, arg2)
div_call = ir.Expr.call(attr_var, [arg1, arg2], (), loc)
func_typ = func_var_typ.get_call_type(
typing.Context(), [typemap[arg1.name], typemap[arg2.name]], {})
out_ir.extend([g_np_assign, attr_assign])
return func_typ, div_call
def _gen_arrayexpr_getitem(
equiv_set,
var,
parfor_index_tuple_var,
all_parfor_indices,
el_typ,
calltypes,
typingctx,
typemap,
init_block,
out_ir):
"""if there is implicit dimension broadcast, generate proper access variable
for getitem. For example, if indices are (i1,i2,i3) but shape is (c1,0,c3),
generate a tuple with (i1,0,i3) for access. Another example: for (i1,i2,i3)
and (c1,c2) generate (i2,i3).
"""
loc = var.loc
index_var = parfor_index_tuple_var
var_typ = typemap[var.name]
ndims = typemap[var.name].ndim
num_indices = len(all_parfor_indices)
size_vars = equiv_set.get_shape(var) or []
size_consts = [equiv_set.get_equiv_const(x) for x in size_vars]
if ndims == 0:
# call np.ravel
ravel_var = ir.Var(var.scope, mk_unique_var("$ravel"), loc)
ravel_typ = types.npytypes.Array(dtype=var_typ.dtype, ndim=1, layout='C')
typemap[ravel_var.name] = ravel_typ
stmts = ir_utils.gen_np_call('ravel', numpy.ravel, ravel_var, [var], typingctx, typemap, calltypes)
init_block.body.extend(stmts)
var = ravel_var
# Const(0)
const_node = ir.Const(0, var.loc)
const_var = ir.Var(var.scope, mk_unique_var("$const_ind_0"), loc)
typemap[const_var.name] = types.intp
const_assign = ir.Assign(const_node, const_var, loc)
out_ir.append(const_assign)
index_var = const_var
elif ndims == 1:
# Use last index for 1D arrays
index_var = all_parfor_indices[-1]
elif any([x is not None for x in size_consts]):
# Need a tuple as index
ind_offset = num_indices - ndims
tuple_var = ir.Var(var.scope, mk_unique_var(
"$parfor_index_tuple_var_bcast"), loc)
typemap[tuple_var.name] = types.containers.UniTuple(types.intp, ndims)
# Just in case, const var for size 1 dim access index: $const0 =
# Const(0)
const_node = ir.Const(0, var.loc)
const_var = ir.Var(var.scope, mk_unique_var("$const_ind_0"), loc)
typemap[const_var.name] = types.intp
const_assign = ir.Assign(const_node, const_var, loc)
out_ir.append(const_assign)
index_vars = []
for i in reversed(range(ndims)):
size_var = size_vars[i]
size_const = size_consts[i]
if size_const == 1:
index_vars.append(const_var)
else:
index_vars.append(all_parfor_indices[ind_offset + i])
index_vars = list(reversed(index_vars))
tuple_call = ir.Expr.build_tuple(index_vars, loc)
tuple_assign = ir.Assign(tuple_call, tuple_var, loc)
out_ir.append(tuple_assign)
index_var = tuple_var
ir_expr = ir.Expr.getitem(var, index_var, loc)
calltypes[ir_expr] = signature(el_typ, typemap[var.name],
typemap[index_var.name])
return ir_expr
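# Hedged example of the broadcast handling above: with parfor indices
# (i1, i2, i3) and an operand of shape (c1, 1, c3), the generated access tuple
# is (i1, $const_ind_0, i3); a 0-d operand is first np.ravel()'ed and indexed
# with a constant 0, and a 1-d operand just uses the last parfor index.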
def _find_func_var(typemap, func, avail_vars):
"""find variable in typemap which represents the function func.
"""
for v in avail_vars:
t = typemap[v]
# Function types store actual functions in typing_key.
if isinstance(t, Function) and t.typing_key == func:
return v
raise RuntimeError("ufunc call variable not found")
def lower_parfor_sequential(typingctx, func_ir, typemap, calltypes):
ir_utils._max_label = max(ir_utils._max_label,
ir_utils.find_max_label(func_ir.blocks))
parfor_found = False
new_blocks = {}
for (block_label, block) in func_ir.blocks.items():
block_label, parfor_found = _lower_parfor_sequential_block(
block_label, block, new_blocks, typemap, calltypes, parfor_found)
# old block stays either way
new_blocks[block_label] = block
func_ir.blocks = new_blocks
# rename only if parfor found and replaced (avoid test_flow_control error)
if parfor_found:
func_ir.blocks = rename_labels(func_ir.blocks)
dprint_func_ir(func_ir, "after parfor sequential lowering")
simplify(func_ir, typemap, calltypes)
dprint_func_ir(func_ir, "after parfor sequential simplify")
return
def _lower_parfor_sequential_block(
block_label,
block,
new_blocks,
typemap,
calltypes,
parfor_found):
scope = block.scope
i = _find_first_parfor(block.body)
while i != -1:
parfor_found = True
inst = block.body[i]
loc = inst.init_block.loc
# split block across parfor
prev_block = ir.Block(scope, loc)
prev_block.body = block.body[:i]
block.body = block.body[i + 1:]
# previous block jump to parfor init block
init_label = next_label()
prev_block.body.append(ir.Jump(init_label, loc))
new_blocks[init_label] = inst.init_block
new_blocks[block_label] = prev_block
block_label = next_label()
ndims = len(inst.loop_nests)
for i in range(ndims):
loopnest = inst.loop_nests[i]
# create range block for loop
range_label = next_label()
header_label = next_label()
range_block = mk_range_block(
typemap,
loopnest.start,
loopnest.stop,
loopnest.step,
calltypes,
scope,
loc)
range_block.body[-1].target = header_label # fix jump target
phi_var = range_block.body[-2].target
new_blocks[range_label] = range_block
header_block = mk_loop_header(typemap, phi_var, calltypes,
scope, loc)
header_block.body[-2].target = loopnest.index_variable
new_blocks[header_label] = header_block
# jump to this new inner loop
if i == 0:
inst.init_block.body.append(ir.Jump(range_label, loc))
header_block.body[-1].falsebr = block_label
else:
new_blocks[prev_header_label].body[-1].truebr = range_label
header_block.body[-1].falsebr = prev_header_label
prev_header_label = header_label # to set truebr next loop
# last body block jump to inner most header
body_last_label = max(inst.loop_body.keys())
inst.loop_body[body_last_label].body.append(
ir.Jump(header_label, loc))
# inner most header jumps to first body block
body_first_label = min(inst.loop_body.keys())
header_block.body[-1].truebr = body_first_label
# add parfor body to blocks
for (l, b) in inst.loop_body.items():
l, parfor_found = _lower_parfor_sequential_block(
l, b, new_blocks, typemap, calltypes, parfor_found)
new_blocks[l] = b
i = _find_first_parfor(block.body)
return block_label, parfor_found
def _find_first_parfor(body):
for (i, inst) in enumerate(body):
if isinstance(inst, Parfor) and not inst.no_sequential_lowering:
return i
return -1
def get_parfor_params(blocks):
"""find variables used in body of parfors from outside and save them.
computed as live variables at entry of first block.
"""
# since parfor wrap creates a back-edge to first non-init basic block,
# live_map[first_non_init_block] contains variables defined in parfor body
# that could be undefined before. So we only consider variables that are
# actually defined before the parfor body in the program.
parfor_ids = set()
pre_defs = set()
_, all_defs = compute_use_defs(blocks)
topo_order = find_topo_order(blocks)
for label in topo_order:
block = blocks[label]
for i, parfor in _find_parfors(block.body):
# find variable defs before the parfor in the same block
dummy_block = ir.Block(block.scope, block.loc)
dummy_block.body = block.body[:i]
before_defs = compute_use_defs({0: dummy_block}).defmap[0]
pre_defs |= before_defs
parfor.params = get_parfor_params_inner(parfor, pre_defs)
parfor_ids.add(parfor.id)
pre_defs |= all_defs[label]
return parfor_ids
def get_parfor_params_inner(parfor, pre_defs):
blocks = wrap_parfor_blocks(parfor)
cfg = compute_cfg_from_blocks(blocks)
usedefs = compute_use_defs(blocks)
live_map = compute_live_map(cfg, blocks, usedefs.usemap, usedefs.defmap)
parfor_ids = get_parfor_params(blocks)
if config.DEBUG_ARRAY_OPT_STATS:
n_parfors = len(parfor_ids)
if n_parfors > 0:
after_fusion = ("After fusion" if self.options.fusion
else "With fusion disabled")
print(('After fusion, parallel for-loop {} has '
'{} nested Parfor(s) #{}.').format(
after_fusion, parfor.id, n_parfors, parfor_ids))
unwrap_parfor_blocks(parfor)
keylist = sorted(live_map.keys())
init_block = keylist[0]
first_non_init_block = keylist[1]
before_defs = usedefs.defmap[init_block] | pre_defs
params = live_map[first_non_init_block] & before_defs
return params
def _find_parfors(body):
for i, inst in enumerate(body):
if isinstance(inst, Parfor):
yield i, inst
def get_parfor_outputs(parfor, parfor_params):
"""get arrays that are written to inside the parfor and need to be passed
as parameters to gufunc.
"""
# FIXME: The following assumes the target of all SetItem are outputs,
# which is wrong!
last_label = max(parfor.loop_body.keys())
outputs = []
for blk in parfor.loop_body.values():
for stmt in blk.body:
if isinstance(stmt, ir.SetItem):
if stmt.index.name == parfor.index_var.name:
outputs.append(stmt.target.name)
# make sure these written arrays are in parfor parameters (live coming in)
outputs = list(set(outputs) & set(parfor_params))
return sorted(outputs)
def get_parfor_reductions(parfor, parfor_params, calltypes, reductions=None,
reduce_varnames=None, param_uses=None, param_nodes=None,
var_to_param=None):
"""find variables that are updated using their previous values and an array
item accessed with parfor index, e.g. s = s+A[i]
"""
if reductions is None:
reductions = {}
if reduce_varnames is None:
reduce_varnames = []
# for each param variable, find what other variables are used to update it
# also, keep the related nodes
if param_uses is None:
param_uses = defaultdict(list)
if param_nodes is None:
param_nodes = defaultdict(list)
if var_to_param is None:
var_to_param = {}
blocks = wrap_parfor_blocks(parfor)
topo_order = find_topo_order(blocks)
topo_order = topo_order[1:] # ignore init block
unwrap_parfor_blocks(parfor)
for label in reversed(topo_order):
for stmt in reversed(parfor.loop_body[label].body):
if (isinstance(stmt, ir.Assign)
and (stmt.target.name in parfor_params
or stmt.target.name in var_to_param)):
lhs = stmt.target.name
rhs = stmt.value
cur_param = lhs if lhs in parfor_params else var_to_param[lhs]
used_vars = []
if isinstance(rhs, ir.Var):
used_vars = [rhs.name]
elif isinstance(rhs, ir.Expr):
used_vars = [v.name for v in stmt.value.list_vars()]
param_uses[cur_param].extend(used_vars)
for v in used_vars:
var_to_param[v] = cur_param
# save copy of dependent stmt
stmt_cp = copy.deepcopy(stmt)
if stmt.value in calltypes:
calltypes[stmt_cp.value] = calltypes[stmt.value]
param_nodes[cur_param].append(stmt_cp)
if isinstance(stmt, Parfor):
# recursive parfors can have reductions like test_prange8
get_parfor_reductions(stmt, parfor_params, calltypes,
reductions, reduce_varnames, param_uses, param_nodes, var_to_param)
for param, used_vars in param_uses.items():
# a parameter is a reduction variable if its value is used to update it
# check reduce_varnames since recursive parfors might have processed
# param already
if param in used_vars and param not in reduce_varnames:
reduce_varnames.append(param)
param_nodes[param].reverse()
reduce_nodes = get_reduce_nodes(param, param_nodes[param])
init_val = guard(get_reduction_init, reduce_nodes)
reductions[param] = (init_val, reduce_nodes)
return reduce_varnames, reductions
def get_reduction_init(nodes):
"""
Get initial value for known reductions.
Currently, only += and *= are supported. We assume the inplace_binop node
is followed by an assignment.
"""
require(len(nodes) >=2)
require(isinstance(nodes[-1].value, ir.Var))
require(nodes[-2].target.name == nodes[-1].value.name)
acc_expr = nodes[-2].value
require(isinstance(acc_expr, ir.Expr) and acc_expr.op=='inplace_binop')
if acc_expr.fn == '+=':
return 0
if acc_expr.fn == '*=':
return 1
return None
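# Illustrative sketch (not part of the original pass): for a reduction like
# `s += A[i]`, the trailing nodes typically look like
#   $acc = inplace_binop('+=', '+', s, $val)   # nodes[-2]
#   s = $acc                                   # nodes[-1]
# so get_reduction_init returns 0; for `s *= A[i]` it returns 1.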
def get_reduce_nodes(name, nodes):
"""
Get nodes that combine the reduction variable with a sentinel variable.
Recognizes the first node that combines the reduction variable with another
variable.
"""
reduce_nodes = None
for i, stmt in enumerate(nodes):
lhs = stmt.target.name
rhs = stmt.value
if isinstance(stmt.value, ir.Expr):
in_vars = set(v.name for v in stmt.value.list_vars())
if name in in_vars:
args = get_expr_args(stmt.value)
args.remove(name)
assert len(args) == 1
replace_vars_inner(stmt.value, {args[0]:
ir.Var(stmt.target.scope, name+"#init", stmt.target.loc)})
reduce_nodes = nodes[i:]
break
assert reduce_nodes, "Invalid reduction format"
return reduce_nodes
def get_expr_args(expr):
"""
Get arguments of an expression node
"""
if expr.op in ['binop', 'inplace_binop']:
return [expr.lhs.name, expr.rhs.name]
if expr.op == 'call':
return [v.name for v in expr.args]
raise NotImplementedError("get arguments for expression {}".format(expr))
def visit_parfor_pattern_vars(parfor, callback, cbdata):
# currently, only stencil pattern has variables
for pattern in parfor.patterns:
if pattern[0] == 'stencil':
left_lengths = pattern[1][0]
for i in range(len(left_lengths)):
if isinstance(left_lengths[i], ir.Var):
left_lengths[i] = visit_vars_inner(left_lengths[i],
callback, cbdata)
right_lengths = pattern[1][1]
for i in range(len(right_lengths)):
if isinstance(right_lengths[i], ir.Var):
right_lengths[i] = visit_vars_inner(right_lengths[i],
callback, cbdata)
def visit_vars_parfor(parfor, callback, cbdata):
if config.DEBUG_ARRAY_OPT == 1:
print("visiting parfor vars for:", parfor)
print("cbdata: ", sorted(cbdata.items()))
for l in parfor.loop_nests:
l.index_variable = visit_vars_inner(l.index_variable, callback, cbdata)
if isinstance(l.start, ir.Var):
l.start = visit_vars_inner(l.start, callback, cbdata)
if isinstance(l.stop, ir.Var):
l.stop = visit_vars_inner(l.stop, callback, cbdata)
if isinstance(l.step, ir.Var):
l.step = visit_vars_inner(l.step, callback, cbdata)
visit_vars({-1: parfor.init_block}, callback, cbdata)
visit_parfor_pattern_vars(parfor, callback, cbdata)
visit_vars(parfor.loop_body, callback, cbdata)
return
# register the handler that visits parfor variables
ir_utils.visit_vars_extensions[Parfor] = visit_vars_parfor
def parfor_defs(parfor, use_set=None, def_set=None):
"""list variables written in this parfor by recursively
calling compute_use_defs() on body and combining block defs.
"""
if use_set is None:
use_set = set()
if def_set is None:
def_set = set()
blocks = wrap_parfor_blocks(parfor)
uses, defs = compute_use_defs(blocks)
cfg = compute_cfg_from_blocks(blocks)
last_label = max(blocks.keys())
unwrap_parfor_blocks(parfor)
# Conservatively, only add defs for blocks that are definitely executed
# Go through blocks in order, as if they are statements of the block that
# includes the parfor, and update uses/defs.
# no need for topo order of ir_utils
topo_order = cfg.topo_order()
# blocks that dominate last block are definitely executed
definitely_executed = cfg.dominators()[last_label]
# except loop bodies that might not execute
for loop in cfg.loops().values():
definitely_executed -= loop.body
for label in topo_order:
if label in definitely_executed:
# see compute_use_defs() in analysis.py
# variables defined in the block that includes the parfor are not
# uses of that block (are not potentially live in the beginning of
# the block)
use_set.update(uses[label] - def_set)
def_set.update(defs[label])
else:
use_set.update(uses[label] - def_set)
# treat loop variables and size variables as use
loop_vars = {
l.start.name for l in parfor.loop_nests if isinstance(
l.start, ir.Var)}
loop_vars |= {
l.stop.name for l in parfor.loop_nests if isinstance(
l.stop, ir.Var)}
loop_vars |= {
l.step.name for l in parfor.loop_nests if isinstance(
l.step, ir.Var)}
use_set.update(loop_vars)
use_set |= get_parfor_pattern_vars(parfor)
return analysis._use_defs_result(usemap=use_set, defmap=def_set)
analysis.ir_extension_usedefs[Parfor] = parfor_defs
def parfor_insert_dels(parfor, curr_dead_set):
"""insert dels in parfor. input: dead variable set right after parfor.
returns the variables for which del was inserted.
"""
blocks = wrap_parfor_blocks(parfor)
cfg = compute_cfg_from_blocks(blocks)
usedefs = compute_use_defs(blocks)
live_map = compute_live_map(cfg, blocks, usedefs.usemap, usedefs.defmap)
dead_map = compute_dead_maps(cfg, blocks, live_map, usedefs.defmap)
# treat loop variables and size variables as live
loop_vars = {
l.start.name for l in parfor.loop_nests if isinstance(
l.start, ir.Var)}
loop_vars |= {
l.stop.name for l in parfor.loop_nests if isinstance(
l.stop, ir.Var)}
loop_vars |= {
l.step.name for l in parfor.loop_nests if isinstance(
l.step, ir.Var)}
loop_vars |= {l.index_variable.name for l in parfor.loop_nests}
# for var_list in parfor.array_analysis.array_size_vars.values():
# loop_vars |= {v.name for v in var_list if isinstance(v, ir.Var)}
dead_set = set()
for label in blocks.keys():
# only kill vars that are actually dead at the parfor's block
dead_map.internal[label] &= curr_dead_set
dead_map.internal[label] -= loop_vars
dead_set |= dead_map.internal[label]
dead_map.escaping[label] &= curr_dead_set
dead_map.escaping[label] -= loop_vars
dead_set |= dead_map.escaping[label]
# dummy class to replace func_ir. _patch_var_dels only accesses blocks
class DummyFuncIR(object):
def __init__(self, blocks):
self.blocks = blocks
post_proc = postproc.PostProcessor(DummyFuncIR(blocks))
post_proc._patch_var_dels(dead_map.internal, dead_map.escaping)
unwrap_parfor_blocks(parfor)
return dead_set | loop_vars
postproc.ir_extension_insert_dels[Parfor] = parfor_insert_dels
# reorder statements to maximize fusion
def maximize_fusion(func_ir, blocks):
call_table, _ = get_call_table(blocks)
for block in blocks.values():
order_changed = True
while order_changed:
order_changed = False
i = 0
while i < len(block.body) - 2:
stmt = block.body[i]
next_stmt = block.body[i + 1]
# swap only parfors with non-parfors
# don't reorder calls with side effects (e.g. file close)
# only read-read dependencies are OK
# make sure there is no write-write, write-read dependencies
if (isinstance(
stmt, Parfor) and not isinstance(
next_stmt, Parfor) and not isinstance(
next_stmt, ir.Print)
and (not isinstance(next_stmt, ir.Assign)
or has_no_side_effect(
next_stmt.value, set(), call_table)
or guard(is_assert_equiv, func_ir, next_stmt.value))):
stmt_accesses = {v.name for v in stmt.list_vars()}
stmt_writes = get_parfor_writes(stmt)
next_accesses = {v.name for v in next_stmt.list_vars()}
next_writes = get_stmt_writes(next_stmt)
if len((stmt_writes & next_accesses)
| (next_writes & stmt_accesses)) == 0:
block.body[i] = next_stmt
block.body[i + 1] = stmt
order_changed = True
i += 1
return
def is_assert_equiv(func_ir, expr):
func_name, mod_name = find_callname(func_ir, expr)
return func_name == 'assert_equiv'
def get_parfor_writes(parfor):
assert isinstance(parfor, Parfor)
writes = set()
blocks = parfor.loop_body.copy()
blocks[-1] = parfor.init_block
for block in blocks.values():
for stmt in block.body:
writes.update(get_stmt_writes(stmt))
if isinstance(stmt, Parfor):
writes.update(get_parfor_writes(stmt))
return writes
def try_fuse(equiv_set, parfor1, parfor2):
"""try to fuse parfors and return a fused parfor, otherwise return None
"""
dprint("try_fuse trying to fuse \n", parfor1, "\n", parfor2)
# fusion of parfors with different dimensions not supported yet
if len(parfor1.loop_nests) != len(parfor2.loop_nests):
dprint("try_fuse parfors number of dimensions mismatch")
return None
ndims = len(parfor1.loop_nests)
# all loops should be equal length
def is_equiv(x, y):
return x == y or equiv_set.is_equiv(x, y)
for i in range(ndims):
nest1 = parfor1.loop_nests[i]
nest2 = parfor2.loop_nests[i]
if not (is_equiv(nest1.start, nest2.start) and
is_equiv(nest1.stop, nest2.stop) and
is_equiv(nest1.step, nest2.step)):
dprint("try_fuse parfor dimension correlation mismatch", i)
return None
# TODO: make sure parfor1's reduction output is not used in parfor2
# only data parallel loops
if has_cross_iter_dep(parfor1) or has_cross_iter_dep(parfor2):
dprint("try_fuse parfor cross iteration dependency found")
return None
# make sure parfor2's init block isn't using any output of parfor1
parfor1_body_usedefs = compute_use_defs(parfor1.loop_body)
parfor1_body_vardefs = set()
for defs in parfor1_body_usedefs.defmap.values():
parfor1_body_vardefs |= defs
init2_uses = compute_use_defs({0: parfor2.init_block}).usemap[0]
if not parfor1_body_vardefs.isdisjoint(init2_uses):
dprint("try_fuse parfor2 init block depends on parfor1 body")
return None
return fuse_parfors_inner(parfor1, parfor2)
def fuse_parfors_inner(parfor1, parfor2):
# fuse parfor2 into parfor1
# append parfor2's init block on parfor1's
parfor1.init_block.body.extend(parfor2.init_block.body)
# append parfor2's first block to parfor1's last block
parfor2_first_label = min(parfor2.loop_body.keys())
parfor2_first_block = parfor2.loop_body[parfor2_first_label].body
parfor1_first_label = min(parfor1.loop_body.keys())
parfor1_last_label = max(parfor1.loop_body.keys())
parfor1.loop_body[parfor1_last_label].body.extend(parfor2_first_block)
# add parfor2 body blocks to parfor1's except first
parfor1.loop_body.update(parfor2.loop_body)
parfor1.loop_body.pop(parfor2_first_label)
# replace parfor2 indices with parfor1's
ndims = len(parfor1.loop_nests)
index_dict = {parfor2.index_var.name: parfor1.index_var}
for i in range(ndims):
index_dict[parfor2.loop_nests[i].index_variable.name] = parfor1.loop_nests[
i].index_variable
replace_vars(parfor1.loop_body, index_dict)
# re-order labels from min to max
blocks = wrap_parfor_blocks(parfor1, entry_label=parfor1_first_label)
blocks = rename_labels(blocks)
unwrap_parfor_blocks(parfor1, blocks)
nameset = set(x.name for x in index_dict.values())
remove_duplicate_definitions(parfor1.loop_body, nameset)
parfor1.patterns.extend(parfor2.patterns)
if config.DEBUG_ARRAY_OPT_STATS:
print('Parallel for-loop #{} is fused into for-loop #{}.'.format(
parfor2.id, parfor1.id))
return parfor1
def remove_duplicate_definitions(blocks, nameset):
"""Remove duplicated definition for variables in the given nameset, which
is often a result of parfor fusion.
"""
for label, block in blocks.items():
body = block.body
new_body = []
defined = set()
for inst in body:
if isinstance(inst, ir.Assign):
name = inst.target.name
if name in nameset:
if name in defined:
continue
defined.add(name)
new_body.append(inst)
block.body = new_body
return
def has_cross_iter_dep(parfor):
# we conservatively assume there is a cross-iteration dependency when
# the parfor index is used in any expression, since the expression could
# be used for indexing arrays
# TODO: make it more accurate using ud-chains
indices = {l.index_variable for l in parfor.loop_nests}
for b in parfor.loop_body.values():
for stmt in b.body:
# GetItem/SetItem nodes are fine since can't have expression inside
# and only simple indices are possible
if isinstance(stmt, (ir.SetItem, ir.StaticSetItem)):
continue
# tuples are immutable so no expression on parfor possible
if isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Expr):
op = stmt.value.op
if op in ['build_tuple', 'getitem', 'static_getitem']:
continue
# other statements can have potential violations
if not indices.isdisjoint(stmt.list_vars()):
dprint("has_cross_iter_dep found", indices, stmt)
return True
return False
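# Hedged example of what this conservative check means: a body statement like
# `B[i] = A[i] + 1` is fine (indices only appear in getitem/setitem/build_tuple
# nodes), while something like `c = i * 2` uses the parfor index in a general
# expression and makes has_cross_iter_dep() return True, blocking fusion.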
def dprint(*s):
if config.DEBUG_ARRAY_OPT == 1:
print(*s)
def get_parfor_pattern_vars(parfor):
""" get the variables used in parfor pattern information
"""
out = set()
# currently, only stencil pattern has variables
for pattern in parfor.patterns:
if pattern[0] == 'stencil':
left_lengths = pattern[1][0]
right_lengths = pattern[1][1]
for v in left_lengths+right_lengths:
if isinstance(v, ir.Var):
out.add(v.name)
return out
def remove_dead_parfor(parfor, lives, arg_aliases, alias_map, typemap):
""" remove dead code inside parfor including get/sets
"""
# remove dead get/sets in last block
# FIXME: I think that "in the last block" is not sufficient in general. We might need to
# remove from any block.
last_label = max(parfor.loop_body.keys())
last_block = parfor.loop_body[last_label]
# save array values set to replace getitems
saved_values = {}
new_body = []
for stmt in last_block.body:
if (isinstance(stmt, ir.SetItem) and stmt.index.name ==
parfor.index_var.name and stmt.target.name not in lives):
saved_values[stmt.target.name] = stmt.value
if isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Expr):
rhs = stmt.value
if rhs.op == 'getitem' and isinstance(rhs.index, ir.Var):
if rhs.index.name == parfor.index_var.name:
# replace getitem if value saved
stmt.value = saved_values.get(rhs.value.name, rhs)
new_body.append(stmt)
last_block.body = new_body
alias_set = set(alias_map.keys())
# after getitem replacement, remove extra setitems
new_body = []
in_lives = copy.copy(lives)
for stmt in reversed(last_block.body):
# aliases of lives are also live for setitems
alias_lives = in_lives & alias_set
for v in alias_lives:
in_lives |= alias_map[v]
if (isinstance(stmt, ir.SetItem) and stmt.index.name ==
parfor.index_var.name and stmt.target.name not in in_lives):
continue
in_lives |= {v.name for v in stmt.list_vars()}
new_body.append(stmt)
new_body.reverse()
last_block.body = new_body
# process parfor body recursively
remove_dead_parfor_recursive(
parfor, lives, arg_aliases, alias_map, typemap)
# remove parfor if empty
is_empty = len(parfor.init_block.body) == 0
for block in parfor.loop_body.values():
is_empty &= len(block.body) == 0
if is_empty:
return None
return parfor
ir_utils.remove_dead_extensions[Parfor] = remove_dead_parfor
def remove_dead_parfor_recursive(parfor, lives, arg_aliases, alias_map, typemap):
"""create a dummy function from parfor and call remove dead recursively
"""
blocks = parfor.loop_body.copy() # shallow copy is enough
first_body_block = min(blocks.keys())
assert first_body_block > 0 # we are using 0 for init block here
last_label = max(blocks.keys())
return_label = last_label + 1
loc = blocks[last_label].loc
scope = blocks[last_label].scope
blocks[return_label] = ir.Block(scope, loc)
# add dummy jump in init_block for CFG to work
blocks[0] = parfor.init_block
blocks[0].body.append(ir.Jump(first_body_block, loc))
# add lives in a dummy return in the last block to avoid their removal
tuple_var = ir.Var(scope, mk_unique_var("$tuple_var"), loc)
# dummy type for tuple_var
typemap[tuple_var.name] = types.containers.UniTuple(
types.intp, 2)
live_vars = [ir.Var(scope, v, loc) for v in lives]
tuple_call = ir.Expr.build_tuple(live_vars, loc)
blocks[return_label].body.append(ir.Assign(tuple_call, tuple_var, loc))
blocks[return_label].body.append(ir.Return(tuple_var, loc))
branch = ir.Branch(0, first_body_block, return_label, loc)
blocks[last_label].body.append(branch)
# args var including aliases is ok
remove_dead(blocks, arg_aliases, typemap, alias_map, arg_aliases)
typemap.pop(tuple_var.name) # remove dummy tuple type
blocks[0].body.pop() # remove dummy jump
blocks[last_label].body.pop() # remove branch
return
def find_potential_aliases_parfor(parfor, args, typemap, alias_map, arg_aliases):
blocks = wrap_parfor_blocks(parfor)
ir_utils.find_potential_aliases(
blocks, args, typemap, alias_map, arg_aliases)
unwrap_parfor_blocks(parfor)
return
ir_utils.alias_analysis_extensions[Parfor] = find_potential_aliases_parfor
def wrap_parfor_blocks(parfor, entry_label=None):
"""wrap parfor blocks for analysis/optimization like CFG"""
blocks = parfor.loop_body.copy() # shallow copy is enough
if entry_label is None:
entry_label = min(blocks.keys())
assert entry_label > 0 # we are using 0 for init block here
# add dummy jump in init_block for CFG to work
blocks[0] = parfor.init_block
blocks[0].body.append(ir.Jump(entry_label, blocks[0].loc))
for block in blocks.values():
if len(block.body) == 0 or (not block.body[-1].is_terminator):
block.body.append(ir.Jump(entry_label, block.loc))
return blocks
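# Typical (hedged) usage pattern, as seen throughout this file: wrap the parfor,
# run a CFG-based analysis over the resulting block dict, then unwrap so the
# parfor node is restored:
#   blocks = wrap_parfor_blocks(parfor)
#   cfg = compute_cfg_from_blocks(blocks)
#   usedefs = compute_use_defs(blocks)
#   unwrap_parfor_blocks(parfor)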
def unwrap_parfor_blocks(parfor, blocks=None):
"""
unwrap parfor blocks after analysis/optimization.
Allows changes to the parfor loop.
"""
if blocks is not None:
# make sure init block isn't removed
init_block_label = min(blocks.keys())
# update loop body blocks
blocks.pop(init_block_label)
parfor.loop_body = blocks
# make sure dummy jump to loop body isn't altered
first_body_label = min(parfor.loop_body.keys())
assert isinstance(parfor.init_block.body[-1], ir.Jump)
# remove dummy jump to loop body
parfor.init_block.body.pop()
# make sure dummy jump back to loop body isn't altered
for block in parfor.loop_body.values():
if (isinstance(block.body[-1], ir.Jump) and
block.body[-1].target == first_body_label):
# remove dummy jump back to loop
block.body.pop()
return
def get_copies_parfor(parfor, typemap):
"""find copies generated/killed by parfor"""
blocks = wrap_parfor_blocks(parfor)
in_copies_parfor, out_copies_parfor = copy_propagate(blocks, typemap)
in_gen_copies, in_extra_kill = get_block_copies(blocks, typemap)
unwrap_parfor_blocks(parfor)
# parfor's extra kill is kills of its init block,
# and all possible gens and kills of its body loop.
# body doesn't gen and only kills since it may or may not run
# TODO: save copies that are repeated in parfor
kill_set = in_extra_kill[0]
for label in parfor.loop_body.keys():
kill_set |= {l for l, r in in_gen_copies[label]}
kill_set |= in_extra_kill[label]
# gen copies is copies generated by init that are not killed by body
last_label = max(parfor.loop_body.keys())
gens = out_copies_parfor[last_label] & in_gen_copies[0]
if config.DEBUG_ARRAY_OPT == 1:
print("copy propagate parfor gens:", gens, "kill_set", kill_set)
return gens, kill_set
ir_utils.copy_propagate_extensions[Parfor] = get_copies_parfor
def apply_copies_parfor(parfor, var_dict, name_var_table, ext_func, ext_data,
typemap, calltypes, save_copies):
"""apply copy propagate recursively in parfor"""
for i, pattern in enumerate(parfor.patterns):
if pattern[0] == 'stencil':
parfor.patterns[i] = ('stencil',
replace_vars_inner(pattern[1], var_dict))
blocks = wrap_parfor_blocks(parfor)
# add dummy assigns for each copy
assign_list = []
for lhs_name, rhs in var_dict.items():
assign_list.append(ir.Assign(rhs, name_var_table[lhs_name],
ir.Loc("dummy", -1)))
blocks[0].body = assign_list + blocks[0].body
in_copies_parfor, out_copies_parfor = copy_propagate(blocks, typemap)
apply_copy_propagate(blocks, in_copies_parfor, name_var_table, typemap,
calltypes, ext_func, ext_data, save_copies)
unwrap_parfor_blocks(parfor)
# remove dummy assignments
blocks[0].body = blocks[0].body[len(assign_list):]
return
ir_utils.apply_copy_propagate_extensions[Parfor] = apply_copies_parfor
def push_call_vars(blocks, saved_globals, saved_getattrs):
"""push call variables to right before their call site.
assuming one global/getattr is created for each call site and control flow
doesn't change it.
"""
for block in blocks.values():
new_body = []
# global/attr variables that are defined in this block already,
# no need to reassign them
block_defs = set()
for stmt in block.body:
if isinstance(stmt, ir.Assign):
rhs = stmt.value
lhs = stmt.target
if (isinstance(rhs, ir.Global)):
# and isinstance(rhs.value, pytypes.ModuleType)):
saved_globals[lhs.name] = stmt
block_defs.add(lhs.name)
# continue
elif isinstance(rhs, ir.Expr) and rhs.op == 'getattr':
if (rhs.value.name in saved_globals
or rhs.value.name in saved_getattrs):
saved_getattrs[lhs.name] = stmt
block_defs.add(lhs.name)
# continue
elif isinstance(stmt, Parfor):
pblocks = stmt.loop_body.copy()
pblocks[-1] = stmt.init_block
push_call_vars(pblocks, saved_globals, saved_getattrs)
new_body.append(stmt)
continue
for v in stmt.list_vars():
new_body += _get_saved_call_nodes(v.name, saved_globals,
saved_getattrs, block_defs)
new_body.append(stmt)
block.body = new_body
return
def _get_saved_call_nodes(fname, saved_globals, saved_getattrs, block_defs):
nodes = []
while (fname not in block_defs and (fname in saved_globals
or fname in saved_getattrs)):
if fname in saved_globals:
nodes.append(saved_globals[fname])
block_defs.add(saved_globals[fname].target.name)
fname = '_PA_DONE'
elif fname in saved_getattrs:
up_name = saved_getattrs[fname].value.value.name
nodes.append(saved_getattrs[fname])
block_defs.add(saved_getattrs[fname].target.name)
fname = up_name
nodes.reverse()
return nodes
def repr_arrayexpr(arrayexpr):
"""Extract operators from arrayexpr to represent it abstractly as a string.
"""
if isinstance(arrayexpr, tuple):
opr = arrayexpr[0]
args = arrayexpr[1]
if len(args) == 1:
return '({}{})'.format(opr, repr_arrayexpr(args[0]))
else:
return '({})'.format(opr.join([ repr_arrayexpr(x) for x in args ]))
else:
return '_'
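# Hedged usage sketch: for an expression tree like ('+', [a, ('*', [b, c])])
# where a, b, c are ir.Var nodes, repr_arrayexpr returns '(_+(_*_))', which is
# the abstract form used in the parfor pattern string built in
# _arrayexpr_to_parfor.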
def fix_generator_types(generator_info, return_type, typemap):
"""postproc updates generator_info with live variables after transformations
but generator variables have types in return_type that are updated here.
"""
new_state_types = []
for v in generator_info.state_vars:
new_state_types.append(typemap[v])
return_type.state_types = tuple(new_state_types)
return
def get_parfor_call_table(parfor, call_table=None, reverse_call_table=None):
if call_table is None:
call_table = {}
if reverse_call_table is None:
reverse_call_table = {}
blocks = wrap_parfor_blocks(parfor)
call_table, reverse_call_table = get_call_table(blocks, call_table,
reverse_call_table)
unwrap_parfor_blocks(parfor)
return call_table, reverse_call_table
ir_utils.call_table_extensions[Parfor] = get_parfor_call_table
def get_parfor_tuple_table(parfor, tuple_table=None):
if tuple_table is None:
tuple_table = {}
blocks = wrap_parfor_blocks(parfor)
tuple_table = ir_utils.get_tuple_table(blocks, tuple_table)
unwrap_parfor_blocks(parfor)
return tuple_table
ir_utils.tuple_table_extensions[Parfor] = get_parfor_tuple_table
def get_parfor_array_accesses(parfor, accesses=None):
if accesses is None:
accesses = {}
blocks = wrap_parfor_blocks(parfor)
accesses = ir_utils.get_array_accesses(blocks, accesses)
unwrap_parfor_blocks(parfor)
return accesses
# register the parfor handler for array access analysis
ir_utils.array_accesses_extensions[Parfor] = get_parfor_array_accesses
def parfor_add_offset_to_labels(parfor, offset):
blocks = wrap_parfor_blocks(parfor)
blocks = add_offset_to_labels(blocks, offset)
blocks[0] = blocks[offset]
blocks.pop(offset)
unwrap_parfor_blocks(parfor, blocks)
return
ir_utils.add_offset_to_labels_extensions[Parfor] = parfor_add_offset_to_labels
def parfor_find_max_label(parfor):
blocks = wrap_parfor_blocks(parfor)
max_label = ir_utils.find_max_label(blocks)
unwrap_parfor_blocks(parfor)
return max_label
ir_utils.find_max_label_extensions[Parfor] = parfor_find_max_label
def parfor_typeinfer(parfor, typeinferer):
save_blocks = typeinferer.blocks
blocks = wrap_parfor_blocks(parfor)
index_vars = [l.index_variable for l in parfor.loop_nests]
# no need to handle parfor.index_var (tuple of variables), since it will be
# assigned to a tuple from individual indices
first_block = min(blocks.keys())
loc = blocks[first_block].loc
index_assigns = [ir.Assign(ir.Const(1, loc), v, loc) for v in index_vars]
save_first_block_body = blocks[first_block].body
blocks[first_block].body = index_assigns + blocks[first_block].body
typeinferer.blocks = blocks
typeinferer.build_constraint()
typeinferer.blocks = save_blocks
blocks[first_block].body = save_first_block_body
unwrap_parfor_blocks(parfor)
typeinfer.typeinfer_extensions[Parfor] = parfor_typeinfer
@infer_global(reduce)
class ReduceInfer(AbstractTemplate):
def generic(self, args, kws):
assert not kws
assert len(args) == 3
assert isinstance(args[1], types.Array)
return signature(args[1].dtype, *args)
| 41.036473 | 107 | 0.602123 | 13,142 | 102,386 | 4.450616 | 0.064602 | 0.016003 | 0.008207 | 0.009301 | 0.393178 | 0.320038 | 0.258779 | 0.211814 | 0.187605 | 0.167995 | 0 | 0.005807 | 0.310433 | 102,386 | 2,494 | 108 | 41.052927 | 0.82264 | 0.143887 | 0 | 0.299408 | 0 | 0 | 0.027527 | 0.000864 | 0 | 0 | 0 | 0.002807 | 0.011309 | 1 | 0.048465 | false | 0.002693 | 0.012924 | 0.00377 | 0.117394 | 0.018309 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
57ab837cea53dac3757c6a0dbc81ebecae55baea | 3,031 | py | Python | bw2io/strategies/ecospold1_allocation.py | pjamesjoyce/brightway2-io | 142fc26e2ffc47d8ec474386ee93ab2737a089ce | [
"BSD-3-Clause"
] | null | null | null | bw2io/strategies/ecospold1_allocation.py | pjamesjoyce/brightway2-io | 142fc26e2ffc47d8ec474386ee93ab2737a089ce | [
"BSD-3-Clause"
] | null | null | null | bw2io/strategies/ecospold1_allocation.py | pjamesjoyce/brightway2-io | 142fc26e2ffc47d8ec474386ee93ab2737a089ce | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals, division
from eight import *
import copy
def delete_integer_codes(data):
"""Delete integer codes completely from extracted ecospold1 datasets"""
for ds in data:
if 'code' in ds and isinstance(ds['code'], int):
del ds['code']
for exc in ds.get('exchanges', []):
if 'code' in exc and isinstance(exc['code'], int):
del exc['code']
return data
def clean_integer_codes(data):
"""Convert integer activity codes to strings and delete integer codes from exchanges (they can't be believed)."""
for ds in data:
if 'code' in ds and isinstance(ds['code'], int):
ds['code'] = str(ds['code'])
for exc in ds.get('exchanges', []):
if 'code' in exc and isinstance(exc['code'], int):
del exc['code']
return data
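# Quick illustration of the difference between the two strategies above
# (hypothetical minimal dataset; doctest-style, not executed on import):
#
#     >>> data = [{'code': 42, 'exchanges': [{'code': 7, 'amount': 1}]}]
#     >>> delete_integer_codes(copy.deepcopy(data))
#     [{'exchanges': [{'amount': 1}]}]
#     >>> clean_integer_codes(copy.deepcopy(data))
#     [{'code': '42', 'exchanges': [{'amount': 1}]}]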
def es1_allocate_multioutput(data):
"""This strategy allocates multioutput datasets to new datasets.
This deletes the multioutput dataset, breaking any existing linking. This shouldn't be a concern, as you shouldn't link to a multioutput dataset in any case.
Note that multiple allocations for the same product and input will result in undefined behavior.
"""
activities = []
for ds in data:
if ds.get('allocations'):
for activity in allocate_exchanges(ds):
del activity['allocations']
activities.append(activity)
else:
activities.append(ds)
return activities
def allocate_exchanges(ds):
"""
Take a dataset, which has multiple outputs, and return a list of allocated datasets.
The allocation data structure looks like:
.. code-block:: python
{
'exchanges': [integer codes for biosphere flows, ...],
'fraction': out of 100,
'reference': integer codes
}
We assume that the allocation factor for each coproduct is always 100 percent.
"""
new_datasets = []
coproducts = [exc for exc in ds["exchanges"]
if exc['type'] == 'production']
multipliers = {}
for obj in ds['allocations']:
if not obj['fraction']:
continue
for exc_id in obj['exchanges']:
multipliers.setdefault(obj['reference'], {})[exc_id] = \
obj['fraction'] / 100
exchange_dict = {exc['code']: exc for exc in ds['exchanges']
if exc['type'] != 'production'}
for coproduct in coproducts:
new_ds = copy.deepcopy(ds)
new_ds['exchanges'] = [
rescale_exchange(exchange_dict[exc_id], scale)
for exc_id, scale
in list(multipliers[coproduct['code']].items())
# Exclude self-allocation; assume 100%
if exc_id != coproduct['code']
] + [coproduct]
new_datasets.append(new_ds)
return new_datasets
def rescale_exchange(exc, scale):
exc = copy.deepcopy(exc)
exc['amount'] *= scale
return exc
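# Worked example of the allocation structure documented in
# allocate_exchanges (hypothetical dataset, fractions in percent):
#
#     ds = {
#         'exchanges': [
#             {'code': 1, 'type': 'production', 'amount': 1},   # coproduct A
#             {'code': 2, 'type': 'production', 'amount': 1},   # coproduct B
#             {'code': 3, 'type': 'biosphere',  'amount': 10},  # shared input
#         ],
#         'allocations': [
#             {'reference': 1, 'exchanges': [3], 'fraction': 75},
#             {'reference': 2, 'exchanges': [3], 'fraction': 25},
#         ],
#     }
#
# allocate_exchanges(ds) returns two datasets: one for coproduct 1 whose
# biosphere exchange is rescaled to 10 * 0.75 = 7.5, and one for
# coproduct 2 with 10 * 0.25 = 2.5; each keeps its own production exchange.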
| 31.905263 | 161 | 0.612669 | 375 | 3,031 | 4.874667 | 0.336 | 0.015317 | 0.017505 | 0.021882 | 0.192013 | 0.184902 | 0.184902 | 0.184902 | 0.184902 | 0.184902 | 0 | 0.006859 | 0.278456 | 3,031 | 94 | 162 | 32.244681 | 0.828989 | 0.307819 | 0 | 0.232143 | 0 | 0 | 0.101892 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.089286 | false | 0 | 0.053571 | 0 | 0.232143 | 0.017857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
57ace447434057e0e10b6fb061f135512a29a019 | 905 | py | Python | feedjack/migrations/0002_auto_20150203_0401.py | allo-/feedjack | 8112474504b7265d6722b74b89171df0e6dc7688 | [
"BSD-3-Clause"
] | 2 | 2017-12-19T17:11:04.000Z | 2020-08-19T21:15:51.000Z | feedjack/migrations/0002_auto_20150203_0401.py | allo-/feedjack | 8112474504b7265d6722b74b89171df0e6dc7688 | [
"BSD-3-Clause"
] | 32 | 2016-03-12T13:57:28.000Z | 2017-03-02T11:11:59.000Z | feedjack/migrations/0002_auto_20150203_0401.py | allo-/feedjack | 8112474504b7265d6722b74b89171df0e6dc7688 | [
"BSD-3-Clause"
] | 2 | 2018-04-06T11:55:47.000Z | 2020-01-12T00:22:04.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('feedjack', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='feed',
name='verify_tls_certs',
            field=models.BooleanField(default=True, help_text='If https connections are used, this option allows disabling TLS certificate verification.', verbose_name='verify TLS certificates, if any'),
preserve_default=True,
),
migrations.AlterField(
model_name='feed',
name='skip_errors',
field=models.BooleanField(default=False, help_text='Try to be as tolerant to the feed contents as possible during update.', verbose_name='skip non-critical errors'),
preserve_default=True,
),
]
| 33.518519 | 204 | 0.647514 | 99 | 905 | 5.747475 | 0.636364 | 0.057996 | 0.045694 | 0.059754 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007396 | 0.253039 | 905 | 26 | 205 | 34.807692 | 0.83432 | 0.023204 | 0 | 0.3 | 0 | 0 | 0.304989 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
57af28483f535ce3253b1114cbcfca76a66a1188 | 7,220 | py | Python | rbb_server/src/rbb_server/controllers/extraction_controller.py | SK4P3/rbb_core | 618617270314af5335de30179072244e1f440c4c | [
"MIT"
] | 55 | 2019-05-09T06:43:05.000Z | 2021-12-08T05:56:43.000Z | rbb_server/src/rbb_server/controllers/extraction_controller.py | SK4P3/rbb_core | 618617270314af5335de30179072244e1f440c4c | [
"MIT"
] | 5 | 2019-09-08T15:33:28.000Z | 2021-04-17T17:30:53.000Z | rbb_server/src/rbb_server/controllers/extraction_controller.py | SK4P3/rbb_core | 618617270314af5335de30179072244e1f440c4c | [
"MIT"
] | 16 | 2019-08-08T07:15:35.000Z | 2021-12-07T15:34:41.000Z | # AMZ-Driverless
# Copyright (c) 2019 Authors:
# - Huub Hendrikx <hhendrik@ethz.ch>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import connexion
from rbb_server.helper.permissions import Permissions
from rbb_swagger_server.models.bag_extraction_configuration import BagExtractionConfiguration
from rbb_swagger_server.models.error import Error
from rbb_server.model.database import Database, RosbagExtractionConfiguration, RosbagStore
from sqlalchemy.orm.query import Query
import rbb_server.helper.auth as auth
from rbb_server.helper.error import handle_exception
@auth.requires_auth_with_permission(Permissions.ExtractionConfigRead)
def get_extraction_config(config_name, user=None):
"""Get configuration details
:param config_name: Name of the configuration
:type config_name: str
:rtype: BagExtractionConfiguration
"""
try:
q = Database.get_session().query(RosbagExtractionConfiguration).\
filter(RosbagExtractionConfiguration.name == config_name) #type: Query
if q.count():
return q[0].to_swagger_model(user=user)
else:
return Error(code=404, message="Configuration not found"), 404
except Exception as e:
return handle_exception(e)
@auth.requires_auth_with_permission(Permissions.ExtractionConfigRead)
def list_extraction_configurations(user=None):
"""List available configurations
:rtype: List[BagExtractionConfiguration]
"""
try:
q = Database.get_session().query(RosbagExtractionConfiguration)
return [p.to_swagger_model(user=user) for p in q]
except Exception as e:
return handle_exception(e)
@auth.requires_auth_with_permission(Permissions.ExtractionConfigRead)
def get_store_extraction_configs(store_name, user=None):
"""Get list of auto extraction configs
:param store_name: Name of the store
:type store_name: str
:rtype: List[BagExtractionConfiguration]
"""
try:
q = Database.get_session().query(RosbagStore).filter(RosbagStore.name == store_name) #type: Query
if q.count():
return [x.to_swagger_model(user=user) for x in q.first().auto_extraction_configs]
else:
return Error(code=404, message="Store not found"), 404
except Exception as e:
return handle_exception(e)
@auth.requires_auth_with_permission(Permissions.BagStoreWrite)
def put_store_extraction_configs(store_name, config_list, user=None):
"""Create/update store
:param store_name: Name of the store
:type store_name: str
    :param config_list: List of configuration names
    :type config_list: List[str]
:rtype: List[str]
"""
session = Database.get_session()
try:
q = session.query(RosbagStore).filter(RosbagStore.name == store_name) #type: Query
if q.first():
store = q.first()
# Load configurations
config_dict = {}
for config in config_list:
if config not in config_dict:
q = session.query(RosbagExtractionConfiguration)\
.filter(RosbagExtractionConfiguration.name == config)
if not q.first():
return Error(code=404, message="Configuration '%s' not found" % config), 404
config_dict[config] = q.first()
# Assign new configurations
store.auto_extraction_configs = list(config_dict.values())
session.commit()
return [x.name for x in store.auto_extraction_configs]
else:
return Error(code=404, message="Store not found"), 404
except Exception as e:
return handle_exception(e)
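# Request/response sketch for the handler above (illustrative: the concrete
# route and payload shape come from the project's swagger definition, not
# from this module):
#
#     PUT .../stores/<store_name>/...   body: ["config_a", "config_b", "config_a"]
#     -> 200 with ["config_a", "config_b"]   (duplicates collapse via config_dict)
#     -> 404 if the store or any named configuration does not exist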
@auth.requires_auth_with_permission(Permissions.ExtractionConfigWrite)
def put_extraction_configuration(config_name, configuration_obj, block_on_existing=None, user=None):
"""Create/update configuration
:param config_name: Name of the configuration
:type config_name: str
    :param configuration_obj: Configuration information
    :type configuration_obj: dict | bytes
:rtype: BagExtractionConfiguration
"""
if connexion.request.is_json:
configuration_obj = BagExtractionConfiguration.from_dict(connexion.request.get_json())
if config_name != configuration_obj.name:
return Error(code=400, message="URL and body names don't match"), 400
session = Database.get_session()
try:
        # Look up the configuration by name
q = session.query(RosbagExtractionConfiguration).filter(RosbagExtractionConfiguration.name == config_name) # type: Query
        # Create a new configuration or update the existing one
model = None
if q.count() == 1:
# Existing configuration
if block_on_existing:
return Error(code=1000, message="Already exists."), 400
model = q.first()
else:
model = RosbagExtractionConfiguration()
session.add(model)
model.from_swagger_model(configuration_obj, user=user)
session.commit()
q = session.query(RosbagExtractionConfiguration).filter(RosbagExtractionConfiguration.uid == model.uid)
return q.first().to_swagger_model(user=user), 200
except Exception as e:
session.rollback()
return handle_exception(e)
@auth.requires_auth_with_permission(Permissions.ExtractionConfigWrite)
def delete_extraction_configuration(config_name, user=None): # noqa: E501
"""Delete extraction configuration
:param config_name: Name of the configuration
:type config_name: str
:rtype: None
"""
session = Database.get_session()
try:
        # Look up the configuration by name
q = session.query(RosbagExtractionConfiguration).filter(RosbagExtractionConfiguration.name == config_name) # type: Query
if q.count() == 1:
session.delete(q.first())
session.commit()
return "", 204
else:
return Error(code=404, message="Extraction configuration not found"), 404
except Exception as e:
session.rollback()
return handle_exception(e) | 35.920398 | 129 | 0.699861 | 859 | 7,220 | 5.753201 | 0.23865 | 0.026305 | 0.021246 | 0.024282 | 0.465601 | 0.427964 | 0.382841 | 0.379401 | 0.340146 | 0.303318 | 0 | 0.011007 | 0.219806 | 7,220 | 201 | 130 | 35.920398 | 0.866323 | 0.297507 | 0 | 0.444444 | 0 | 0 | 0.03268 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060606 | false | 0 | 0.111111 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
57b250dd2824cb8b48dd72a2f5d4ad4c82d8ae6c | 4,609 | py | Python | mmdet3d/models/dense_heads/layout_head.py | chetanmreddy/imvoxelnet | 10dd35a96539af7b147be4bb03b0395cc164177e | [
"MIT"
] | 136 | 2021-06-03T06:37:56.000Z | 2022-03-29T13:29:03.000Z | mmdet3d/models/dense_heads/layout_head.py | chetanmreddy/imvoxelnet | 10dd35a96539af7b147be4bb03b0395cc164177e | [
"MIT"
] | 38 | 2021-06-05T12:41:30.000Z | 2022-03-23T07:31:28.000Z | mmdet3d/models/dense_heads/layout_head.py | chetanmreddy/imvoxelnet | 10dd35a96539af7b147be4bb03b0395cc164177e | [
"MIT"
] | 14 | 2021-06-04T11:37:32.000Z | 2022-02-10T03:39:20.000Z | import torch
from torch import nn
from mmdet.models.builder import HEADS, build_loss
from mmdet3d.core.bbox.structures import limit_period
@HEADS.register_module()
class LayoutHead(nn.Module):
def __init__(self,
n_channels,
linear_size,
dropout,
loss_angle=dict(type='SmoothL1Loss', loss_weight=1.),
loss_layout=dict(type='IoU3DLoss', loss_weight=.1)):
super().__init__()
self.angle_mlp = torch.nn.Sequential(
nn.Linear(n_channels, linear_size),
nn.ReLU(inplace=True),
nn.Dropout(p=dropout),
nn.Linear(linear_size, linear_size),
nn.ReLU(inplace=True),
nn.Dropout(p=dropout),
nn.Linear(linear_size, 2)
)
self.layout_mlp = torch.nn.Sequential(
nn.Linear(n_channels, linear_size),
nn.ReLU(inplace=True),
nn.Dropout(p=dropout),
nn.Linear(linear_size, linear_size),
nn.ReLU(inplace=True),
nn.Dropout(p=dropout),
nn.Linear(linear_size, 7)
)
self.loss_angle = build_loss(loss_angle)
self.loss_layout = build_loss(loss_layout)
def init_weights(self):
pass
def forward(self, x, img_metas):
x = x.mean(dim=(2, 3))
angle_features = self.angle_mlp(x)
layout_features = self.layout_mlp(x)
angles, layouts = [], []
for angle_feature, layout_feature, img_meta in zip(angle_features, layout_features, img_metas):
angle, layout = self._forward_single(angle_feature, layout_feature, img_meta)
angles.append(angle)
layouts.append(layout)
return angles, layouts
def _forward_single(self, angle, layout, img_meta):
angle = limit_period(angle)
size = torch.exp(layout[3:6])
# device = layout.device
# center_2d = torch.sigmoid(layout[:2])
# center_z = torch.exp(layout[2])
# intrinsic = torch.tensor(img_meta['lidar2img']['intrinsic'])
# extrinsic = torch.tensor(img_meta['lidar2img']['extrinsic'][0])
# projection = torch.inverse(intrinsic @ extrinsic)[:3, :3].to(device)
# width = torch.tensor(img_meta['ori_shape'][1]).to(device)
# height = torch.tensor(img_meta['ori_shape'][0]).to(device)
# center_2d_3 = center_2d.new_tensor((
# center_2d[0] * width * center_z,
# center_2d[1] * height * center_z,
# center_z
# ))
# center_3d = projection @ center_2d_3
layout = torch.cat((
layout[:3],
size,
layout[6:7]
))
return angle, layout
def loss(self, angles, layouts, img_metas):
angle_losses, layout_losses = [], []
for angle, layout, img_meta in zip(angles, layouts, img_metas):
angle_loss, layout_loss = self._loss_single(angle, layout, img_meta)
angle_losses.append(angle_loss)
layout_losses.append(layout_loss)
return {
'angle_loss': torch.mean(torch.stack(angle_losses)),
'layout_loss': torch.mean(torch.stack(layout_losses))
}
def _loss_single(self, angles, layout, img_meta):
gt_angles = angles.new_tensor(img_meta['lidar2img']['angles'])
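        # sin(a)*cos(b) - cos(a)*sin(b) == sin(a - b), so applying the
        # SmoothL1 loss to these two terms penalizes the periodic error
        # sin(pred - gt) rather than the raw angle difference, staying
        # smooth across the +/-pi wrap-around.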
pitch_loss = self.loss_angle(
torch.sin(angles[0]) * torch.cos(gt_angles[0]),
torch.cos(angles[0]) * torch.sin(gt_angles[0])
)
roll_loss = self.loss_angle(
torch.sin(angles[1]) * torch.cos(gt_angles[1]),
torch.cos(angles[1]) * torch.sin(gt_angles[1])
)
angle_loss = pitch_loss + roll_loss
gt_layout = img_meta['lidar2img']['layout']
gt_layout = torch.cat((
gt_layout.gravity_center,
gt_layout.tensor[:, 3:]
), dim=-1).to(layout.device)
layout_loss = self.loss_layout(layout.unsqueeze(0), gt_layout)
return angle_loss, layout_loss
def get_bboxes(self, angles, layouts, img_metas):
result_angles, result_layouts = [], []
for angle, layout, img_meta in zip(angles, layouts, img_metas):
result_angle, result_layout = self._get_bboxes_single(angle, layout, img_meta)
result_angles.append(result_angle.cpu())
result_layouts.append(result_layout.to('cpu'))
return result_angles, result_layouts
def _get_bboxes_single(self, angle, layout, img_meta):
return angle, img_meta['box_type_3d'](layout.unsqueeze(0), origin=(.5, .5, .5))
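# Minimal shape check (sketch only; real img_metas carry calibration and
# layout ground truth that loss()/get_bboxes() need, which a test would mock):
#
#     head = LayoutHead(n_channels=256, linear_size=128, dropout=.1)
#     x = torch.rand(2, 256, 30, 40)          # (N, C, H, W) feature map
#     angles, layouts = head(x, img_metas=[{}, {}])
#     # angles[i]: 2 values (pitch, roll) wrapped by limit_period;
#     # layouts[i]: 7-vector (center, exp-decoded size, heading)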
| 39.393162 | 103 | 0.601866 | 572 | 4,609 | 4.592657 | 0.18007 | 0.042634 | 0.039589 | 0.041112 | 0.334983 | 0.247431 | 0.181957 | 0.158356 | 0.158356 | 0.158356 | 0 | 0.015588 | 0.276199 | 4,609 | 116 | 104 | 39.732759 | 0.771882 | 0.123237 | 0 | 0.153846 | 0 | 0 | 0.021366 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.087912 | false | 0.010989 | 0.043956 | 0.010989 | 0.208791 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
57b2b427780ac8d2ac556399a9a89014d95095f3 | 5,381 | py | Python | src/pretix/plugins/badges/exporters.py | bsod85/pretix | d86b3a217352f7ad24008685393f9af18fcf6e6c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/pretix/plugins/badges/exporters.py | bsod85/pretix | d86b3a217352f7ad24008685393f9af18fcf6e6c | [
"ECL-2.0",
"Apache-2.0"
] | 7 | 2019-07-08T10:29:54.000Z | 2020-01-08T17:32:07.000Z | src/pretix/plugins/badges/exporters.py | bsod85/pretix | d86b3a217352f7ad24008685393f9af18fcf6e6c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import json
from collections import OrderedDict
from io import BytesIO
from typing import Tuple
from django import forms
from django.conf import settings
from django.contrib.staticfiles import finders
from django.core.files import File
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage
from django.db.models import Exists, OuterRef
from django.db.models.functions import Coalesce
from django.utils.translation import ugettext as _
from jsonfallback.functions import JSONExtract
from PyPDF2 import PdfFileMerger
from reportlab.lib import pagesizes
from reportlab.pdfgen import canvas
from pretix.base.exporter import BaseExporter
from pretix.base.i18n import language
from pretix.base.models import Order, OrderPosition
from pretix.base.pdf import Renderer
from pretix.base.services.orders import OrderError
from pretix.base.settings import PERSON_NAME_SCHEMES
from pretix.plugins.badges.models import BadgeItem, BadgeLayout
def _renderer(event, layout):
if layout is None:
return None
if isinstance(layout.background, File) and layout.background.name:
bgf = default_storage.open(layout.background.name, "rb")
else:
bgf = open(finders.find('pretixplugins/badges/badge_default_a6l.pdf'), "rb")
return Renderer(event, json.loads(layout.layout), bgf)
def render_pdf(event, positions):
Renderer._register_fonts()
renderermap = {
bi.item_id: _renderer(event, bi.layout)
for bi in BadgeItem.objects.select_related('layout').filter(item__event=event)
}
try:
default_renderer = _renderer(event, event.badge_layouts.get(default=True))
except BadgeLayout.DoesNotExist:
default_renderer = None
merger = PdfFileMerger()
    any_rendered = False
for op in positions:
r = renderermap.get(op.item_id, default_renderer)
if not r:
continue
        any_rendered = True
with language(op.order.locale):
buffer = BytesIO()
p = canvas.Canvas(buffer, pagesize=pagesizes.A4)
r.draw_page(p, op.order, op)
p.save()
outbuffer = r.render_background(buffer, 'Badge')
merger.append(ContentFile(outbuffer.read()))
outbuffer = BytesIO()
merger.write(outbuffer)
merger.close()
outbuffer.seek(0)
    if not any_rendered:
raise OrderError(_("None of the selected products is configured to print badges."))
return outbuffer
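# Sketch of how render_pdf is driven (hypothetical queryset; in practice the
# exporter below builds `positions` from the submitted form data):
#
#     positions = OrderPosition.objects.filter(order__event=event)
#     pdf_buffer = render_pdf(event, positions)   # BytesIO, already seek(0)'d
#     with open('badges.pdf', 'wb') as f:
#         f.write(pdf_buffer.read())
#
# Positions whose product has no badge layout are skipped; if nothing at all
# gets rendered, an OrderError is raised instead of returning an empty PDF.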
class BadgeExporter(BaseExporter):
identifier = "badges"
verbose_name = _("Attendee badges")
@property
def export_form_fields(self):
name_scheme = PERSON_NAME_SCHEMES[self.event.settings.name_scheme]
d = OrderedDict(
[
('items',
forms.ModelMultipleChoiceField(
queryset=self.event.items.annotate(
no_badging=Exists(BadgeItem.objects.filter(item=OuterRef('pk'), layout__isnull=True))
).exclude(no_badging=True),
label=_('Limit to products'),
widget=forms.CheckboxSelectMultiple(
attrs={'class': 'scrolling-multiple-choice'}
),
initial=self.event.items.filter(admission=True)
)),
('include_pending',
forms.BooleanField(
label=_('Include pending orders'),
required=False
)),
('order_by',
forms.ChoiceField(
label=_('Sort by'),
choices=[
('name', _('Attendee name')),
('code', _('Order code')),
] + ([
('name:{}'.format(k), _('Attendee name: {part}').format(part=label))
for k, label, w in name_scheme['fields']
] if settings.JSON_FIELD_AVAILABLE and len(name_scheme['fields']) > 1 else []),
)),
]
)
return d
def render(self, form_data: dict) -> Tuple[str, str, str]:
qs = OrderPosition.objects.filter(
order__event=self.event, item_id__in=form_data['items']
).prefetch_related(
'answers', 'answers__question'
).select_related('order', 'item', 'variation', 'addon_to')
if form_data.get('include_pending'):
qs = qs.filter(order__status__in=[Order.STATUS_PAID, Order.STATUS_PENDING])
else:
qs = qs.filter(order__status__in=[Order.STATUS_PAID])
if form_data.get('order_by') == 'name':
qs = qs.order_by('attendee_name_cached', 'order__code')
elif form_data.get('order_by') == 'code':
qs = qs.order_by('order__code')
elif form_data.get('order_by', '').startswith('name:'):
part = form_data['order_by'][5:]
qs = qs.annotate(
resolved_name=Coalesce('attendee_name_parts', 'addon_to__attendee_name_parts', 'order__invoice_address__name_parts')
).annotate(
resolved_name_part=JSONExtract('resolved_name', part)
).order_by(
'resolved_name_part'
)
outbuffer = render_pdf(self.event, qs)
return 'badges.pdf', 'application/pdf', outbuffer.read()
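# Illustrative call (form_data keys mirror export_form_fields above; values
# are hypothetical):
#
#     exporter = BadgeExporter(event)
#     filename, mimetype, data = exporter.render({
#         'items': [item.pk for item in event.items.all()],
#         'include_pending': False,
#         'order_by': 'code',
#     })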
| 37.368056 | 132 | 0.61457 | 590 | 5,381 | 5.416949 | 0.337288 | 0.02816 | 0.026283 | 0.017835 | 0.048811 | 0.043179 | 0.043179 | 0.043179 | 0.02378 | 0 | 0 | 0.002071 | 0.28229 | 5,381 | 143 | 133 | 37.629371 | 0.825479 | 0 | 0 | 0.039683 | 0 | 0 | 0.113362 | 0.024159 | 0 | 0 | 0 | 0 | 0 | 1 | 0.031746 | false | 0 | 0.190476 | 0 | 0.285714 | 0.007937 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |