hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6486381637823d2233cc5e2e755dff8914311f6f | 5,841 | py | Python | third-party/rlimit/scripts/codegen.py | capyloon/api-daemon | ab4e4b60aa9bb617734c64655c0b8940fff098bc | [
"Apache-2.0"
] | 4 | 2022-02-08T05:32:15.000Z | 2022-03-29T22:35:33.000Z | third-party/rlimit/scripts/codegen.py | capyloon/api-daemon | ab4e4b60aa9bb617734c64655c0b8940fff098bc | [
"Apache-2.0"
] | null | null | null | third-party/rlimit/scripts/codegen.py | capyloon/api-daemon | ab4e4b60aa9bb617734c64655c0b8940fff098bc | [
"Apache-2.0"
] | 1 | 2022-03-20T11:14:51.000Z | 2022-03-20T11:14:51.000Z | from typing import Optional, Dict, List
import json
import os
import re
from pprint import pprint
LIBC_REPO_PATH = os.getenv("LIBC_REPO_PATH", "libc")
PREDICATES = {
"fuchsia/mod.rs": {"os": ["fuchsia"]},
"unix/bsd/apple/mod.rs": {"os": ["macos", "ios"]},
"unix/bsd/freebsdlike/dragonfly/mod.rs": {"os": ["dragonfly"]},
"unix/bsd/freebsdlike/freebsd/mod.rs": {"os": ["freebsd"]},
"unix/bsd/freebsdlike/mod.rs": {"os": ["freebsd", "dragonfly"]},
"unix/bsd/netbsdlike/mod.rs": {"os": ["openbsd", "netbsd"]},
"unix/bsd/netbsdlike/netbsd/mod.rs": {"os": ["netbsd"]},
"unix/haiku/mod.rs": {"os": ["haiku"]},
"unix/linux_like/android/mod.rs": {"os": ["android"]},
"unix/linux_like/emscripten/mod.rs": {"os": ["emscripten"]},
"unix/linux_like/linux/arch/generic/mod.rs": {"os": ["linux"]},
"unix/linux_like/linux/arch/mips/mod.rs": {"os": ["linux"], "arch": ["mips", "mips64"]},
"unix/linux_like/linux/arch/powerpc/mod.rs": {"os": ["linux"], "arch": ["powerpc", "powerpc64"]},
"unix/linux_like/linux/arch/sparc/mod.rs": {"os": ["linux"], "arch": ["sparc", "sparc64"]},
"unix/solarish/mod.rs": {"os": ["solaris", "illumos"]},
"unix/linux_like/linux/gnu/mod.rs": {"os": ["linux"], "env": ["gnu"]},
"unix/linux_like/linux/musl/mod.rs": {"os": ["linux"], "env": ["musl"]},
"unix/linux_like/linux/uclibc/mod.rs": {"os": ["linux"], "env": ["uclibc"]},
"unix/linux_like/android/b32/mod.rs": {"os": ["android"], "pointer_width": ["32"]},
"unix/linux_like/android/b64/mod.rs": {"os": ["android"], "pointer_width": ["64"]},
"unix/linux_like/linux/mod.rs": {"os": ["linux"]},
"unix/mod.rs": {"family": ["unix"]},
"vxworks/mod.rs": {"os": ["vxworks"]},
"unix/bsd/mod.rs": {"os": ["macos", "ios", "watchos", "freebsd", "dragonfly", "openbsd", "netbsd"]},
"unix/hermit/mod.rs": {"os": ["hermit"]},
"unix/newlib/mod.rs": {"env": ["newlib"]},
"unix/redox/mod.rs": {"os": ["redox"]},
}
def extract_paths(rg_lines: List[str]) -> List[str]:
paths = set()
for line in rg_lines:
item = json.loads(line)
if item["type"] == "match":
file_path = item["data"]["path"]["text"]
rel_file_path = re.match(".+src/(.+)", file_path).group(1) # type: ignore
paths.add(rel_file_path)
return sorted(paths)
def search(prefix: str, ident: str) -> List[Dict[str, List[str]]]:
pipe = os.popen(f"rg --json 'pub {prefix} {ident}' {LIBC_REPO_PATH}")
lines = [l for l in pipe.read().split("\n") if l != ""]
cfgs = [PREDICATES[path] for path in extract_paths(lines)]
return cfgs
def emit_predicate(kind: str, cond: List[str]) -> str:
if len(cond) == 1:
return f'{kind} = "{cond[0]}"'
else:
return "any(" + ", ".join(f'{kind} = "{c}"' for c in cond) + ")"
def emit_cfg(cfgs: List[Dict[str, List[str]]], indent: int) -> str:
predicates = []
for cfg in cfgs:
ps = []
for kind in ["os", "arch", "env", "pointer_width", "family"]:
if kind in cfg:
ps.append(emit_predicate(f"target_{kind}", cfg[kind]))
if len(ps) == 1:
predicates.append(ps[0])
else:
predicates.append("all(" + ", ".join(ps) + ")")
ans = "any(\n"
for p in predicates:
ans += " " * (indent + 1) + p + ",\n"
ans += " " * indent + ")"
return ans
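# Illustrative example (not part of the original script): given two predicate dicts,
# emit_cfg produces a nested Rust cfg expression, e.g.
#
#   emit_cfg([{"os": ["linux"], "env": ["gnu"]}, {"os": ["macos", "ios"]}], indent=0)
#
# returns
#
#   any(
#    all(target_os = "linux", target_env = "gnu"),
#    any(target_os = "macos", target_os = "ios"),
#   )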
if __name__ == "__main__":
resources = [
"RLIMIT_AS",
"RLIMIT_CORE",
"RLIMIT_CPU",
"RLIMIT_DATA",
"RLIMIT_FSIZE",
"RLIMIT_KQUEUES",
"RLIMIT_LOCKS",
"RLIMIT_MEMLOCK",
"RLIMIT_MSGQUEUE",
"RLIMIT_NICE",
"RLIMIT_NOFILE",
"RLIMIT_NOVMON",
"RLIMIT_NPROC",
"RLIMIT_NPTS",
"RLIMIT_NTHR",
"RLIMIT_POSIXLOCKS",
"RLIMIT_RSS",
"RLIMIT_RTPRIO",
"RLIMIT_RTTIME",
"RLIMIT_SBSIZE",
"RLIMIT_SIGPENDING",
"RLIMIT_STACK",
"RLIMIT_SWAP",
"RLIMIT_UMTXP",
"RLIMIT_VMEM",
]
print(
"#![allow("
"clippy::assertions_on_constants, "
"clippy::absurd_extreme_comparisons, "
"clippy::cast_possible_truncation, "
"unused_comparisons)]\n"
)
resource_cfgs = []
for resource in resources:
cfg = emit_cfg(search("const", resource), indent=0)
resource_cfgs.append((resource, cfg))
print(f"#[cfg({cfg})]")
print(f"pub const {resource}: u8 = libc::{resource} as u8;")
print()
print(f"#[cfg(not({cfg}))]")
print(f"pub const {resource}: u8 = u8::MAX;")
print()
print("// " + "-" * 77)
print()
print("#[allow(clippy::too_many_lines)]")
print("#[test]")
print("fn resource_bound() {")
for resource, cfg in resource_cfgs:
print(f" #[cfg({cfg})]")
print(f" assert!((0..128).contains(&libc::{resource}));")
print()
print("}")
print()
for ident in ["rlimit", "getrlimit", "setrlimit"]:
if ident == "rlimit":
cfg64 = emit_cfg(search("struct", ident + "64"), indent=0)
cfg = emit_cfg(search("struct", ident), indent=0)
else:
cfg64 = emit_cfg(search("fn", ident + "64"), indent=0)
cfg = emit_cfg(search("fn", ident), indent=0)
print(f"#[cfg({cfg64})]")
print(f"pub use libc::{ident}64 as {ident};")
print()
print(f"#[cfg(all(not({cfg64}), {cfg}))]")
print(f"pub use libc::{ident};")
print()
ident = "RLIM_INFINITY"
cfg = emit_cfg(search("const", ident), indent=0)
print(f"#[cfg({cfg})]")
print(f"pub const {ident}: u64 = libc::{ident} as u64;")
print()
print(f"#[cfg(not({cfg}))]")
print(f"pub const {ident}: u64 = u64::MAX;")
print()
| 34.56213 | 104 | 0.541174 | 723 | 5,841 | 4.250346 | 0.250346 | 0.043931 | 0.056948 | 0.04686 | 0.235926 | 0.117475 | 0.07029 | 0.058575 | 0.022128 | 0.022128 | 0 | 0.012999 | 0.23609 | 5,841 | 168 | 105 | 34.767857 | 0.675706 | 0.002054 | 0 | 0.115646 | 0 | 0 | 0.398661 | 0.140724 | 0 | 0 | 0 | 0 | 0.013605 | 1 | 0.027211 | false | 0 | 0.034014 | 0 | 0.095238 | 0.204082 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64879ceed73b3f3ed843d40690f61395ac4530f2 | 13,389 | py | Python | deepatari/tools/statistics.py | cowhi/deepatari | 3b676ca4fc66266d766cd2366226f3e10213bc78 | [
"MIT"
] | 10 | 2016-06-10T01:13:44.000Z | 2017-10-15T10:47:09.000Z | deepatari/tools/statistics.py | cowhi/deepatari | 3b676ca4fc66266d766cd2366226f3e10213bc78 | [
"MIT"
] | null | null | null | deepatari/tools/statistics.py | cowhi/deepatari | 3b676ca4fc66266d766cd2366226f3e10213bc78 | [
"MIT"
] | 2 | 2016-06-10T14:38:08.000Z | 2020-08-29T03:11:06.000Z | import logging
_logger = logging.getLogger(__name__)
import sys
import os
import csv
import time
import numpy as np
class Statistics(object):
""" This class handles all statistics of an experiment.
    The class keeps the statistics running and saves certain parameters at the
end of an epoch to files. It's also responsible for generating nice graphs
to evaluate the training progress.
Attributes:
STATS_AGENT_TRAIN (tuple): Defines a tuple of all relevant agent training parameters for evaluation.
STATS_AGENT_TEST (tuple): Defines a tuple of all relevant agent testing parameters for evaluation.
STATS_AGENT_NET (tuple): Defines a tuple of all relevant network training parameters for evaluation.
name (str): The name of the statistic object.
        agent (Agent): The agent that performs the learning.
net (Learner): Object of one of the Learner modules.
mem (Memory): The replay memory to save the experiences.
        env (Environment): The environment in which the agent acts.
target_dir (str): Location to save the stats.
        csv_file_train (file): The file where the training parameters are stored.
csv_writer_train (writer): Converts the data into a delimited string to save in the file.
        csv_file_test (file): The file where the testing parameters are stored.
csv_writer_test (writer): Converts the data into a delimited string to save in the file.
time_start (time): Keeps track of the experiment start time.
phase (str): Indicates the current phase of the experiment.
"""
STATS_AGENT_TRAIN = (
("epoch","Epoch","#","int"),
("phase","Phase","",np.object_),
("n_steps_epoch","Steps per Epoch","#","int"),
("n_games","Games per Epoch","#","int"),
("n_steps_games_avg","Steps per Game (avg)","#",),
("n_steps_games_min","Steps per Game (min)","#","int"),
("n_steps_games_max","Steps per Game (max)","#","int"),
("reward_epoch","Reward per Epoch","","float"),
("reward_game_avg","Reward per Game (avg)","","float"),
("reward_game_min","Reward per Game (min)","","float"),
("reward_game_max","Reward per Game (max)","","float"),
("epsilon","Exploration Rate","","float"),
("n_steps_total","Steps Total","#","int"),
("replay_memory_size","Replay Memory Size","#","int"),
("q_avg_epoch","Q-Value per Epoch (avg)","","float"),
("cost_avg_epoch","Cost per Epoch (avg)","","float"),
("weight_updates","Network Weight Updates","#","int"),
("time_total","Time Total","s","float"),
("time_epoch","Time Epoch","s","float"),
("steps_per_second","Steps per Second","#","int")
)
STATS_AGENT_TEST = (
("epoch","Epoch","#","int"),
("phase","Phase","",np.object_),
("n_steps_epoch","Steps per Epoch","#","int"),
("n_games","Games per Epoch","#","int"),
("n_steps_games_avg","Steps per Game (avg)","#",),
("n_steps_games_min","Steps per Game (min)","#","int"),
("n_steps_games_max","Steps per Game (max)","#","int"),
("reward_epoch","Reward per Epoch","","float"),
("reward_game_avg","Reward per Game (avg)","","float"),
("reward_game_min","Reward per Game (min)","","float"),
("reward_game_max","Reward per Game (max)","","float"),
("epsilon","Exploration Rate","","float"),
("n_steps_total","Steps Total","#","int"),
("replay_memory_size","Replay Memory Size","#","int"),
("q_avg_epoch","Q-Value per Epoch (avg)","","float"),
("cost_avg_epoch","Cost per Epoch (avg)","","float"),
("weight_updates","Network Weight Updates","#","int"),
("time_total","Time Total","s","float"),
("time_epoch","Time Epoch","s","float"),
("steps_per_second","Steps per Second","#","int")
)
STATS_NET = (
("epoch","Epoch","#","int"),
("n_batch_update","Batch Update","#","int"),
("cost_current","Cost per Batch Update","","float"),
("cost_average","Cost Average","","float"),
("qvalue_average","Q-Value per Batch Update","","float"),
("epsilon","Exploration Rate","","float")
)
# TODO: adapt stats for training and testing
# TODO: separate stats for network
def __str__(self):
""" Overwrites the object.__str__ method.
Returns:
string (str): Important parameters of the object.
"""
return "'name':" + str(self.name) + ", " + \
"'time_start':" + str(self.time_start)
def __init__(self, agent, net, mem, env, args, target_dir):
""" Initialize an statistics object.
Args:
            agent (Agent): The agent that performs the learning.
net (Learner): Object of one of the Learner modules.
mem (Memory): The replay memory to save the experiences.
env (Environment): Current environment, which provides information for the learner.
args (argparse.Namespace): All settings either default or set via command line arguments.
target_dir (str): Location to save the stats.
"""
_logger.info("Initializing new object of type " + str(type(self).__name__))
self.name = "Observer"
# attach statistics to agent
self.agent = agent
self.agent.callback = self
# attach statistics to net
self.net = net
self.net.callback = self
# make replay memory and environment available
self.mem = mem
self.env = env
# make target dir available
self.target_dir = target_dir
        # check directory for saving stats
#if not os.path.isdir(target_dir):
# os.makedirs(target_dir)
if not self.target_dir == None:
# setup file for train stats
self.csv_file_train = open(os.path.join(target_dir, "stats_agent_train.csv"), "wb")
self.csv_writer_train = csv.writer(self.csv_file_train)
self.csv_writer_train.writerow([stat[0] for stat in self.STATS_AGENT_TRAIN])
self.csv_file_train.flush()
# setup file for test stats
self.csv_file_test = open(os.path.join(target_dir, "stats_agent_test.csv"), "wb")
self.csv_writer_test = csv.writer(self.csv_file_test)
self.csv_writer_test.writerow([stat[0] for stat in self.STATS_AGENT_TEST])
self.csv_file_test.flush()
# initialize timer
self.time_start = time.clock()
_logger.debug("%s" % str(self))
def close(self):
""" Closes the logfiles after the experiment. """
_logger.debug("Closing logfiles")
if not self.target_dir == None:
#if self.agent.phase in ("train","random"):
self.csv_file_train.close()
#elif self.agent.phase == "test":
self.csv_file_test.close()
def reset_epoch_stats(self):
""" Resets the parameters to initial values for each epoch. """
_logger.debug("Resetting stats")
self.time_epoch_start = time.clock()
self.n_steps_epoch = 0
self.n_games = 0
self.n_steps_game = 0
self.n_steps_games_avg = 0
self.n_steps_games_min = sys.maxint
self.n_steps_games_max = -sys.maxint - 1
self.reward_epoch = 0
self.reward_game = 0
self.reward_game_avg = 0
self.reward_game_min = sys.maxint
self.reward_game_max = -sys.maxint - 1
self.epsilon = 1
self.cost_avg_epoch = 0
self.q_avg_epoch = 0
def from_agent(self, reward, terminal, epsilon):
""" Handles the callbacks from the agent.
Args:
reward (int): The reward received after taking the action.
terminal (bool): The new terminal state indicator after taking the action.
epsilon (float): The current epsilon value.
"""
_logger.debug("Callback from agent")
self.reward_epoch += reward
self.reward_game += reward
self.n_steps_epoch += 1
self.n_steps_game += 1
self.epsilon = epsilon
if terminal:
self.n_games += 1
self.reward_game_avg += float(self.reward_game - self.reward_game_avg) / self.n_games
self.reward_game_min = min(self.reward_game_min, self.reward_game)
self.reward_game_max = max(self.reward_game_max, self.reward_game)
self.reward_game = 0
self.n_steps_games_avg += float(self.n_steps_game - self.n_steps_games_avg) / self.n_games
self.n_steps_games_min = min(self.n_steps_games_min, self.n_steps_game)
self.n_steps_games_max = max(self.n_steps_games_max, self.n_steps_game)
self.n_steps_game = 0
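    # Added note (not in the original source): the running averages above use the
    # standard incremental mean, avg_n = avg_(n-1) + (x_n - avg_(n-1)) / n, so the
    # per-game reward and step averages are updated without storing every game.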
def from_learner(self, cost_batch, q_avg_batch):
""" Handles the callbacks from the learner.
Args:
cost_batch (float): Cost per batch.
q_avg_batch (float): Average max Q-value per batch.
"""
_logger.debug("Callback from net")
self.cost_avg_epoch += (cost_batch - self.cost_avg_epoch) / self.net.update_iterations
self.q_avg_epoch += (q_avg_batch - self.q_avg_epoch) / self.net.update_iterations
def write_epoch_stats(self, epoch):
""" Writes the stats for the current epoch to disk.
Args:
epoch (int): Current epoch.
"""
_logger.debug("Epoch = %d" % epoch)
time_current = time.clock()
time_total = time_current - self.time_start
time_epoch = time_current - self.time_epoch_start
if time_epoch != 0:
steps_per_second = int(self.n_steps_epoch / time_epoch)
else:
steps_per_second = 1
if self.n_games == 0:
self.n_games = 1
self.reward_game_avg = self.reward_game
'''
# getting qvalue dynamics ??
if self.validation_states is None and self.mem.count > self.mem.batch_size:
# sample states for measuring Q-value dynamics
prestates, actions, rewards, poststates, terminals = self.mem.getMinibatch()
self.validation_states = prestates
if self.validation_states is not None:
qvalues = np.empty((self.net.output_shape, self.net.batch_size))
for i, state in enumerate(self.validation_states):
qvalues[:,i] = self.net.predict(state)
maxqs = np.max(qvalues, axis=1)
assert maxqs.shape[0] == qvalues.shape[0]
meanq = np.mean(maxqs)
else:
meanq = 0
'''
if not self.target_dir == None:
if self.agent.phase in ("train","random"):
content = (
epoch,
self.agent.phase,
self.n_steps_epoch,
self.n_games,
self.n_steps_games_avg,
self.n_steps_games_min,
self.n_steps_games_max,
self.reward_epoch,
self.reward_game_avg,
self.reward_game_min,
self.reward_game_max,
self.epsilon,
self.agent.n_steps_total,
self.mem.count,
self.q_avg_epoch,
self.cost_avg_epoch,
self.net.update_iterations,
"{:.2f}".format(time_total),
"{:.2f}".format(time_epoch),
steps_per_second
)
self.csv_writer_train.writerow(content)
self.csv_file_train.flush()
elif self.agent.phase == "test":
content = (
epoch,
self.agent.phase,
self.n_steps_epoch,
self.n_games,
self.n_steps_games_avg,
self.n_steps_games_min,
self.n_steps_games_max,
self.reward_epoch,
self.reward_game_avg,
self.reward_game_min,
self.reward_game_max,
self.epsilon,
self.agent.n_steps_total,
self.mem.count,
self.q_avg_epoch, #was: meanq,
self.cost_avg_epoch,
self.net.update_iterations,
"{:.2f}".format(time_total),
"{:.2f}".format(time_epoch),
steps_per_second
)
self.csv_writer_test.writerow(content)
self.csv_file_test.flush()
_logger.info("n_games: %d, average_reward: %f, min_game_reward: %d, max_game_reward: %d, epsilon: %f, time_epoch: %ds, steps_per_second: %d" % (self.n_games, self.reward_game_avg, self.reward_game_min, self.reward_game_max, self.epsilon, time_epoch, steps_per_second))
| 44.481728 | 276 | 0.56778 | 1,622 | 13,389 | 4.448829 | 0.146732 | 0.031596 | 0.036031 | 0.031181 | 0.54296 | 0.440549 | 0.41311 | 0.388997 | 0.336752 | 0.326774 | 0 | 0.003377 | 0.314288 | 13,389 | 300 | 277 | 44.63 | 0.782594 | 0.219359 | 0 | 0.492147 | 0 | 0.005236 | 0.197568 | 0.00224 | 0 | 0 | 0 | 0.003333 | 0 | 1 | 0.036649 | false | 0 | 0.031414 | 0 | 0.094241 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6488b1c28baaff292669122a86d2163e41ff8cf6 | 1,284 | py | Python | cmskit/products/views.py | ozgurgunes/django-cmskit | 19d14fbb57702a6c56b6b3a5d859c93533ff1535 | [
"MIT"
] | 1 | 2015-09-28T10:10:34.000Z | 2015-09-28T10:10:34.000Z | cmskit/products/views.py | ozgurgunes/django-cmskit | 19d14fbb57702a6c56b6b3a5d859c93533ff1535 | [
"MIT"
] | null | null | null | cmskit/products/views.py | ozgurgunes/django-cmskit | 19d14fbb57702a6c56b6b3a5d859c93533ff1535 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.utils.translation import get_language
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.http import Http404
from cmskit.products.models import Category, Product
def index(request):
categories = Category.objects.select_related().filter(parent=None)
return render_to_response('products/index.html', {'categories': categories},
context_instance=RequestContext(request))
def detail(request, path, *args, **kwargs):
slugs = path.split('/')
try:
query = Product.objects.published().select_related()
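        # Added note (not in the original source): the eval below builds a query with a
        # language-suffixed slug lookup (e.g. slug_en, category__slug_en), matching the
        # per-language slug fields these models appear to define.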
product = eval('query.get('
'slug_'+get_language()+'=slugs[-1],'
'category__slug_'+get_language()+'=slugs[-2])')
return render_to_response('products/product.html', {'product':product},
context_instance=RequestContext(request))
except: pass
query = Category.objects.select_related().all()
category = eval('get_object_or_404(query,'
'slug_'+get_language()+'=slugs[-1])')
return render_to_response('products/category.html', {'category':category},
context_instance=RequestContext(request))
| 41.419355 | 80 | 0.662773 | 139 | 1,284 | 5.913669 | 0.402878 | 0.048662 | 0.077859 | 0.080292 | 0.160584 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012795 | 0.208723 | 1,284 | 31 | 81 | 41.419355 | 0.79626 | 0.016355 | 0 | 0.125 | 0 | 0 | 0.142631 | 0.05309 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0.041667 | 0.208333 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
648919faa81c812b312bc58cbd08e8cc789afe5f | 307 | py | Python | main.py | baifengbai/xlart | d4568dcc3c221b25a9227af1ae022c4f0a5b3476 | [
"MIT"
] | 1 | 2022-03-12T02:40:51.000Z | 2022-03-12T02:40:51.000Z | main.py | baifengbai/xlart | d4568dcc3c221b25a9227af1ae022c4f0a5b3476 | [
"MIT"
] | null | null | null | main.py | baifengbai/xlart | d4568dcc3c221b25a9227af1ae022c4f0a5b3476 | [
"MIT"
] | null | null | null | from time import time
import xlart
if __name__ == '__main__':
start = time()
xlart.resize_image('picture.jpg', 'picture_resized.jpg', 200, 200)
xlart.image_to_xlsx('picture_resized.jpg', 'demo.xlsx')
end = time()
print('An xlart file has been generated in ' + str(end - start) + 's.')
| 27.909091 | 75 | 0.664495 | 44 | 307 | 4.340909 | 0.613636 | 0.104712 | 0.17801 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024096 | 0.188925 | 307 | 10 | 76 | 30.7 | 0.742972 | 0 | 0 | 0 | 0 | 0 | 0.338762 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6489d5635a822fcdc10fe744a26044fe5cedea48 | 528 | py | Python | tests/units/Vault/test_withdraw_all.py | benber86/alcom_contracts | 57136d97d0d30088679e358a2fc3345e82ccb0f7 | [
"MIT"
] | 2 | 2021-07-14T16:26:14.000Z | 2021-08-01T22:24:51.000Z | tests/units/Vault/test_withdraw_all.py | benber86/alcom_contracts | 57136d97d0d30088679e358a2fc3345e82ccb0f7 | [
"MIT"
] | null | null | null | tests/units/Vault/test_withdraw_all.py | benber86/alcom_contracts | 57136d97d0d30088679e358a2fc3345e82ccb0f7 | [
"MIT"
] | null | null | null | import brownie
AMOUNT = 10 ** 18
def test_single_withdraw_all(alice, vault, alcx, ss_compounder):
prior_pool_balance = ss_compounder.stakeBalance()
prior_alcx_balance = alcx.balanceOf(alice)
alcx.approve(vault, AMOUNT, {'from': alice})
vault.deposit(AMOUNT, {'from': alice})
vault.withdrawAll({'from': alice})
assert vault.totalSupply() == 0
assert vault.balanceOf(alice) == 0
assert alcx.balanceOf(alice) == prior_alcx_balance
assert ss_compounder.stakeBalance() == prior_pool_balance
| 27.789474 | 64 | 0.717803 | 65 | 528 | 5.615385 | 0.415385 | 0.082192 | 0.087671 | 0.158904 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013544 | 0.160985 | 528 | 18 | 65 | 29.333333 | 0.810384 | 0 | 0 | 0 | 0 | 0 | 0.022727 | 0 | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0.083333 | false | 0 | 0.083333 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
648b823acfa1c931f329b120da5595b30d0999f3 | 685 | py | Python | tests/test_constituency_parsing.py | jayten42/pororo | 0b02e6a633b9a32ec4241b8ed96745e6592db317 | [
"Apache-2.0"
] | 1,137 | 2021-02-02T02:09:06.000Z | 2022-03-29T03:10:40.000Z | tests/test_constituency_parsing.py | jayten42/pororo | 0b02e6a633b9a32ec4241b8ed96745e6592db317 | [
"Apache-2.0"
] | 57 | 2021-02-02T03:29:54.000Z | 2022-03-31T16:20:00.000Z | tests/test_constituency_parsing.py | jayten42/pororo | 0b02e6a633b9a32ec4241b8ed96745e6592db317 | [
"Apache-2.0"
] | 216 | 2021-02-02T02:49:02.000Z | 2022-03-28T01:19:58.000Z | """Test Constituency Parsing module"""
import unittest
from pororo import Pororo
class PororoConstParsingTester(unittest.TestCase):
def test_modules(self):
const = Pororo(task="const", lang="ko")
const_res = const(
"지금까지 최원호 한화 이글스 감독대행, 이동욱 NC 다이노스 감독, 이강철 KT 감독에 이어 4번째 선물이었다.")
self.assertIsInstance(const_res, str)
const = Pororo(task="const", lang="zh")
const_res = const("我喜欢饼干")
self.assertIsInstance(const_res, str)
const = Pororo(task="const", lang="en")
const_res = const("I love this place")
self.assertIsInstance(const_res, str)
if __name__ == "__main__":
unittest.main()
| 25.37037 | 77 | 0.640876 | 85 | 685 | 4.988235 | 0.541176 | 0.113208 | 0.106132 | 0.141509 | 0.389151 | 0.259434 | 0.259434 | 0.259434 | 0.259434 | 0.259434 | 0 | 0.001923 | 0.240876 | 685 | 26 | 78 | 26.346154 | 0.813462 | 0.046715 | 0 | 0.1875 | 0 | 0 | 0.174652 | 0 | 0 | 0 | 0 | 0 | 0.1875 | 1 | 0.0625 | false | 0 | 0.125 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
648bc0fbcc9b572dd1e815e182903fd435d432c6 | 6,120 | py | Python | goat_scraper.py | Joseph1337/Snkr-Findr-API | 7caf2eef5beb487d1a5446dafec796c37c7e6be4 | [
"MIT"
] | 4 | 2021-03-03T17:14:25.000Z | 2022-01-22T14:52:49.000Z | goat_scraper.py | Joseph1337/Snkr-Trakr-API | 7caf2eef5beb487d1a5446dafec796c37c7e6be4 | [
"MIT"
] | null | null | null | goat_scraper.py | Joseph1337/Snkr-Trakr-API | 7caf2eef5beb487d1a5446dafec796c37c7e6be4 | [
"MIT"
] | null | null | null | import json
import requests
import pprint
from time import sleep
import random
#extracts all user-agents from the provided 'ua_file.txt' into a list then randomly selects a user-agent
def getUserAgent():
    userAgentFile = 'ua_file.txt'
    with open(userAgentFile) as file:
        listOfUserAgents = [line.rstrip("\n") for line in file]
    return random.choice(listOfUserAgents)
class Sneaker:
def __init__(self, name, query_id, retail_price, displayed_size, price, image_url):
self.name = name
self.query_id = query_id
if(retail_price == None):
self.retail_price = "N/A"
else:
self.retail_price = retail_price/100
if(displayed_size == None):
self.displayed_size = "N/A"
else:
self.displayed_size = displayed_size
if(price==None):
self.lowest_price = "N/A"
else:
self.lowest_price = price/100
self.image_url = image_url
# self.sizeAndPrice = sizeAndPrice
#function to get all sneakers from 'Shop All' page
def getAllSneakers(keyword=''):
sneakersList = []
#api call to retrieve sneaker details
url = 'https://2fwotdvm2o-3.algolianet.com/1/indexes/*/queries'
#size you want to look for:
shoe_size = ""
search_field = keyword
#data sent with POST request
for page in range(0,5):
form_data = {
"requests": [{
"indexName":"product_variants_v2",
"params":"",
"highlightPreTag" : "<ais-highlight-0000000000>",
"highlightPostTag": "</ais-highlight-0000000000>",
"distinct": "true",
"query": keyword,
"facetFilters": [["presentation_size:" + str(shoe_size)],["product_category:shoes"]],
"maxValuesPerFacet": 30,
"page": page,
"facets": ["instant_ship_lowest_price_cents","single_gender","presentation_size","shoe_condition","product_category","brand_name","color","silhouette","designer","upper_material","midsole","category","release_date_name"],
"tagFilters":""
}]
}
query_params = {
'x-algolia-agent': 'Algolia for JavaScript (3.35.1); Browser (lite); JS Helper (3.2.2); react (16.13.1); react-instantsearch (6.8.2)',
'x-algolia-application-id': '2FWOTDVM2O',
'x-algolia-api-key': 'ac96de6fef0e02bb95d433d8d5c7038a'
}
response = requests.post(url, data=json.dumps(form_data), params=query_params).json()['results'][0]['hits']
for sneaker in response:
sneakersList.append((Sneaker(sneaker['name'], sneaker['slug'], sneaker['retail_price_cents'], sneaker['size'], sneaker['lowest_price_cents'], sneaker['original_picture_url']).__dict__)) # getSneakerSizesAndPrices(sneaker['slug'])))
# sleep(random.randrange(1,3))
return sneakersList
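# Illustrative usage (not part of the original script), using the functions defined here:
#   sneakers = getAllSneakers("jordan")             # list of dicts built from Sneaker.__dict__
#   info = getSneaker(sneakers[0]["query_id"])      # name, colorway, style ID, size/price map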
def getSneaker(query_id):
sneakerInfo = {}
url = "https://www.goat.com/web-api/v1/product_templates/" + query_id
user_agent = getUserAgent()
headers = {
"User-Agent": user_agent,
"Accept": "application/json",
"Referer": "https://www.goat.com/sneakers/" + query_id
}
for i in range(0, 10):
try:
headers.update({"user-agent": getUserAgent()})
response = requests.get(url, headers=headers).json()
print(response)
sneakerInfo['Name'] = response['name']
sneakerInfo['Colorway'] = response['details']
sneakerInfo['Style ID'] = response['sku']
sneakerInfo['Release Date'] = response['releaseDate'].split('T')[0]
sneakerInfo['Price Map'] = getSneakerSizesAndPrices(query_id)
sneakerInfo['Image'] = response['mainPictureUrl']
break
except: #runs into captcha, so retry
sleep(random.randrange(1,3))
continue
else:
return {"message": "Could not connect to GOAT.com while searching for " + query_id}
return sneakerInfo
def getSneakerSizesAndPrices(query_id): #helper method for getSneaker to get prices via separate api call
sizeAndPrice = {}
url = 'https://www.goat.com/web-api/v1/product_variants'
user_agent = getUserAgent()
headers = {
"user-agent": user_agent,
"accept" : "application/json",
"accept-encoding": "gzip, deflate, br",
"accept-language" : "en-US,en;q=0.9",
"referer": 'https://www.google.com/'
}
query_params = {
"productTemplateId": query_id
}
for i in range(0, 10):
try:
headers.update({"user-agent": getUserAgent()})
response = requests.get(url, headers=headers, params=query_params, timeout=10)
# print(response.text)
if(response.status_code >= 200 and response.status_code < 400):
page = response.json()
for i in range(0, len(page)):
#check ONLY for new shoes with boxes in good condition
if(page[i]['boxCondition'] == "good_condition" and page[i]['shoeCondition'] == "new_no_defects"):
sizeAndPrice.update({page[i]['size']: page[i]['lowestPriceCents']['amount']/100})
# elif (response.json()['success'] == False): #catches if query_id invalid
elif("success" in response.json()):
if(response.json()['success'] == False):
sizeAndPrice.update({"message": "Invalid product id."})
break
else:
raise PermissionError
except (PermissionError):#request got blocked by captcha
continue
except requests.exceptions.Timeout as err:
continue
else:
break
else: # if not sizeAndPrice:
sizeAndPrice.update({"Size_Timeout": "Price_Timeout"})
return sizeAndPrice
| 39.483871 | 244 | 0.584967 | 648 | 6,120 | 5.399691 | 0.371914 | 0.022006 | 0.009145 | 0.008574 | 0.135467 | 0.110889 | 0.110889 | 0.110889 | 0.110889 | 0.092026 | 0 | 0.022212 | 0.286438 | 6,120 | 154 | 245 | 39.74026 | 0.779025 | 0.103595 | 0 | 0.201613 | 0 | 0.008065 | 0.254662 | 0.029616 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040323 | false | 0 | 0.040323 | 0 | 0.129032 | 0.016129 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
648e62a712e9527d4e41e9d0aa25cde0e47d754b | 4,674 | py | Python | src/morsecoder.py | PyMorseCoder/MorseCoder | 3266f2ec7c0563affd0c6d7a71a621245fb890e4 | [
"Apache-2.0"
] | 7 | 2021-06-20T22:57:36.000Z | 2021-07-05T21:43:24.000Z | src/morsecoder.py | HestStudio/MorseCoder | 3266f2ec7c0563affd0c6d7a71a621245fb890e4 | [
"Apache-2.0"
] | null | null | null | src/morsecoder.py | HestStudio/MorseCoder | 3266f2ec7c0563affd0c6d7a71a621245fb890e4 | [
"Apache-2.0"
] | 2 | 2021-07-30T04:28:14.000Z | 2022-01-02T07:45:30.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Update time: 2021/6/19
class MorsecodeError(Exception):
'''
    Custom exception that provides
    clearer, more descriptive error messages.
'''
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Morsecoder:
'''
    A Morse code library for Python 3.6+,
    supporting encoding, decoding and custom code tables.
'''
VERSION = 0.51
AUTHOR = 'Lemonix'
__enList = {'A': '.-',
'B': '-...',
'C': '-.-.',
'D': '-..',
'E': '.',
'F': '..-.',
'G': '--.',
'H': '....',
'I': '..',
'J': '.---',
'K': '-.-',
'L': '.-..',
'M': '--',
'N': '-.',
'O': '---',
'P': '.--.',
'Q': '--.-',
'R': '.-.',
'S': '...',
'T': '-',
'U': '..-',
'V': '...-',
'W': '.--',
'X': '-..-',
'Y': '-.--',
                'Z': '--..',
'1': '.----',
'2': '..---',
'3': '...--',
'4': '....-',
'5': '.....',
'6': '-....',
'7': '--...',
'8': '---..',
'9': '----.',
'0': '-----',
'.': '.-.-.-',
'/': '-..-.',
'-': '-....-',
'(': '-.--.',
')': '-.--.-',}
__deList = {v:k for k,v in __enList.items()}
    # enList: encoding table, deList: decoding table
def __init__(self, text='', sep=''):
'''
        Initialise the instance:
        set the text, the separator and how spaces are handled.
        If the text contains characters that are not in the lookup table,
        they are encoded via their binary Unicode code points.
'''
self.__text = text.upper()
self.__sep = sep
if self.__sep == ' ':
Morsecoder.__enList.update({' ': '/'})
Morsecoder.__deList.update({'/': ' '})
else:
Morsecoder.__enList.update({' ': ' '})
Morsecoder.__deList.update({' ': ' '})
        # avoid ambiguity: represent a space differently depending on the separator
for i in self.__text:
if i not in Morsecoder.__enList:
                # encode the character via the binary form of its Unicode code point
uni_char = bin(ord(i))[2:].replace('1', '-').replace('0', '.')
Morsecoder.__enList.update({i: uni_char})
Morsecoder.__deList.update({uni_char: i})
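    # Example of the fallback above (added for clarity, not in the original):
    # '?' is not in the table; its code point 63 is 0b111111, so it is encoded
    # as '------' and decoded back to '?' the same way.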
def __str__(self):
return f'''
Instance -> '{type(self).__name__}'
Text({len(self.__text)}) -> '{''.join(self.__text)}'
Sep({len(self.__sep)}) -> '{self.__sep}'
'''
__repr__ = __str__
def setArgs(self, text, sep):
'''
        Set the parameters of the current instance.
'''
self.__text, self.__sep = text.upper(), sep
def getArgs(self):
'''
        Get the parameters of the current instance.
'''
return {
'text': self.__text,
'sep': self.__sep
}
def getEncode(self):
        # En - Morse code encoding
'''
        Yield the Morse encoding of the current text.
'''
try:
for i in self.__text:
yield f'{Morsecoder.__enList[i]}{self.__sep}'
except:
            raise MorsecodeError('Text contains unsupported characters')
def getDecode(self):
        # De - Morse code decoding
'''
        Yield the decoded text for the current Morse code.
'''
try:
self.__text.replace(' ', '')
            # strip spaces
self.__text = self.__text.split(self.__sep)
            # split the code into a list using sep
if self.__text[-1] == '':
                # drop the trailing empty element
self.__text.pop()
for i in self.__text:
yield Morsecoder.__deList[i]
except:
            raise MorsecodeError('Invalid Morse code')
    @staticmethod
    def modify(key, value):
'''
        Modify the encoding and decoding tables.
'''
try:
Morsecoder.__enList.update({key: value})
Morsecoder.__deList.update({value: key})
            # update both the encoding and the decoding table
except:
            raise MorsecodeError('Failed to modify the code tables')
    @staticmethod
    def getList(listType):
'''
        Get the encoding or decoding table.
'''
if listType == 'enList':
return Morsecoder.__enList
elif listType == 'deList':
return Morsecoder.__deList
else:
            raise MorsecodeError('No such lookup table')
if __name__ == '__main__':
    # encoding demo
myCode = Morsecoder(text='Hello World', sep='/')
for values in myCode.getEncode():
print(values, end='')
print()
    # decoding demo
myCode.setArgs(text='...././.-../.-../---/ /.--/---/.-./.-../-../',
sep=myCode.getArgs()['sep']
)
for values in myCode.getDecode():
print(values, end='')
print()
# __str__
print(myCode)
# Doc
print(help(Morsecoder))
'''
My Bilibili channel: https://b23.tv/wxyFrS
Thank u 4 using my program
''' | 22.257143 | 78 | 0.384467 | 370 | 4,674 | 4.540541 | 0.413514 | 0.071429 | 0.02619 | 0.017857 | 0.107143 | 0.075 | 0 | 0 | 0 | 0 | 0 | 0.01124 | 0.409927 | 4,674 | 210 | 79 | 22.257143 | 0.597897 | 0.081729 | 0 | 0.141667 | 0 | 0 | 0.12698 | 0.043249 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0 | 0.016667 | 0.183333 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
648f3c72677968d9346724e918ca2c01342d3bfb | 6,629 | py | Python | third_party/chromite/scripts/sysmon/puppet_metrics_unittest.py | zipated/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | third_party/chromite/scripts/sysmon/puppet_metrics_unittest.py | cangulcan/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | third_party/chromite/scripts/sysmon/puppet_metrics_unittest.py | cangulcan/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | # -*- coding: utf-8 -*-
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for puppet_metrics."""
# pylint: disable=protected-access
from __future__ import absolute_import
from __future__ import print_function
from cStringIO import StringIO
import os
import mock
from chromite.lib import cros_test_lib
from chromite.scripts.sysmon import puppet_metrics
_SUMMARY = '''\
---
version:
config: 1499979608
puppet: "3.4.3"
resources:
changed: 7
failed: 0
failed_to_restart: 0
out_of_sync: 7
restarted: 0
scheduled: 0
skipped: 1
total: 218
time:
config_retrieval: 2.862796974
cron: 0.004638468
exec: 11.494792536
file: 0.618018423
file_line: 0.003589435
filebucket: 0.000341392
group: 0.017957332
ini_subsetting: 0.001235189
mount: 0.001416499
package: 4.315027644000001
schedule: 0.001541641
service: 10.242378408
total: 52.958788377
user: 0.001673407
vcsrepo: 23.393381029
last_run: 1499979671
changes:
total: 7
events:
failure: 0
success: 7
total: 7%
'''
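# Added note (not in the original source): _SUMMARY mirrors the YAML report a puppet agent
# writes to its last_run_summary.yaml; the tests feed it to _PuppetRunSummary via StringIO
# and, for collect_puppet_summary(), through a temp file patched in as LAST_RUN_FILE.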
class TestPuppetRunSummary(cros_test_lib.TestCase):
"""Tests for _PuppetRunSummary."""
def test_config_version(self):
summary = puppet_metrics._PuppetRunSummary(StringIO(_SUMMARY))
self.assertEqual(summary.config_version, 1499979608)
def test_puppet_version(self):
summary = puppet_metrics._PuppetRunSummary(StringIO(_SUMMARY))
self.assertEqual(summary.puppet_version, '3.4.3')
def test_events(self):
summary = puppet_metrics._PuppetRunSummary(StringIO(_SUMMARY))
self.assertEqual(summary.events, {
'failure': 0,
'success': 7
})
def test_resources(self):
summary = puppet_metrics._PuppetRunSummary(StringIO(_SUMMARY))
self.assertEqual(summary.resources, {
'changed': 7,
'failed': 0,
'failed_to_restart': 0,
'out_of_sync': 7,
'restarted': 0,
'scheduled': 0,
'skipped': 1,
'other': 203,
})
def test_times(self):
summary = puppet_metrics._PuppetRunSummary(StringIO(_SUMMARY))
self.assertEqual(summary.times, {
'config_retrieval': 2.862796974,
'cron': 0.004638468,
'exec': 11.494792536,
'file': 0.618018423,
'file_line': 0.003589435,
'filebucket': 0.000341392,
'group': 0.017957332,
'ini_subsetting': 0.001235189,
'mount': 0.001416499,
'other': 0,
'package': 4.315027644000001,
'schedule': 0.001541641,
'service': 10.242378408,
'user': 0.001673407,
'vcsrepo': 23.393381029,
})
def test_last_run_time(self):
summary = puppet_metrics._PuppetRunSummary(StringIO(_SUMMARY))
self.assertEqual(summary.last_run_time, 1499979671)
class TestPuppetMetrics(cros_test_lib.TempDirTestCase):
"""Tests for puppet_metrics."""
def setUp(self):
patcher = mock.patch('infra_libs.ts_mon.common.interface.state.store',
autospec=True)
self.store = patcher.start()
self.addCleanup(patcher.stop)
self.tempfile = os.path.join(self.tempdir, 'last_run_summary.yaml')
def test_collect(self):
with open(self.tempfile, 'w') as f:
f.write(_SUMMARY)
with mock.patch('time.time', return_value=1500000000):
with mock.patch.object(puppet_metrics, 'LAST_RUN_FILE', self.tempfile):
puppet_metrics.collect_puppet_summary()
setter = self.store.set
calls = [
mock.call('puppet/version/config', (), None,
1499979608, enforce_ge=mock.ANY),
mock.call('puppet/version/puppet', (), None,
'3.4.3', enforce_ge=mock.ANY),
mock.call('puppet/events', ('failure',), None,
0, enforce_ge=mock.ANY),
mock.call('puppet/events', ('success',), None,
7, enforce_ge=mock.ANY),
mock.call('puppet/resources', ('scheduled',), None,
0, enforce_ge=mock.ANY),
mock.call('puppet/resources', ('skipped',), None,
1, enforce_ge=mock.ANY),
mock.call('puppet/resources', ('restarted',), None,
0, enforce_ge=mock.ANY),
mock.call('puppet/resources', ('changed',), None,
7, enforce_ge=mock.ANY),
mock.call('puppet/resources', ('failed',), None,
0, enforce_ge=mock.ANY),
mock.call('puppet/resources', ('other',), None,
203, enforce_ge=mock.ANY),
mock.call('puppet/resources', ('failed_to_restart',), None,
0, enforce_ge=mock.ANY),
mock.call('puppet/resources', ('out_of_sync',), None,
7, enforce_ge=mock.ANY),
mock.call('puppet/times', ('vcsrepo',), None,
23.393381029, enforce_ge=mock.ANY),
mock.call('puppet/times', ('exec',), None,
11.494792536, enforce_ge=mock.ANY),
mock.call('puppet/times', ('cron',), None,
0.004638468, enforce_ge=mock.ANY),
mock.call('puppet/times', ('file_line',), None,
0.003589435, enforce_ge=mock.ANY),
mock.call('puppet/times', ('config_retrieval',), None,
2.862796974, enforce_ge=mock.ANY),
mock.call('puppet/times', ('user',), None,
0.001673407, enforce_ge=mock.ANY),
mock.call('puppet/times', ('file',), None,
0.618018423, enforce_ge=mock.ANY),
mock.call('puppet/times', ('group',), None,
0.017957332, enforce_ge=mock.ANY),
mock.call('puppet/times', ('service',), None,
10.242378408, enforce_ge=mock.ANY),
mock.call('puppet/times', ('package',), None,
4.315027644000001, enforce_ge=mock.ANY),
mock.call('puppet/times', ('mount',), None,
0.001416499, enforce_ge=mock.ANY),
mock.call('puppet/times', ('schedule',), None,
0.001541641, enforce_ge=mock.ANY),
mock.call('puppet/times', ('other',), None,
0.0, enforce_ge=mock.ANY),
mock.call('puppet/times', ('ini_subsetting',), None,
0.001235189, enforce_ge=mock.ANY),
mock.call('puppet/times', ('filebucket',), None,
0.000341392, enforce_ge=mock.ANY),
mock.call('puppet/age', (), None,
20329.0, enforce_ge=mock.ANY),
]
setter.assert_has_calls(calls)
self.assertEqual(len(setter.mock_calls), len(calls))
| 33.821429 | 77 | 0.607633 | 774 | 6,629 | 5.050388 | 0.224806 | 0.057304 | 0.100281 | 0.114607 | 0.568688 | 0.553083 | 0.53671 | 0.521361 | 0.39166 | 0.310565 | 0 | 0.116757 | 0.251923 | 6,629 | 195 | 78 | 33.994872 | 0.671506 | 0.045256 | 0 | 0.10241 | 0 | 0 | 0.25107 | 0.017277 | 0 | 0 | 0 | 0 | 0.048193 | 1 | 0.048193 | false | 0 | 0.042169 | 0 | 0.10241 | 0.006024 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6490cc9b76431e255aeab4722b02c97b8014ad01 | 5,628 | py | Python | src/flocker/blueprints/mvg-frame/__init__.py | Muxelmann/home-projects | 85bd06873174b9c5c6276160988c19b460370db8 | [
"MIT"
] | null | null | null | src/flocker/blueprints/mvg-frame/__init__.py | Muxelmann/home-projects | 85bd06873174b9c5c6276160988c19b460370db8 | [
"MIT"
] | null | null | null | src/flocker/blueprints/mvg-frame/__init__.py | Muxelmann/home-projects | 85bd06873174b9c5c6276160988c19b460370db8 | [
"MIT"
] | null | null | null | import os
import time
from flask import Blueprint, render_template, redirect, url_for, request, current_app
from . import mvg
from . import displays
from PIL import Image, ImageFont, ImageDraw
def create_bp(app):
bp_mvg = Blueprint('mvg-frame', __name__, url_prefix='/mvg-frame')
displays.init(app)
@bp_mvg.route('/index/')
@bp_mvg.route('/')
def index():
return render_template('mvg-frame/index.html.j2', data=displays.data())
@bp_mvg.route("/updateData/<string:mac>", methods={'GET', 'POST'})
def update_data(mac):
data = {}
# Check if a specific station ID has been passed
if 'station_id' in request.args:
station_id = request.args.get('station_id')
station_name = mvg.get_name_for_id(station_id)
# Only set the data if ID is valid, i.e. returns a valid station name
if station_name is not None:
data['station_id'] = station_id
data['station_name'] = station_name
# Populate data with form inputs
for key, value in request.form.items():
if key in ['station_name']:
data[key] = value
# vv Makes sure that the old station ID is not accidentally kept
data['station_id'] = None
if key in ['offset_top', 'offset_bottom', 'offset_left', 'offset_right'] and value.isnumeric():
data[key] = int(value)
        # Update the stored data
displays.update(mac, data)
# Check if a station ID has already been passed / set
if data['station_id'] is None:
# Find all station IDs for the station name
station_ids = mvg.get_ids_for_satation(data['station_name'])
# If not exactly one station was found...
if len(station_ids) == 1:
# ... save the found station ID
for key, value in station_ids.items():
displays.update(mac, {'station_id': key})
elif len(station_ids) > 1:
# ... or let the user choose and pass (via GET) a station ID
return render_template('mvg-frame/index.html.j2', mac=mac, station_ids=station_ids)
return redirect(url_for('mvg-frame.index'))
# Functions called from frame
@bp_mvg.route('/update/<string:mac>')
def update(mac):
# Make a new empty image in the size of the screen
img_path = os.path.join(current_app.instance_path, 'mvg-{}.png'.format(mac.replace(':', '')))
(w, h) = displays.size_for(mac)
img = Image.new('RGB', (w, h), (0, 0, 0))
draw = ImageDraw.Draw(img)
font_dir = os.path.join('/'.join(os.path.abspath(__file__).split('/')[0:-1]), 'static')
font_normal = ImageFont.truetype(os.path.join(font_dir, 'EXCITE.otf'), 42)
font_bold = ImageFont.truetype(os.path.join(font_dir, 'EXCITE_B.otf'), 42)
station_id, _ = displays.station_for(mac)
if station_id is None:
draw.text((w/2, h/2), "STATION ERROR", fill=(255, 255, 255), font=font_bold, anchor='mm')
img.save(img_path, 'PNG')
return "0"
(o_t, o_b, o_l, o_r) = displays.offset_for(mac)
draw.polygon([
o_l, o_t,
o_l, h-o_b,
w-o_r, h-o_b,
w-o_r, o_t,
], fill=(255, 255, 255))
# Get the departures for the station ID
departures = mvg.get_departures_for_id(station_id, limit=7)
if len(departures) == 0:
draw.text((w/2, h/2), "NO DATA", fill=(0, 0, 0), font=font_bold, anchor='mm')
img.save(img_path, 'PNG')
return "0"
# departure_times = "\n".join([time.strftime('%H:%M', d['departure']) for d in departures])
departure_minutes = "\n".join(["{:.0f}".format((time.mktime(d['departure'])-time.time())/60) for d in departures])
departure_service = "\n".join(["{} {}".format(d['service'], d['destination']) for d in departures])
draw.multiline_text((o_l + 10, o_t+5), departure_minutes, font=font_bold, fill=(0, 0, 0))
draw.multiline_text((o_l + 100, o_t+5), departure_service, font=font_normal, fill=(0, 0, 0))
img.save(img_path, 'PNG')
return "1"
@bp_mvg.route('/imageData/<string:mac>') # GET: segCount & seg
def image_data(mac):
seg_count = int(request.args.get('segCount', default="1"))
seg = int(request.args.get('seg', default="0"))
img_path = os.path.join(current_app.instance_path, 'mvg-{}.png'.format(mac.replace(':', '')))
img = Image.open(img_path)
(w, h) = img.size
img = img.rotate(180)
crop_box = (0, seg*h/seg_count, w, (seg+1)*h/seg_count)
img = img.crop(crop_box)
(w, h) = img.size
data = ''
pixels = img.load()
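        # Added note (not in the original source): each group of 4 horizontal pixels
        # is packed into one byte, two bits per pixel ('11' = white, '00' = black,
        # '01' = anything else), and emitted as two hex characters per byte.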
for y in range(h):
for x in range(0, w, 4):
black = [all([pixel == 0 for pixel in pixels[x+px, y]]) for px in range(4)]
white = [all([pixel == 255 for pixel in pixels[x+px, y]]) for px in range(4)]
new_data = ''
for z in range(4):
if white[z]:
new_data += '11'
elif black[z]:
new_data += '00'
else:
new_data += '01'
data += '{:02x}'.format(int(new_data, base=2))
return data
@bp_mvg.route('/delayTime/<string:mac>')
def delay_time(mac):
return "30000"
return bp_mvg | 39.083333 | 122 | 0.555792 | 781 | 5,628 | 3.850192 | 0.254802 | 0.053874 | 0.019953 | 0.013967 | 0.203193 | 0.163951 | 0.14433 | 0.14433 | 0.091786 | 0.091786 | 0 | 0.022675 | 0.302594 | 5,628 | 144 | 123 | 39.083333 | 0.743439 | 0.120469 | 0 | 0.09 | 0 | 0 | 0.099291 | 0.023506 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06 | false | 0 | 0.06 | 0.02 | 0.21 | 0.02 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
649106bcf86144f6cc9ff5c528dd98e420f849a0 | 4,770 | py | Python | friendly/runtime_errors/value_error.py | MrGreenTea/friendly | 091f6af1d3c2be8fee078e52db6e16074d5518e5 | [
"MIT"
] | null | null | null | friendly/runtime_errors/value_error.py | MrGreenTea/friendly | 091f6af1d3c2be8fee078e52db6e16074d5518e5 | [
"MIT"
] | null | null | null | friendly/runtime_errors/value_error.py | MrGreenTea/friendly | 091f6af1d3c2be8fee078e52db6e16074d5518e5 | [
"MIT"
] | null | null | null | """value_error.py
Collection of functions useful in parsing ValueError messages and
providing a more detailed explanation.
"""
import re
from ..my_gettext import current_lang, no_information, internal_error
from .. import info_variables
from .. import debug_helper
from .. import utils
convert_type = info_variables.convert_type
MESSAGES_PARSERS = []
def add_message_parser(func):
"""A simple decorator that adds a function to parse a specific message
to the list of known parsers."""
MESSAGES_PARSERS.append(func)
def wrapper(*args):
return func(*args)
return wrapper
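# Illustrative sketch (not part of the original module): a function registered with
# @add_message_parser receives (message, frame, tb_data) and returns a dict such as
# {"cause": ...} when it recognises the message, or {} otherwise; _get_cause() below
# simply tries each registered parser in turn.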
def get_cause(value, frame, tb_data):
try:
return _get_cause(value, frame, tb_data)
except Exception as e:
debug_helper.log_error(e)
return {"cause": internal_error(), "suggest": internal_error()}
def _get_cause(value, frame, tb_data):
_ = current_lang.translate
message = str(value)
for parser in MESSAGES_PARSERS:
cause = parser(message, frame, tb_data)
if cause:
return cause
return {"cause": no_information()}
def _unpacking():
_ = current_lang.translate
return _(
"Unpacking is a convenient way to assign a name,\n"
"to each item of an iterable.\n"
)
def get_iterable(code, frame):
"""gets an iterable object and its type as a string."""
try:
# As a ValueError exception has been raised, Python has already evaluated
# all the relevant code parts. Thus, using eval should be completely safe.
obj = utils.eval_expr(code, frame)
# obj = eval(code, frame.f_globals, frame.f_locals)
except Exception: # noqa
return None, None
if isinstance(obj, dict):
iterable = "dict"
elif isinstance(obj, list):
iterable = "list"
elif isinstance(obj, set):
iterable = "set"
elif isinstance(obj, str):
iterable = "str"
elif isinstance(obj, tuple):
iterable = "tuple"
else:
iterable = None
return obj, iterable
@add_message_parser
def not_enough_values_to_unpack(message, frame, tb_data):
_ = current_lang.translate
pattern1 = re.compile(r"not enough values to unpack \(expected (\d+), got (\d+)\)")
match1 = re.search(pattern1, message)
pattern2 = re.compile(
r"not enough values to unpack \(expected at least (\d+), got (\d+)\)"
)
match2 = re.search(pattern2, message)
if match1 is None and match2 is None:
return {}
match = match1 if match2 is None else match2
nb_names = match.group(1)
length = match.group(2)
if tb_data.bad_line.count("=") != 1:
cause = _unpacking() + _(
"In this instance, there are more names ({nb_names})\n"
"than {length}, the length of the iterable.\n"
).format(nb_names=nb_names, length=length)
return {"cause": cause}
_lhs, rhs = tb_data.bad_line.split("=")
obj, iterable = get_iterable(rhs, frame)
if obj is None or iterable is None:
cause = _unpacking() + _(
"In this instance, there are more names ({nb_names})\n"
"than {length}, the length of the iterable.\n"
).format(nb_names=nb_names, length=length)
return {"cause": cause}
cause = _unpacking() + _(
"In this instance, there are more names ({nb_names})\n"
"than the length of the iterable, {iter_type} of length {length}.\n"
).format(nb_names=nb_names, iter_type=convert_type(iterable), length=length)
return {"cause": cause}
@add_message_parser
def too_many_values_to_unpack(message, frame, tb_data):
_ = current_lang.translate
pattern = re.compile(r"too many values to unpack \(expected (\d+)\)")
match = re.search(pattern, message)
if match is None:
return {}
nb_names = match.group(1)
if tb_data.bad_line.count("=") != 1:
cause = _unpacking() + _(
"In this instance, there are fewer names ({nb_names})\n"
"than the length of the iterable.\n"
).format(nb_names=nb_names)
return {"cause": cause}
_lhs, rhs = tb_data.bad_line.split("=")
obj, iterable = get_iterable(rhs, frame)
if obj is None or iterable is None or not hasattr(obj, "__len__"):
cause = _unpacking() + _(
"In this instance, there are fewer names ({nb_names})\n"
"than the length of the iterable.\n"
).format(nb_names=nb_names)
return {"cause": cause}
cause = _unpacking() + _(
"In this instance, there are fewer names ({nb_names})\n"
"than the length of the iterable, {iter_type} of length {length}.\n"
).format(nb_names=nb_names, iter_type=convert_type(iterable), length=len(obj))
return {"cause": cause}
| 31.176471 | 87 | 0.639832 | 641 | 4,770 | 4.588144 | 0.24805 | 0.047603 | 0.048963 | 0.040802 | 0.478069 | 0.442367 | 0.427406 | 0.409045 | 0.409045 | 0.381163 | 0 | 0.004462 | 0.248218 | 4,770 | 152 | 88 | 31.381579 | 0.815672 | 0.098323 | 0 | 0.418182 | 0 | 0 | 0.21791 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072727 | false | 0 | 0.045455 | 0.009091 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6492a201029a7d225176b66fd3ca085b9340bf5b | 5,951 | py | Python | bctree/bctree.py | lad/bctree | 7f99917fab9071af8ad870ba39304bd804e78e12 | [
"Apache-2.0"
] | 1 | 2016-02-16T20:07:34.000Z | 2016-02-16T20:07:34.000Z | bctree/bctree.py | lad/bctree | 7f99917fab9071af8ad870ba39304bd804e78e12 | [
"Apache-2.0"
] | null | null | null | bctree/bctree.py | lad/bctree | 7f99917fab9071af8ad870ba39304bd804e78e12 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""BeatCleaver Unsorted Tree Implementation."""
from collections import deque
# pylint: disable-msg=W0212
# pylint can't figure out that we're accessing a protected member of our own
# class.
class BcTree(object):
"""Unsorted Native Python Tree."""
DFS = 0
BFS = 1
def __init__(self, value=None):
self.value = value
self._children = []
def __eq__(self, other):
"""Equality relies upon matching values only not children."""
return isinstance(other, BcTree) and self.value == other.value
    # Check for existing values / add a 'replace' arg?
def add(self, value):
"""Add the given value as an immediate child."""
child = BcTree(value)
self._children.append(child)
return child
def get(self, value):
"""Get the immediate child that matches the given value."""
if self.value == value:
return self
for child in self._children:
if child.value == value:
return child
return None
def add_to(self, parent_values, value):
"""Add a value using a list of values as a "path" to the parent node.
The parent_values list is expected to be a "path" of values. The
value provided will be added to the last node in the parent list.
"""
parent = self.get_from(parent_values)
if parent:
return parent.add(value)
else:
return None
def get_from(self, values):
"""Get the node using the given value list."""
return self._get_from(values)[1]
def _get_from(self, values):
"""Get the parent and the node using the given value list."""
if values[0] != self.value:
return None, None
elif len(values) == 1:
return None, self
child = self
for value in values[1:]:
parent = child
for child in parent._children:
if child.value == value:
break
else:
return None, None
return parent, child
def extend(self, tree):
"""Add the given tree as an immediate child."""
self._children.append(tree)
return tree
def move(self, dst_value, src_value, order=DFS):
"""Move a node and its decendents.
The source and destination nodes are identified by their values
only. A find operation is performed for both using the given
order."""
if src_value == self.value:
raise ValueError('Moving the root of the tree is not supported.')
dst = self.find(dst_value, order=order)
        if not dst:
            raise ValueError('Destination value "{}" not found in tree.'
                             .format(dst_value))
src_parent, src = self._find(src_value, order=order)
if not src_parent or not src:
raise ValueError('Source value "{}" not found in tree.'
.format(src_value))
src_parent._children.remove(src)
dst._children.append(src)
def move_from(self, dst_parent_values, src_values):
"""Move a node and its decendents.
The source and destination nodes are identified by a "path" of
values from the root node to the desired node."""
dst = self.get_from(dst_parent_values)
if not dst:
raise ValueError('Destination values ({}) not found in tree.'
.format(dst_parent_values))
src_parent, src = self._get_from(src_values)
if not src_parent or not src:
raise ValueError('Source values ({}) not found in tree.'
.format(src_values))
src_parent._children.remove(src)
dst._children.append(src)
def remove(self, value):
"""Remove the given value from the tree."""
parent, child = self._find(value, self.DFS)
if not parent or not child:
raise ValueError('Value "{}" not found in tree.'.format(value))
parent._children.remove(child)
return child
def remove_from(self, values):
"""Remove a value from the tree using a "path" of values."""
parent, child = self._get_from(values)
if not parent or not child:
raise ValueError('Values ({}) not found in tree.'
.format(values))
parent._children.remove(child)
def find(self, value, order=DFS):
"""Find the given value in the tree using the given order."""
return self._find(value, order=order)[1]
def _find(self, value, order):
"""Return parent and child for a matching child value."""
if self.value == value:
return None, self
for parent, child in self._iterate(root=False, order=order):
if child.value == value:
return parent, child
return None, None
def __iter__(self):
"""Iterate through the tree, depth first."""
for _, tree in self._iterate():
yield tree
def iterate(self, root=True, order=DFS):
"""Iterate through the tree in the given order."""
for _, tree in self._iterate(root=root, order=order):
yield tree
def _iterate(self, root=True, order=DFS):
"""Iterate through the tree yielding a tuple of (parent, child)"""
if root:
yield None, self
to_visit = deque([(self, c) for c in self._children])
if order == self.BFS:
add_to_visit = to_visit.extend
elif order == self.DFS:
add_to_visit = lambda c: to_visit.extendleft(reversed(c))
else:
raise ValueError('Invalid "order" argument')
while to_visit:
parent, current = to_visit.popleft()
yield parent, current
add_to_visit([(current, c) for c in current._children])
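# Minimal usage sketch of the value- and path-based API (illustrative demo only).
if __name__ == '__main__':
    root = BcTree('a')
    root.add('b')
    root.add_to(['a', 'b'], 'c')  # add 'c' under the path a -> b
    print([node.value for node in root.iterate(order=BcTree.BFS)])  # ['a', 'b', 'c']
    root.move('a', 'c')  # re-parent 'c' directly under 'a'
    print(root.find('c') is not None)  # True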
| 33.621469 | 77 | 0.584104 | 762 | 5,951 | 4.450131 | 0.181102 | 0.031849 | 0.023002 | 0.024771 | 0.323503 | 0.258036 | 0.210852 | 0.179298 | 0.158065 | 0.158065 | 0 | 0.002728 | 0.322467 | 5,951 | 176 | 78 | 33.8125 | 0.838294 | 0.232062 | 0 | 0.345455 | 0 | 0 | 0.063481 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.154545 | false | 0 | 0.009091 | 0 | 0.354545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64949fc4b1f9f8284a4942246dab5340df67e65c | 773 | py | Python | src/main.py | juhovan/stuk-dose-rate-exporter | 7eec62cc3c9dad48a0a2868dcb55e9a9b4bd569e | [
"MIT"
] | null | null | null | src/main.py | juhovan/stuk-dose-rate-exporter | 7eec62cc3c9dad48a0a2868dcb55e9a9b4bd569e | [
"MIT"
] | 1 | 2022-03-08T20:52:02.000Z | 2022-03-08T20:52:02.000Z | src/main.py | juhovan/stuk-dose-rate-exporter | 7eec62cc3c9dad48a0a2868dcb55e9a9b4bd569e | [
"MIT"
] | null | null | null | #!/bin/python
import datetime
from http.server import HTTPServer, BaseHTTPRequestHandler
import dose_rates
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
if self.path == '/metrics':
results = dose_rates.get_data()
body = '\n'.join(results)
self.send_response(200)
self.send_header(
"Content-type", "text/plain; charset=utf-8; version=0.0.4")
self.end_headers()
self.wfile.write(body.encode())
else:
body = "404 Not Found"
self.send_response(404)
self.end_headers()
self.wfile.write(body.encode())
httpd = HTTPServer(('0.0.0.0', 8080), SimpleHTTPRequestHandler)
httpd.serve_forever()
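# Once serve_forever() is running, Prometheus-compatible output can be scraped from
# http://<host>:8080/metrics (illustrative; host and port as configured above).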
| 26.655172 | 75 | 0.609314 | 87 | 773 | 5.298851 | 0.597701 | 0.017354 | 0.069414 | 0.078091 | 0.164859 | 0.164859 | 0.164859 | 0.164859 | 0 | 0 | 0 | 0.0373 | 0.271669 | 773 | 28 | 76 | 27.607143 | 0.781528 | 0.015524 | 0 | 0.2 | 0 | 0 | 0.107895 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.15 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6496e828b7afe578a9d14aa5a0c97a8185b15a63 | 7,487 | py | Python | service/api.py | psorianom/csv_detective_api | 7c96f497374d842226a95a26cb6627ac22cd799b | [
"MIT"
] | 2 | 2020-02-04T05:24:56.000Z | 2021-05-05T17:22:55.000Z | service/api.py | psorianom/csv_detective_api | 7c96f497374d842226a95a26cb6627ac22cd799b | [
"MIT"
] | 10 | 2019-10-24T13:29:59.000Z | 2022-02-26T17:06:15.000Z | service/api.py | psorianom/csv_detective_api | 7c96f497374d842226a95a26cb6627ac22cd799b | [
"MIT"
] | 2 | 2019-12-30T23:26:53.000Z | 2020-03-27T17:23:28.000Z | #!flask/bin/python
import os
import sys
from collections import defaultdict
sys.path.append("./csv_detective_ml") # horrible hack to load my features class to load my ML pipeline :/
from flask import Flask
from flask import request
from flask import jsonify
from flask_restplus import Api, Resource, fields
from flask_cors import CORS
from tempfile import NamedTemporaryFile
import logging
import json
from joblib import load
from utils.reference_matcher import link_reference_datasets
from utils.parsers import file_upload
from csv_detective_ml.analyze_csv_cli import analyze_csv
logger = logging.getLogger()
logger.setLevel(logging.ERROR)
logger.addHandler(logging.StreamHandler())
app = Flask(__name__)
CORS(app)
api = Api(app=app,
version="0.1",
title="CSV Detective API",
description="Get info about the data contained in a DGF CSV file.")
ns_csv_detective = api.namespace('csv_detective', description='Get data from DGF CSVs')
resource_model = api.model('Analysis parameters',
{'resource_id': fields.String(required=True,
description="DGF Resource ID or CSV path",
help="Resource ID cannot be blank")
})
type_model = api.model('Type analysis parameters',
{'target_type': fields.String(required=True,
description="Target type to find among resources/datasets",
help="Resource ID cannot be blank")
})
DATASET_CSV_INFO = {}
TYPE_CSV_INFO = defaultdict(lambda: defaultdict(dict))
ML_PIPELINE = None
def load_ml_model():
global ML_PIPELINE
logger.info("Loading ML model...")
ML_PIPELINE = load('./csv_detective_ml/models/model.joblib')
return ML_PIPELINE
load_ml_model()
@ns_csv_detective.route("/dataset_id")
class CSVDetectiveDataset(Resource):
@api.expect(resource_model)
def get(self):
global DATASET_CSV_INFO
try:
resource_id = request.args.get('resource_id')
if resource_id in DATASET_CSV_INFO:
response = DATASET_CSV_INFO[resource_id]
response = reformat_response(response)
response = link_reference_datasets(response)
return jsonify(response)
else:
logger.info("Resource id not found in 'database'.")
return jsonify({"error": "ID {} not found".format(resource_id)})
except Exception as e:
return jsonify({"error": str(e)})
@ns_csv_detective.route("/resource_id")
class CSVDetectiveResource(Resource):
@api.expect(resource_model)
def get(self):
global DATASET_CSV_INFO
try:
resource_id = request.args.get('resource_id')
if resource_id in DATASET_CSV_INFO:
response = DATASET_CSV_INFO[resource_id]
response = reformat_response(response)
response = link_reference_datasets(response)
return jsonify(response)
else:
logger.info("Resource id not found in 'database'.")
return jsonify({"error": "ID {} not found".format(resource_id)})
except Exception as e:
return jsonify({"error": str(e)})
@ns_csv_detective.expect(file_upload)
def post(self):
args = file_upload.parse_args()
if "resource_csv" in args and args["resource_csv"].mimetype != "text/csv":
return jsonify({"error": "No uploaded file or the file seems to not be a CSV."})
if ML_PIPELINE is None:
analysis_type = "rule"
else:
analysis_type = "both"
uploaded_csv = args["resource_csv"]
tmp = NamedTemporaryFile(delete=False)
try:
tmp.write(uploaded_csv.read())
tmp.close()
_, response = analyze_csv(tmp.name, analysis_type=analysis_type, pipeline=ML_PIPELINE, num_rows=500)
finally:
os.remove(tmp.name)
response = reformat_response(response)
response = link_reference_datasets(response)
return jsonify(response)
@ns_csv_detective.route("/type")
class CSVDetectiveType(Resource):
@api.expect(type_model)
def get(self):
global TYPE_CSV_INFO
try:
target_type = request.args.get('target_type')
if target_type in TYPE_CSV_INFO:
response = TYPE_CSV_INFO[target_type]
return jsonify(response)
else:
logger.info("Type not found in 'database'.")
return jsonify({"error": "Type {} not found".format(target_type)})
except Exception as e:
return jsonify({"error": str(e)})
@ns_csv_detective.route("/isAlive")
class IsAlive(Resource):
def get(self):
return "True"
def reformat_response(response):
response = dict(response)
new_response = {}
if "columns_rb" in response:
reformatted_rb = {k: v[0] for k, v in response["columns_rb"].items()}
new_response["columns_rb"] = reformatted_rb
response.pop("columns_rb")
if "columns_ml" in response:
reformatted_ml = {k: v[0] for k, v in response["columns_ml"].items()}
new_response["columns_ml"] = reformatted_ml
response.pop("columns_ml")
new_response["metadata"] = dict(response)
return new_response
def load_result_dict():
global DATASET_CSV_INFO
try:
with open("./data/interim/2019-10-25-11_59_dgf_friendly.json", "r") as filo:
logger.info("Loading JSON file with csv info...")
DATASET_CSV_INFO = json.load(filo)
except Exception as e:
logger.error("Error reading JSON data file: {0}".format(str(e)))
exit(1)
return DATASET_CSV_INFO
def create_type_index(dataset_csv_info):
    """
    Invert the results dict to get a mapping of types --> dataset (and resource). Something like:
    {
        type1: {
            datasetID1: { resourceID1: {csv_detective results}, ... }
        }
    }
    :return:
    """
results_keynames = ["columns_rb", "columns_ml"]
def extract_types_detected(csv_detective_results):
detected_types = set([])
for res in results_keynames:
if res not in csv_detective_results:
continue
detected_types.update([f[0] for f in csv_detective_results[res].values()])
return detected_types
for dataset_id, resources in dataset_csv_info.items():
for resource_id, csv_detective_result in resources.items():
if not any([f in csv_detective_result for f in results_keynames]):
continue
for type_detected in extract_types_detected(csv_detective_result):
TYPE_CSV_INFO[type_detected][dataset_id][resource_id] = csv_detective_result
return TYPE_CSV_INFO
if __name__ == '__main__':
# load csv_detective info json
DATASET_CSV_INFO = load_result_dict()
    TYPE_CSV_INFO = create_type_index(DATASET_CSV_INFO)
if 'ENVIRONMENT' in os.environ:
if os.environ['ENVIRONMENT'] == 'production':
app.run(port=80, host='0.0.0.0')
if os.environ['ENVIRONMENT'] == 'local':
app.run(port=5000, host='0.0.0.0')
else:
app.run(port=5000, host='0.0.0.0')
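# Illustrative client call (assumes the service is running locally on port 5000 as above):
#
#   import requests
#   info = requests.get("http://localhost:5000/csv_detective/resource_id",
#                       params={"resource_id": "<resource-id>"}).json()
#
# The JSON reply typically carries "columns_rb"/"columns_ml" column-type guesses plus a
# "metadata" block (see reformat_response above).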
| 33.573991 | 112 | 0.625351 | 908 | 7,487 | 4.932819 | 0.21696 | 0.034383 | 0.04376 | 0.016968 | 0.322617 | 0.268587 | 0.236213 | 0.228176 | 0.228176 | 0.207189 | 0 | 0.008684 | 0.277147 | 7,487 | 222 | 113 | 33.725225 | 0.818921 | 0.042073 | 0 | 0.323353 | 0 | 0 | 0.1448 | 0.012195 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05988 | false | 0 | 0.08982 | 0.005988 | 0.275449 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6497557f12dce30fe9d34eda6e562520909cd934 | 624 | py | Python | Python-code-snippets-001-100/080-crop image and save.py | abartoha/python-snippets-ref | 04e4feada96077f0e849b277204c012194e8fbcd | [
"Unlicense"
] | null | null | null | Python-code-snippets-001-100/080-crop image and save.py | abartoha/python-snippets-ref | 04e4feada96077f0e849b277204c012194e8fbcd | [
"Unlicense"
] | null | null | null | Python-code-snippets-001-100/080-crop image and save.py | abartoha/python-snippets-ref | 04e4feada96077f0e849b277204c012194e8fbcd | [
"Unlicense"
] | null | null | null | '''
80-Crop Image
Source: various sources and shambleized
pip install opencv-python
'''
import cv2
# Load image to crop.
img = cv2.imread('weird.jpg', cv2.IMREAD_UNCHANGED)
# Set crop dimensions
# We first supply the startY and endY coordinates,
#followed by the startX and endX coordinates to the slice.
#top-line-230 bottom line-360, left side-205: right side-475
cropped = img[230:360, 205:475]
# Display original and cropped image.
cv2.imshow("Original", img)
cv2.imshow("Cropped", cropped)
# Save.
cv2.imwrite('cropped-image.jpg', cropped)
# Wait for any key press.
cv2.waitKey(0)
# Close.
cv2.destroyAllWindows()
| 20.8 | 61 | 0.740385 | 96 | 624 | 4.802083 | 0.625 | 0.02603 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.065421 | 0.142628 | 624 | 29 | 62 | 21.517241 | 0.796262 | 0.572115 | 0 | 0 | 0 | 0 | 0.163347 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
649796fc6892373cd358b9ae0ff0471d9dffe9cf | 1,474 | py | Python | own/recursive_lomuto_quick_select_k_set_coderpad.py | MrCsabaToth/IK | 713f91c28af7b4a964ba854ede9fec73bf0c4682 | [
"Apache-2.0"
] | null | null | null | own/recursive_lomuto_quick_select_k_set_coderpad.py | MrCsabaToth/IK | 713f91c28af7b4a964ba854ede9fec73bf0c4682 | [
"Apache-2.0"
] | null | null | null | own/recursive_lomuto_quick_select_k_set_coderpad.py | MrCsabaToth/IK | 713f91c28af7b4a964ba854ede9fec73bf0c4682 | [
"Apache-2.0"
] | null | null | null | import random
def quick_select_helper(a, beg, end, k):
if beg >= end:
return
if end == beg + 1:
if a[beg] > a[end]:
a[beg], a[end] = a[end], a[beg]
return
pivot_index = random.randint(beg, end)
pivot = a[pivot_index]
# 1 swap pivot with beginning
if pivot_index != beg:
a[beg], a[pivot_index] = a[pivot_index], a[beg]
    # 2 scan rest of the array and partition
smaller = beg
for bigger in range(beg + 1, end + 1):
if a[bigger] < pivot:
smaller += 1
a[smaller], a[bigger] = a[bigger], a[smaller]
# 3 swap back pivot with beginning
a[beg], a[smaller] = a[smaller], a[beg]
if smaller == k:
return
    # One half of the problem is not relevant any more, we home in on the interesting part
if smaller > k:
quick_select_helper(a, beg, smaller - 1, k)
else:
quick_select_helper(a, smaller + 1, end, k)
def quick_select(a, k):
quick_select_helper(a, 0, len(a) - 1, k - 1)
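# Illustrative behaviour (follows from the partitioning above): after quick_select(a, k),
# the first k slots of a hold the k smallest elements in arbitrary order, e.g.
#   a = [9, 1, 8, 2, 7]; quick_select(a, 2)  ->  sorted(a[:2]) == [1, 2]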
import pytest
@pytest.mark.parametrize("i", range(30))
def test_quick_select_rng(i):
rng = random.SystemRandom()
length = rng.randint(10, 40)
a = [rng.randint(0, 1000) for j in range(length)]
k = length // 2
a2 = sorted(a)
print(length, k)
quick_select(a, k)
    a3 = sorted(a[:k])  # the k smallest values selected in place by quick_select
print(a)
print(a2)
print(a2[:k])
print(a3)
for j in range(k):
assert a2[j] == a3[j]
pytest.main()
| 23.03125 | 90 | 0.57327 | 233 | 1,474 | 3.549356 | 0.300429 | 0.043531 | 0.082225 | 0.087062 | 0.095526 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030769 | 0.294437 | 1,474 | 63 | 91 | 23.396825 | 0.764423 | 0.12483 | 0 | 0.068182 | 0 | 0 | 0.000778 | 0 | 0 | 0 | 0 | 0 | 0.022727 | 1 | 0.068182 | false | 0 | 0.045455 | 0 | 0.181818 | 0.113636 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6497a361fbfe4776b2db4e33d8ba9a4805544636 | 1,212 | py | Python | app.py | Programmer-RD-AI/Simple-Blog | 188343b9ed039690e2081026ec604c37d821ad57 | [
"Apache-2.0"
] | null | null | null | app.py | Programmer-RD-AI/Simple-Blog | 188343b9ed039690e2081026ec604c37d821ad57 | [
"Apache-2.0"
] | null | null | null | app.py | Programmer-RD-AI/Simple-Blog | 188343b9ed039690e2081026ec604c37d821ad57 | [
"Apache-2.0"
] | null | null | null | from flask import *
import pymongo
from pymongo import *
app = Flask(__name__)
app.debug = True
app.secret_key = 'test'
cluster = MongoClient("mongodb://Ranuga:ranuga2008@cluster0-shard-00-00.odlbl.mongodb.net:27017,cluster0-shard-00-01.odlbl.mongodb.net:27017,cluster0-shard-00-02.odlbl.mongodb.net:27017/myFirstDatabase?ssl=true&replicaSet=atlas-spv504-shard-0&authSource=admin&retryWrites=true&w=majority")
db = cluster['Blog']
collection = db['Blog']
@app.route('/')
def home() -> 'html':
if collection.find_one() is None:
        blogs = [{'Head':'there are no blogs yet','Desc':'there are no blogs yet'}]
return render_template('home.html',blogs=blogs)
blogs = []
for blog in collection.find():
blogs.append(blog)
return render_template('home.html',blogs=blogs)
@app.route('/add/blog',methods=['POST','GET'])
@app.route('/add/blog/',methods=['POST','GET'])
def add_blog() -> 'html':
if request.method == 'POST':
head = request.form['head']
desc = request.form['desc']
collection.insert_one({'Head':head,'Desc':desc})
return redirect('/')
else:
return render_template('add_blog.html')
if __name__ == '__main__':
app.run()
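# Illustrative usage (assumes the app runs locally on Flask's default port):
#   GET  /          -> renders home.html with all stored blog entries
#   POST /add/blog  -> form fields 'head' and 'desc' create a new entry, then redirect to /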
| 36.727273 | 289 | 0.670792 | 165 | 1,212 | 4.806061 | 0.430303 | 0.035309 | 0.056747 | 0.075662 | 0.300126 | 0.257251 | 0.257251 | 0 | 0 | 0 | 0 | 0.036893 | 0.150165 | 1,212 | 32 | 290 | 37.875 | 0.73301 | 0 | 0 | 0.066667 | 0 | 0.033333 | 0.353135 | 0.217822 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.1 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
649d0ca596b6a034bd16e8ed6193bec8d572c9c4 | 2,033 | py | Python | ProgrammingAssignments/IrisClassification/Vowpal/convertToVowpal.py | ckcortright/CSCI4830MachineLearning | 5d1c6c7bfb05b54f7c000c940b1f6410054f10f0 | [
"MIT"
] | null | null | null | ProgrammingAssignments/IrisClassification/Vowpal/convertToVowpal.py | ckcortright/CSCI4830MachineLearning | 5d1c6c7bfb05b54f7c000c940b1f6410054f10f0 | [
"MIT"
] | null | null | null | ProgrammingAssignments/IrisClassification/Vowpal/convertToVowpal.py | ckcortright/CSCI4830MachineLearning | 5d1c6c7bfb05b54f7c000c940b1f6410054f10f0 | [
"MIT"
] | 2 | 2016-11-30T07:28:47.000Z | 2017-01-28T05:52:45.000Z | ################################################################################
# A simple script to convert the iris training data to the vowpal wabbit format.
#
# Author: Carl Cortright
# Date: 9/10/2016
#
# Copyright 2016 Carl Cortright
################################################################################
import csv
import random
import sys
#
# Converts a file to the vowpal format
#
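# Illustrative output line for one iris row (feature values assumed for the example):
#   1 | 5.1:1 3.5:1 1.4:1 0.2:1
#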
def convertToVowpal(filename):
# Open the relevant files
iris = open(filename, "r+")
iris_csv = csv.reader(iris, delimiter=",")
data_entries = []
# Add the data to the vowpal_output file in the correct format
for row in iris_csv:
label = 0
if(row[4] == "Iris-setosa"):
label = 1
elif(row[4] == "Iris-versicolor"):
label = 2
elif(row[4] == "Iris-virginica"):
label = 3
# Generate the data entry
new_data_entry = ""
new_data_entry += str(label) + " | "
new_data_entry += str(row[0]) + ":1 "
new_data_entry += str(row[1]) + ":1 "
new_data_entry += str(row[2]) + ":1 "
new_data_entry += str(row[3]) + ":1 "
new_data_entry += "\n"
data_entries.append(new_data_entry)
random.shuffle(data_entries)
for entry in data_entries:
iris.write(entry)
iris.close()
#
# Shuffles the dataset based on a ratio training:test
#
def shuffleData(training, test):
iris_data = open("iris.data", "r+")
iris_training = open("iris.training.data", "w")
iris_test = open("iris.test.data", "w")
all_data = iris_data.readlines()
random.shuffle(all_data)
all_data_len = len(all_data)
split_point = int( all_data_len * ( float(training) / (training + test)))
for i in range(0, split_point):
iris_training.write(all_data[i])
for j in range(split_point, all_data_len - 1):
iris_test.write(all_data[j])
iris_data.close()
iris_training.close()
iris_test.close()
shuffleData(int(sys.argv[1]), int(sys.argv[2]))
| 27.849315 | 80 | 0.567634 | 266 | 2,033 | 4.165414 | 0.304511 | 0.073105 | 0.086643 | 0.06769 | 0.097473 | 0.051444 | 0 | 0 | 0 | 0 | 0 | 0.019255 | 0.233645 | 2,033 | 72 | 81 | 28.236111 | 0.691913 | 0.1697 | 0 | 0 | 0 | 0 | 0.069444 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046512 | false | 0 | 0.069767 | 0 | 0.116279 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
649fc4b512a5b9fc6782de731a26e50560bffbcf | 516 | py | Python | z.uncategorized/programmers87946.py | kimminki10/algorithms2 | 5d3b2d970dbc88169108632ce0d234bf74446316 | [
"MIT"
] | null | null | null | z.uncategorized/programmers87946.py | kimminki10/algorithms2 | 5d3b2d970dbc88169108632ce0d234bf74446316 | [
"MIT"
] | null | null | null | z.uncategorized/programmers87946.py | kimminki10/algorithms2 | 5d3b2d970dbc88169108632ce0d234bf74446316 | [
"MIT"
] | null | null | null | """
https://programmers.co.kr/learn/courses/30/lessons/87946
피로도
"""
result = 0
def gogo(k, dun, depth=0):
global result
if result < depth:
result = depth
if k < 1:
return
    if dun is None:
return
for i in range(len(dun)):
if k >= dun[i][0]:
gogo(k - dun[i][1], dun[:i] + dun[i+1:], depth+1)
def solution(k, dungeons):
gogo(k, dungeons)
answer = result
return answer
kk = 80
dundun = [[80,20],[50,40],[30,10]]
print(solution(kk, dundun)) | 19.111111 | 61 | 0.550388 | 80 | 516 | 3.55 | 0.475 | 0.056338 | 0.056338 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.075067 | 0.277132 | 516 | 27 | 62 | 19.111111 | 0.686327 | 0.116279 | 0 | 0.105263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0 | 0 | 0.263158 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64a02aaa94244083fe284a70aa2f56168c273094 | 10,914 | py | Python | nicos/clients/gui/utils.py | ess-dmsc/nicos | 755d61d403ff7123f804c45fc80c7ff4d762993b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 1 | 2021-03-26T10:30:45.000Z | 2021-03-26T10:30:45.000Z | nicos/clients/gui/utils.py | ess-dmsc/nicos | 755d61d403ff7123f804c45fc80c7ff4d762993b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 91 | 2020-08-18T09:20:26.000Z | 2022-02-01T11:07:14.000Z | nicos/clients/gui/utils.py | ess-dmsc/nicos | 755d61d403ff7123f804c45fc80c7ff4d762993b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 3 | 2020-08-04T18:35:05.000Z | 2021-04-16T11:22:08.000Z | # -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Georg Brandl <g.brandl@fz-juelich.de>
#
# *****************************************************************************
"""NICOS GUI utilities."""
import logging
from contextlib import contextmanager
from os import path
from nicos.core import MAINTENANCE, MASTER, SIMULATION, SLAVE
from nicos.guisupport.qt import QApplication, QByteArray, QColor, QCursor, \
QDateTime, QDialog, QFileDialog, QFont, QLabel, QMessageBox, \
QProgressDialog, QPushButton, QSettings, QSize, QStyle, Qt, QTextEdit, \
QToolButton, QVBoxLayout, QWidget, uic
def splitTunnelString(tunnel):
tmp = tunnel.split('@')
host = tmp[-1]
username, password = '', ''
if len(tmp) > 1:
tmp = tmp[0].split(':')
username = tmp[0]
if len(tmp) > 1:
password = tmp[1]
return host, username, password
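# e.g. splitTunnelString('user:secret@gateway.example.org')   (illustrative)
#      -> ('gateway.example.org', 'user', 'secret')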
uipath = path.dirname(__file__)
def loadUi(widget, uiname):
return uic.loadUi(path.join(uipath, uiname), widget)
def dialogFromUi(parent, uiname):
dlg = QDialog(parent)
loadUi(dlg, uiname)
return dlg
def loadBasicWindowSettings(window, settings):
window.restoreGeometry(settings.value('geometry', '', QByteArray))
window.restoreState(settings.value('windowstate', '', QByteArray))
try:
window.splitstate = settings.value('splitstate', '', QByteArray)
except TypeError:
window.splitstate = ''
def loadUserStyle(window, settings):
window.user_font = QFont(settings.value('font', QFont('Monospace')))
color = QColor(settings.value('color'))
if color.isValid():
window.user_color = color
else:
window.user_color = QColor(Qt.white)
def enumerateWithProgress(seq, text, every=1, parent=None, total=None,
force_display=False):
total = total or len(seq)
pd = QProgressDialog(parent, labelText=text)
pd.setRange(0, total)
pd.setCancelButton(None)
if total > every or force_display:
pd.show()
processEvents = QApplication.processEvents
processEvents()
try:
for i, item in enumerate(seq):
if i % every == 0:
pd.setValue(i)
processEvents()
yield i, item
finally:
pd.close()
def showToolText(toolbar, action):
widget = toolbar.widgetForAction(action)
if isinstance(widget, QToolButton):
widget.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
def modePrompt(mode):
return {SLAVE: 'slave >>',
SIMULATION: 'SIM >>',
MAINTENANCE: 'maint >>',
MASTER: '>>'}[mode]
class DlgUtils:
def __init__(self, title):
self._dlgutils_title = title
def showError(self, text):
QMessageBox.warning(self, self._dlgutils_title, text)
def showInfo(self, text):
QMessageBox.information(self, self._dlgutils_title, text)
def askQuestion(self, text, select_no=False):
defbutton = select_no and QMessageBox.No or QMessageBox.Yes
buttons = QMessageBox.Yes | QMessageBox.No
return QMessageBox.question(self, self._dlgutils_title, text,
buttons, defbutton) == QMessageBox.Yes
def selectInputFile(self, ctl, text='Choose an input file'):
previous = ctl.text()
if previous:
startdir = path.dirname(previous)
else:
startdir = '.'
fn = QFileDialog.getOpenFileName(self, text, startdir, 'All files (*)')[0]
if fn:
ctl.setText(fn)
def selectOutputFile(self, ctl, text='Choose an output filename'):
previous = ctl.text()
if previous:
startdir = path.dirname(previous)
else:
startdir = '.'
fn = QFileDialog.getSaveFileName(self, text, startdir, 'All files (*)')[0]
if fn:
ctl.setText(fn)
def selectDirectory(self, ctl, text='Choose a directory'):
previous = ctl.text()
startdir = previous or '.'
fname = QFileDialog.getExistingDirectory(self, text, startdir)
if fname:
ctl.setText(fname)
def viewTextFile(self, fname):
        with open(fname, encoding='utf-8', errors='replace') as f:
contents = f.read()
qd = QDialog(self, 'PreviewDlg', True)
qd.setCaption('File preview')
qd.resize(QSize(500, 500))
lay = QVBoxLayout(qd, 11, 6, 'playout')
lb = QLabel(qd, 'label')
lb.setText('Viewing %s:' % fname)
lay.addWidget(lb)
tx = QTextEdit(qd, 'preview')
tx.setReadOnly(1)
tx.setText(contents)
font = QFont(tx.font())
font.setFamily('monospace')
tx.setFont(font)
lay.addWidget(tx)
btn = QPushButton(qd, 'ok')
btn.setAutoDefault(1)
btn.setDefault(1)
btn.setText('Close')
btn.clicked.connect(qd.accept)
lay.addWidget(btn, 0, QWidget.AlignRight)
qd.show()
class SettingGroup:
global_group = ''
def __init__(self, name):
self.name = name
self.settings = QSettings()
def __enter__(self):
if self.global_group:
self.settings.beginGroup(self.global_group)
self.settings.beginGroup(self.name)
return self.settings
def __exit__(self, *args):
if self.global_group:
self.settings.endGroup()
self.settings.endGroup()
self.settings.sync()
class ScriptExecQuestion(QMessageBox):
"""Special QMessageBox for asking what to do when a script is running."""
def __init__(self):
QMessageBox.__init__(self, QMessageBox.Information, 'Error',
'A script is currently running. What do you want to do?',
QMessageBox.NoButton)
self.b0 = self.addButton('Cancel', QMessageBox.RejectRole)
self.b0.setIcon(self.style().standardIcon(QStyle.SP_DialogCancelButton))
self.b1 = self.addButton('Queue script', QMessageBox.YesRole)
self.b1.setIcon(self.style().standardIcon(QStyle.SP_DialogOkButton))
self.b2 = self.addButton('Execute now!', QMessageBox.ApplyRole)
self.b2.setIcon(self.style().standardIcon(QStyle.SP_MessageBoxWarning))
def exec_(self):
# According to the docs, exec_() returns an "opaque value" if using
# non-standard buttons, so we have to check clickedButton(). Do that
# here and return a valid QMessageBox button constant.
QMessageBox.exec_(self)
btn = self.clickedButton()
if btn == self.b2:
return QMessageBox.Apply # Execute now
elif btn == self.b1:
return QMessageBox.Yes # Queue
return QMessageBox.Cancel # Cancel
class DlgPresets:
"""Save dialog presets for Qt dialogs."""
def __init__(self, group, ctls):
self.group = group
self.ctls = ctls
self.settings = QSettings()
def load(self):
self.settings.beginGroup(self.group)
for (ctl, default) in self.ctls:
entry = 'presets/' + ctl.objectName()
val = self.settings.value(entry, default, type(default))
try:
getattr(self, 'set_' + ctl.__class__.__name__)(ctl, val)
except Exception as err:
print(ctl, err)
self.settings.endGroup()
def save(self):
self.settings.beginGroup(self.group)
for (ctl, _) in self.ctls:
entry = 'presets/' + ctl.objectName()
try:
val = getattr(self, 'get_' + ctl.__class__.__name__)(ctl)
self.settings.setValue(entry, val)
except Exception as err:
print(err)
self.settings.endGroup()
self.settings.sync()
def set_QLineEdit(self, ctl, val):
ctl.setText(val)
def set_QListBox(self, ctl, val):
ctl.setSelected(ctl.findItem(val), 1)
def set_QListWidget(self, ctl, val):
ctl.setCurrentItem(ctl.findItems(val, Qt.MatchExactly)[0])
def set_QComboBox(self, ctl, val):
if ctl.isEditable():
ctl.setEditText(val)
else:
ctl.setCurrentIndex(val)
def set_QTextEdit(self, ctl, val):
ctl.setText(val)
def set_QTabWidget(self, ctl, val):
ctl.setCurrentIndex(val)
def set_QSpinBox(self, ctl, val):
ctl.setValue(val)
def set_QRadioButton(self, ctl, val):
ctl.setChecked(bool(val))
def set_QCheckBox(self, ctl, val):
ctl.setChecked(bool(val))
def set_QDateTimeEdit(self, ctl, val):
ctl.setDateTime(QDateTime.fromString(val))
def get_QLineEdit(self, ctl):
return ctl.text()
def get_QListBox(self, ctl):
return ctl.selectedItem().text()
def get_QListWidget(self, ctl):
return ctl.currentItem().text()
def get_QComboBox(self, ctl):
if ctl.isEditable():
return ctl.currentText()
else:
return ctl.currentIndex()
def get_QTextEdit(self, ctl):
return ctl.toPlainText()
def get_QTabWidget(self, ctl):
return ctl.currentIndex()
def get_QSpinBox(self, ctl):
return ctl.value()
def get_QRadioButton(self, ctl):
return int(ctl.isChecked())
def get_QCheckBox(self, ctl):
return int(ctl.isChecked())
def get_QDateTimeEdit(self, ctl):
return ctl.dateTime().toString()
class DebugHandler(logging.Handler):
def __init__(self, mainwindow):
self.mainwindow = mainwindow
logging.Handler.__init__(self)
def emit(self, record):
if self.mainwindow.debugConsole:
msg = self.format(record)
self.mainwindow.debugConsole.addLogMsg('#' * 80)
self.mainwindow.debugConsole.addLogMsg(msg)
@contextmanager
def waitCursor():
try:
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
yield
finally:
QApplication.restoreOverrideCursor()
| 31.45245 | 87 | 0.616731 | 1,220 | 10,914 | 5.431967 | 0.32623 | 0.024295 | 0.01509 | 0.017655 | 0.189226 | 0.158292 | 0.101102 | 0.078769 | 0.047382 | 0.036517 | 0 | 0.007561 | 0.260766 | 10,914 | 346 | 88 | 31.543353 | 0.813832 | 0.125252 | 0 | 0.229839 | 0 | 0 | 0.037883 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.189516 | false | 0.012097 | 0.020161 | 0.044355 | 0.314516 | 0.008065 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64a5e4b1934441d33265c0c90517da3ddd60146a | 27,257 | py | Python | pythonScripts/blenderMeshExport.py | dsnettleton/whipstitch-game-engine | 1c91a2e90274f18723141ec57d0cb4930bd29b25 | [
"MIT"
] | null | null | null | pythonScripts/blenderMeshExport.py | dsnettleton/whipstitch-game-engine | 1c91a2e90274f18723141ec57d0cb4930bd29b25 | [
"MIT"
] | null | null | null | pythonScripts/blenderMeshExport.py | dsnettleton/whipstitch-game-engine | 1c91a2e90274f18723141ec57d0cb4930bd29b25 | [
"MIT"
] | null | null | null | # Blender Mesh Export for the Whipstitch Game Engine
# Copyright D. Scott Nettleton, 2013
# This software is released under the terms of the
# Lesser GNU Public License (LGPL).
import bpy,os
from math import sqrt
from mathutils import Matrix, Vector, Quaternion
from copy import deepcopy
workingDirectory = "./"
MAJOR_VERSION = 1
MINOR_VERSION = 3
BLENDER_FPS = 24
WS_TEXTURE_MAP_COLOR = 0x0001
WS_TEXTURE_MAP_NORMAL = 0x0002
class wsJointMod:
def __init__(self, name):
self.name = name
self.jointIndex = 0
self.location = Vector((0,0,0))
self.rotation = Quaternion((1,0,0,0))
self.initialRot = Quaternion((1,0,0,0))
#end jointModification constructor
def swap(self, curveIndex, value):
if (curveIndex == 0):
# Blender Location X value
self.location.x = value
elif (curveIndex == 1):
# Blender Location Y value
self.location.z = -value
elif (curveIndex == 2):
# Blender Location Z value
self.location.y = value
elif (curveIndex == 3):
# Blender Rotation W value
self.rotation.w = value
elif (curveIndex == 4):
# Blender Rotation X value
self.rotation.x = value
elif (curveIndex == 5):
# Blender Rotation Y value
self.rotation.z = -value
elif (curveIndex == 6):
# Blender Rotation Z value
self.rotation.y = value
#end class jointModification
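# Note on axes (inferred from the component swaps above): Blender is Z-up while the
# exported data is Y-up, so vectors are remapped as (x, y, z)_ws = (x, z, -y)_blender,
# and quaternion x/y/z components are remapped the same way.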
class wsJoint:
def __init__(self, name):
self.name = name
self.start = Vector((0,0,0))
self.end = Vector((0,0,0))
self.rot = Quaternion((1,0,0,0))
self.initialRot = Quaternion((1,0,0,0))
self.parent = -1
self.bounds = wsBounds(0,0,0,0,0,0)
class wsSkeleton:
def __init__(self, name):
self.name = name
self.numJoints = 0
self.joints = []
self.location = Vector((0,0,0))
self.rotation = Quaternion((1,0,0,0))
self.scale = Vector((1,1,1))
class wsKeyframe:
def __init__(self, index):
self.frameIndex = index
self.numJointMods = 0
self.jointMods = []
self.bounds = wsBounds(0,0,0,0,0,0)
class wsAnimation:
def __init__(self, name, skeleton):
self.name = name
self.numKeyframes = 0
self.keyframes = []
self.skeleton = skeleton
self.framesPerSecond = BLENDER_FPS
self.length = 0.0
self.bounds = wsBounds(0,0,0,0,0,0)
class wsBounds:
def __init__(self, minX, maxX, minY, maxY, minZ, maxZ):
self.minX = minX
self.maxX = maxX
self.minY = minY
self.maxY = maxY
self.minZ = minZ
self.maxZ = maxZ
self.halfX = 0.0
self.halfY = 0.0
self.halfZ = 0.0
class wsMesh:
def __init__(self, name, skeleton):
self.name = name
self.numVerts = 0
self.numMaterials = 0
self.verts = []
self.materials = []
self.skeleton = skeleton
self.location = Vector((0,0,0))
self.scale = Vector((1,1,1))
self.rotation = Quaternion((1,0,0,0))
self.bounds = wsBounds(0,0,0,0,0,0)
class wsVert:
def __init__(self):
self.pos = Vector((0,0,0))
self.norm = Vector((1,0,0))
self.texCoords = [0, 0]
self.numWeights = 0
self.weights = []
self.weightSum = 0
class wsWeight:
def __init__(self, index, influence):
self.jointIndex = index
self.influence = influence
class wsTri:
def __init__(self, val1, val2, val3):
self.vertIndices = [val1, val2, val3]
class wsMaterial:
def __init__(self, name):
self.name = name
self.ambient = [0, 0, 0, 1]
self.diffuse = [0, 0, 0, 1]
self.specular = [0, 0, 0, 1]
self.emissive = [0, 0, 0, 1]
self.numTris = 0
self.tris = []
self.shininess = 0
self.mapBitFlag = 0
self.colorMap = ""
self.normalMap = ""
self.properties = []
class wsCustomProperty:
def __init__(self, name, value):
self.name = name
self.value = value
class wsFileBuffer:
def __init__(self, filename, ext):
self.filename = filename +"."+ ext
self.buffer = open(workingDirectory + self.filename, "w")
def write(self, text):
self.buffer.write(text)
def apply(self):
self.buffer.close()
def clear(self):
self.buffer = ""
animSkel = None
animSkelName = ""
mesh = None
animations = []
hasSkeleton = 0
boundsInitialized = 0
bpy.ops.object.mode_set(mode='OBJECT') # Make sure we're in object mode
# Calculate Skeletal data
for my in bpy.data.objects:
if (my.type == "ARMATURE"):
hasSkeleton = 1
animSkel = wsSkeleton(my.name)
animSkelName = my.name
jointNum = 0
joints = []
for boneName in my.data.bones.keys():
bone = my.data.bones[boneName]
joint = wsJoint(boneName)
joint.start.x = bone.head_local[0]
joint.start.y = bone.head_local[2]
joint.start.z = -bone.head_local[1]
joint.end.x = bone.tail_local[0]
joint.end.y = bone.tail_local[2]
joint.end.z = -bone.tail_local[1]
joint.rot = Quaternion(bone.matrix_local.to_quaternion())
tmpZ = joint.rot.z
joint.rot.z = -joint.rot.y
joint.rot.y = tmpZ
parentNum = 0
for bone2 in my.data.bones:
if (bone2 == bone.parent):
joint.parent = parentNum
break
parentNum += 1
# End for each bone2
joints.append( joint )
jointNum += 1
#end for each boneName
animSkel.joints = joints
animSkel.numJoints = jointNum
animSkel.location = Vector([ my.location[0], my.location[2], -my.location[1] ])
animSkel.rotation.x = my.rotation_quaternion[1]
animSkel.rotation.y = my.rotation_quaternion[3]
animSkel.rotation.z = -my.rotation_quaternion[2]
animSkel.rotation.w = my.rotation_quaternion[0]
animSkel.scale = Vector([ my.scale[0], my.scale[2], my.scale[1] ])
#end if (type == "ARMATURE")
#end for each object
# Calculate Animation Data
for my in bpy.data.actions:
if (my.id_root == 'OBJECT'):
anim = wsAnimation(my.name, animSkel)
keyFrames = []
for curve in my.fcurves:
for kPoint in curve.keyframe_points:
inList = 0
for key in keyFrames:
if (key == kPoint.co.x):
inList = 1
break
#end for each key in keyFrames
if (inList == 0):
keyFrames.append(kPoint.co.x)
anim.numKeyframes = len(keyFrames)
if (anim.numKeyframes <= 1):
continue
anim.length = keyFrames[len(keyFrames)-1] / BLENDER_FPS
for index in keyFrames:
key = wsKeyframe(index)
key.jointMods = [None]*len(animSkel.joints)
for group in my.groups:
modified = 0
mod = wsJointMod(group.name)
for j in range( len(animSkel.joints) ):
if (animSkel.joints[j].name == group.name):
mod.jointIndex = j
break
# mod.jointIndex = index
curveIndex = 0
for curve in group.channels:
for kPoint in curve.keyframe_points:
if (kPoint.co.x == index):
modified = 1
mod.swap(curveIndex, kPoint.co.y)
#end for each keyframe_point
curveIndex += 1
#end for each fcurve
if (modified == 1):
# key.jointMods.append(mod)
key.jointMods[mod.jointIndex] = mod
#end for each action group
key.numJointMods = len(key.jointMods)
anim.keyframes.append(key)
#end for each keyframe
animations.append(anim)
#end if this action applies to an object
#end for each animation
# Calculate Mesh Data
for my in bpy.data.objects:
if (my.type == "MESH"):
my.data.calc_tessface()
mesh = wsMesh(my.name, animSkel)
location = my.matrix_world.to_translation()
scale = my.matrix_world.to_scale()
rotation = my.matrix_world.to_quaternion()
mesh.location = Vector([ location[0], location[2], -location[1] ])
mesh.scale = Vector([ scale.x, scale.z, scale.y ])
mesh.rotation.x = rotation[1]
mesh.rotation.y = rotation[3]
mesh.rotation.z = -rotation[2]
mesh.rotation.w = rotation[0]
matCount = 0
vertIndexCount = 0
for myMat in my.data.materials:
faceNum = 0
mat = wsMaterial(myMat.name)
for face in my.data.tessfaces:
if (face.material_index == matCount):
for v in range( len(face.vertices) ):
vert = wsVert()
bVert = my.data.vertices[face.vertices[v]]
vert.pos = Vector([ bVert.co[0], bVert.co[2], -bVert.co[1] ])
vert.norm = Vector([ bVert.normal[0], bVert.normal[2], -bVert.normal[1] ])
if (v == 0):
vert.texCoords = [ my.data.tessface_uv_textures[0].data[faceNum].uv1[0], \
my.data.tessface_uv_textures[0].data[faceNum].uv1[1] ]
elif (v == 1):
vert.texCoords = [ my.data.tessface_uv_textures[0].data[faceNum].uv2[0], \
my.data.tessface_uv_textures[0].data[faceNum].uv2[1] ]
elif (v == 2):
vert.texCoords = [ my.data.tessface_uv_textures[0].data[faceNum].uv3[0], \
my.data.tessface_uv_textures[0].data[faceNum].uv3[1] ]
elif (v == 3):
vert.texCoords = [ my.data.tessface_uv_textures[0].data[faceNum].uv4[0], \
my.data.tessface_uv_textures[0].data[faceNum].uv4[1] ]
for g in range(len(bVert.groups)):
vertGroup = my.vertex_groups[bVert.groups[g].group]
jointid = -1
boneCount = 0
foundOne = 0
for thisBone in animSkel.joints:
if (thisBone.name == vertGroup.name):
jointid = boneCount
foundOne = 1
break
boneCount += 1
if (foundOne == 1):
vert.weights.append( wsWeight(jointid, bVert.groups[g].weight) )
vert.numWeights += 1
vert.weightSum += bVert.groups[g].weight
#end for each vertex group
mesh.verts.append(vert)
#end for each vertex
mat.tris.append( wsTri(vertIndexCount, vertIndexCount+1, vertIndexCount+2) )
if (len(face.vertices) == 4):
mat.tris.append( wsTri(vertIndexCount, vertIndexCount+2, vertIndexCount+3) )
vertIndexCount += 1
vertIndexCount += 3
#end if (this face uses the current material)
faceNum += 1
#end for each face
mat.numTris = len(mat.tris)
mat.ambient = [ myMat.ambient * myMat.diffuse_color[0], \
myMat.ambient * myMat.diffuse_color[1], \
myMat.ambient * myMat.diffuse_color[2], \
myMat.ambient * myMat.alpha ]
mat.diffuse = [ myMat.diffuse_color[0], \
myMat.diffuse_color[1], \
myMat.diffuse_color[2], \
myMat.alpha ]
mat.specular = [ myMat.specular_color[0], \
myMat.specular_color[1], \
myMat.specular_color[2], \
myMat.specular_alpha ]
mat.emissive = [ myMat.emit * myMat.diffuse_color[0], \
myMat.emit * myMat.diffuse_color[1], \
myMat.emit * myMat.diffuse_color[2], \
myMat.emit * myMat.alpha ]
mat.shininess = myMat.specular_hardness
for tex in myMat.texture_slots:
if tex != None:
if tex.use_map_color_diffuse:
mat.mapBitFlag |= WS_TEXTURE_MAP_COLOR
mat.colorMap = tex.texture.image.filepath
fileStart = mat.colorMap.rfind("/") - 1
mat.colorMap = mat.colorMap[fileStart:]
elif tex.use_map_normal:
mat.mapBitFlag |= WS_TEXTURE_MAP_NORMAL
mat.normalMap = tex.texture.image.filepath
fileStart = mat.normalMap.rfind("/") - 1
mat.normalMap = mat.normalMap[fileStart:]
#end if this texture has been defined
#end for each texture
# Check for custom material properties
for prop in myMat.items():
if prop[0] != "_RNA_UI":
mat.properties.append(wsCustomProperty(prop[0], prop[1]))
# End for each custom property
mesh.materials.append( mat )
matCount += 1
#end for each material
mesh.numMaterials = len(mesh.materials)
mesh.numVerts = len(mesh.verts)
#end for each mesh object
if (mesh != None):
mesh.numVerts = len(mesh.verts)
mesh.numMaterials = len(mesh.materials)
boundsInitialized = 0
for mat in mesh.materials:
mat.numTris = len(mat.tris)
for vert in mesh.verts:
vert.pos.x *= mesh.scale.x
vert.pos.y *= mesh.scale.y
vert.pos.z *= mesh.scale.z
vert.pos += mesh.location
vert.pos.rotate(mesh.rotation)
vert.numWeights = len(vert.weights)
# Set the default bounding box
if (boundsInitialized == 0):
mesh.bounds = wsBounds(vert.pos.x, vert.pos.x, vert.pos.y, vert.pos.y, vert.pos.z, vert.pos.z)
boundsInitialized = 1
else:
mesh.bounds.minX = min(mesh.bounds.minX, vert.pos.x)
mesh.bounds.maxX = max(mesh.bounds.maxX, vert.pos.x)
mesh.bounds.minY = min(mesh.bounds.minY, vert.pos.y)
mesh.bounds.maxY = max(mesh.bounds.maxY, vert.pos.y)
mesh.bounds.minZ = min(mesh.bounds.minZ, vert.pos.z)
mesh.bounds.maxZ = max(mesh.bounds.maxZ, vert.pos.z)
#end for each vertex
mesh.bounds.halfX = (mesh.bounds.maxX - mesh.bounds.minX) / 2.0
mesh.bounds.halfY = (mesh.bounds.maxY - mesh.bounds.minY) / 2.0
mesh.bounds.halfZ = (mesh.bounds.maxZ - mesh.bounds.minZ) / 2.0
mesh.location.x = (mesh.bounds.maxX + mesh.bounds.minX) / 2.0
mesh.location.y = (mesh.bounds.maxY + mesh.bounds.minY) / 2.0
mesh.location.z = (mesh.bounds.maxZ + mesh.bounds.minZ) / 2.0
#end if we have a mesh
# ADJUST DATA FOR PARENT OBJECT TRANSFORMATIONS
if (animSkel != None):
animSkel.numJoints = len(animSkel.joints)
boundsInitialized = 0
for joint in animSkel.joints:
joint.start.rotate(animSkel.rotation)
joint.end.rotate(animSkel.rotation)
joint.start.x *= animSkel.scale.x
joint.start.y *= animSkel.scale.y
joint.start.z *= animSkel.scale.z
joint.end.x *= animSkel.scale.x
joint.end.y *= animSkel.scale.y
joint.end.z *= animSkel.scale.z
joint.start += animSkel.location
joint.end += animSkel.location
joint.initialRot = joint.rot
joint.rot = animSkel.rotation * joint.initialRot
# Set the joint's bounding box
if (boundsInitialized == 0):
joint.bounds = wsBounds(min(joint.start.x, joint.end.x), max(joint.start.x, joint.end.x), \
min(joint.start.y, joint.end.y), max(joint.start.y, joint.end.y), \
min(joint.start.z, joint.end.z), max(joint.start.z, joint.end.z))
boundsInitialized = 1
else:
joint.bounds.minX = min(joint.bounds.minX, joint.start.x, joint.end.x)
joint.bounds.maxX = max(joint.bounds.maxX, joint.start.x, joint.end.x)
joint.bounds.minY = min(joint.bounds.minY, joint.start.y, joint.end.y)
joint.bounds.maxY = max(joint.bounds.maxY, joint.start.y, joint.end.y)
joint.bounds.minZ = min(joint.bounds.minZ, joint.start.z, joint.end.z)
joint.bounds.maxZ = max(joint.bounds.maxZ, joint.start.z, joint.end.z)
#end for each joint
#end if we have a skeleton
for anim in animations:
anim.numKeyframes = len(anim.keyframes)
for key in anim.keyframes:
key.numJointMods = len(key.jointMods)
for j in range( key.numJointMods ):
boneMat = None
skel = bpy.data.objects[animSkelName]
key.jointMods[j].location.x *= animSkel.scale.x
key.jointMods[j].location.y *= animSkel.scale.y
key.jointMods[j].location.z *= animSkel.scale.z
key.jointMods[j].initialRot = key.jointMods[j].rotation
key.jointMods[j].rotation = animSkel.rotation * animSkel.joints[j].initialRot * key.jointMods[j].initialRot
key.jointMods[j].location.rotate(key.jointMods[j].rotation)
par = animSkel.joints[j].parent
if (par >= 0):
diffRot = animSkel.joints[par].initialRot.inverted() * animSkel.joints[j].initialRot
key.jointMods[j].rotation = key.jointMods[par].rotation * diffRot * key.jointMods[j].initialRot
#end for each jointMod
#end for each keyframe
for key in anim.keyframes:
boundsInitialized = 0
# Set the keyframe's bounding box
#apply the animation to a copy.
jointList = deepcopy(animSkel.joints)
for j in range(len(jointList)):
par = animSkel.joints[j].parent
jointList[j].end -= jointList[j].start
if (par >= 0):
jointList[j].startRel = jointList[j].start - animSkel.joints[par].start
jointList[j].startRel.rotate(animSkel.joints[par].rot.inverted())
else:
jointList[j].startRel = jointList[j].start
jointList[j].end.rotate(jointList[j].rot.inverted())
for j in range(len(jointList)):
jointList[j].rot = key.jointMods[j].rotation
jointList[j].start = jointList[j].startRel
if (jointList[j].parent >= 0):
jointList[j].start.rotate(jointList[jointList[j].parent].rot)
jointList[j].start += jointList[jointList[j].parent].start
jointList[j].start += key.jointMods[j].location
jointList[j].end.rotate(key.jointMods[j].rotation)
jointList[j].end += jointList[j].start
if (boundsInitialized == 0):
key.bounds = wsBounds(min(jointList[j].start.x, jointList[j].end.x), max(jointList[j].start.x, jointList[j].end.x), \
min(jointList[j].start.y, jointList[j].end.y), max(jointList[j].start.y, jointList[j].end.y), \
min(jointList[j].start.z, jointList[j].end.z), max(jointList[j].start.z, jointList[j].end.z))
boundsInitialized = 1
else:
key.bounds.minX = min(key.bounds.minX, jointList[j].start.x, jointList[j].end.x)
key.bounds.maxX = max(key.bounds.maxX, jointList[j].start.x, jointList[j].end.x)
key.bounds.minY = min(key.bounds.minY, jointList[j].start.y, jointList[j].end.y)
key.bounds.maxY = max(key.bounds.maxY, jointList[j].start.y, jointList[j].end.y)
key.bounds.minZ = min(key.bounds.minZ, jointList[j].start.z, jointList[j].end.z)
key.bounds.maxZ = max(key.bounds.maxZ, jointList[j].start.z, jointList[j].end.z)
#end for each joint
key.bounds.halfX = max(abs(key.bounds.maxX-mesh.location.x), abs(key.bounds.minX-mesh.location.x))
key.bounds.halfY = max(abs(key.bounds.maxY-mesh.location.y), abs(key.bounds.minY-mesh.location.y))
key.bounds.halfZ = max(abs(key.bounds.maxZ-mesh.location.z), abs(key.bounds.minZ-mesh.location.z))
anim.bounds.halfX = max(anim.bounds.halfX, key.bounds.halfX);
anim.bounds.halfY = max(anim.bounds.halfY, key.bounds.halfY);
anim.bounds.halfZ = max(anim.bounds.halfZ, key.bounds.halfZ);
#end for each keyframe
#end for each animation
# NOW WE WRITE!
if (mesh != None):
output = wsFileBuffer(mesh.name, "wsMesh")
output.write("// Whipstitch Mesh File\n")
output.write("// This mesh is for use with the Whipstitch Game Engine\n")
output.write("// For more information, email dsnettleton@whipstitchgames.com\n\n")
output.write("versionNumber "+ str(MAJOR_VERSION) +"."+ str(MINOR_VERSION) +"\n")
output.write("meshName "+ mesh.name +"\n")
output.write("numVertices "+ str(mesh.numVerts) +"\n")
output.write("numMaterials "+ str(mesh.numMaterials) +"\n")
output.write("defaultPos { %f %f %f }\n" % (mesh.location.x, mesh.location.y, mesh.location.z))
output.write("hasSkeleton %u\n\n" % hasSkeleton)
if (hasSkeleton > 0):
output.write("skeleton {\n")
skel = mesh.skeleton
output.write(" numJoints "+ str(skel.numJoints) +"\n") #add one for the root location
for j in range( skel.numJoints ):
output.write(" joint "+ str(j) +" {\n")
output.write(" name "+ skel.joints[j].name+"\n")
output.write(" parent "+ str(skel.joints[j].parent) +"\n")
output.write(" pos_start { %f %f %f }\n" % (skel.joints[j].start.x, skel.joints[j].start.y, skel.joints[j].start.z))
output.write(" pos_end { %f %f %f }\n" % (skel.joints[j].end.x, skel.joints[j].end.y, skel.joints[j].end.z))
output.write(" rotation { %f %f %f %f }\n" % (skel.joints[j].rot.x, skel.joints[j].rot.y, skel.joints[j].rot.z, \
skel.joints[j].rot.w))
output.write(" }\n")
#end for each joint
output.write("}\n\n")
#End if hasSkeleton > 0
output.write("vertices {\n")
output.write(" bounds { %f %f %f }\n" % (mesh.bounds.halfX, mesh.bounds.halfY, mesh.bounds.halfZ) )
for v in range( mesh.numVerts ):
vert = mesh.verts[v]
output.write(" vert "+ str(v) +" {\n")
output.write(" pos { %f %f %f }\n norm { %f %f %f }\n tex { %f %f }\n" % \
(mesh.verts[v].pos.x, mesh.verts[v].pos.y, mesh.verts[v].pos.z, \
mesh.verts[v].norm.x, mesh.verts[v].norm.y, mesh.verts[v].norm.z, \
mesh.verts[v].texCoords[0], mesh.verts[v].texCoords[1]) )
output.write(" weights {\n")
output.write(" numWeights "+ str(mesh.verts[v].numWeights) +"\n")
for w in range( mesh.verts[v].numWeights):
weightVal = mesh.verts[v].weights[w].influence
if (mesh.verts[v].weightSum != 0):
weightVal /= mesh.verts[v].weightSum
output.write(" joint { %d %f }\n" % \
(mesh.verts[v].weights[w].jointIndex, weightVal))
#end for each weight
output.write(" }\n")
output.write(" }\n")
#end for each vertex
output.write("}\n\n")
output.write("materials {\n")
for m in range( mesh.numMaterials ):
mat = mesh.materials[m]
output.write(" mat "+ str(m) +" {\n")
output.write(" name "+ mat.name +"\n")
output.write(" shine "+ str(mat.shininess) +"\n")
output.write(" ambient { "+ \
str(mat.ambient[0]) +" "+ \
str(mat.ambient[1]) +" "+ \
str(mat.ambient[2]) +" "+ \
str(mat.ambient[3]) +" }\n")
output.write(" diffuse { "+ \
str(mat.diffuse[0]) +" "+ \
str(mat.diffuse[1]) +" "+ \
str(mat.diffuse[2]) +" "+ \
str(mat.diffuse[3]) +" }\n")
output.write(" specular { "+ \
str(mat.specular[0]) +" "+ \
str(mat.specular[1]) +" "+ \
str(mat.specular[2]) +" "+ \
str(mat.specular[3]) +" }\n")
output.write(" emissive { "+ \
str(mat.emissive[0]) +" "+ \
str(mat.emissive[1]) +" "+ \
str(mat.emissive[2]) +" "+ \
str(mat.emissive[3]) +" }\n")
output.write(" maps {\n")
output.write(" bitFlag %u\n" % (mat.mapBitFlag))
if (mat.colorMap != ""):
output.write(" colorMap "+ mat.colorMap[2:] +"\n")
if (mat.normalMap != ""):
output.write(" normalMap "+ mat.normalMap[2:] +"\n")
output.write(" }\n")
output.write(" numTriangles "+ str(mat.numTris) +"\n")
output.write(" triangles {\n")
for t in range(mat.numTris):
output.write(" tri "+ str(t) +" {\n")
output.write(" verts {\n")
output.write(" indices { "+ \
str(mat.tris[t].vertIndices[0]) +" "+ \
str(mat.tris[t].vertIndices[1]) +" "+ \
str(mat.tris[t].vertIndices[2]) +" }\n")
output.write(" }\n")
output.write(" }\n")
#end for each triangle
output.write(" }\n")
output.write(" properties {\n")
output.write(" numProperties "+ str(len(mat.properties)) +"\n")
propCount = 0
for prop in mat.properties:
output.write(" property "+ str(propCount) +" {\n")
output.write(" name "+ prop.name +"\n")
output.write(" value "+ str(prop.value) +"\n")
output.write(" }\n")
propCount += 1
#end for each property
output.write(" }\n")
output.write(" }\n")
#end for each material
output.write("}\n\n")
output.apply()
#end if (mesh exists)
for a in range( len(animations) ):
anim = animations[a]
skel = anim.skeleton
debug = wsFileBuffer(anim.name, "wsDebug")
output = wsFileBuffer(anim.name, "wsAnim")
output.write("// Whipstitch Animation File\n")
output.write("// This Animation is for use with the Whipstitch Game Engine\n")
output.write("// For more information, email dsnettleton@whipstitchgames.com\n\n")
output.write("versionNumber "+ str(MAJOR_VERSION) +"."+ str(MINOR_VERSION) +"\n")
output.write("animationType 1\n")
output.write("animationName "+ anim.name +"\n")
output.write("framesPerSecond "+ str(anim.framesPerSecond) +"\n\n")
output.write("numJoints "+str(skel.numJoints)+"\n") #add one for the root location
output.write("numKeyFrames "+ str(anim.numKeyframes) +"\n")
output.write("bounds { %f %f %f }\n" % (anim.bounds.halfX, anim.bounds.halfY, anim.bounds.halfZ) )
# output.write("defaultPos { %f %f %f }\n\n" % (anim.location.x, anim.location.y, anim.location.z) )
output.write("joints {\n")
for j in range( skel.numJoints ):
joint = skel.joints[j]
output.write(" joint "+ str(j) +" {\n")
output.write(" jointName "+ joint.name +"\n")
output.write(" parent "+ str(joint.parent) +"\n")
output.write(" pos_start { "+ \
str(joint.start.x) +" "+ \
str(joint.start.y) +" "+ \
str(joint.start.z) +" }\n")
output.write(" rotation { "+ \
str(joint.rot.x) +" "+ \
str(joint.rot.y) +" "+ \
str(joint.rot.z) +" "+ \
str(joint.rot.w) +" }\n")
output.write(" }\n")
#end for each joint
output.write("}\n\n")
output.write("keyframes {\n")
for k in range( anim.numKeyframes ):
key = anim.keyframes[k]
debug.write("keyframe %u - bounds { %f %f %f }\n" % (k, key.bounds.halfX, key.bounds.halfY, key.bounds.halfZ))
output.write(" keyframe "+ str(k) +" {\n")
output.write(" frameNumber "+ str(key.frameIndex) +"\n")
output.write(" jointsModified "+ str(key.numJointMods) + "\n")
for j in range( key.numJointMods ):
output.write(" joint "+ str(j) +" {\n")
# output.write(" jointName "+ key.jointMods[j].name +"\n")
output.write(" jointTranslation { %f %f %f }\n" % (key.jointMods[j].location.x, key.jointMods[j].location.y, \
key.jointMods[j].location.z))
output.write(" jointRotation { %f %f %f %f }\n" % (key.jointMods[j].rotation.x, key.jointMods[j].rotation.y, \
key.jointMods[j].rotation.z, key.jointMods[j].rotation.w))
output.write(" }\n")
#end for each joint mod
output.write(" }\n")
#end for each keyframe
output.write("}\n")
output.apply()
debug.apply()
#end for each animation
if (mesh != None):
output = wsFileBuffer(mesh.name, "wsModel")
output.write("// Whipstitch Model File\n")
output.write("// This model is for use with the Whipstitch Game Engine\n")
output.write("// For more information, email dsnettleton@whipstitchgames.com\n\n")
output.write("versionNumber "+ str(MAJOR_VERSION) +"."+ str(MINOR_VERSION) +"\n")
output.write("modelName "+ mesh.name +"\n\n")
output.write("numMeshes 1\n")
output.write("numAnimations %d\n" % (len(animations)))
output.apply() # flush the wsModel buffer, matching the other wsFileBuffer writes
print("Export Complete.")
| 39.849415 | 125 | 0.602341 | 3,657 | 27,257 | 4.450643 | 0.093793 | 0.064881 | 0.044237 | 0.002949 | 0.352421 | 0.253195 | 0.19495 | 0.170742 | 0.12374 | 0.086569 | 0 | 0.015099 | 0.244304 | 27,257 | 683 | 126 | 39.90776 | 0.775075 | 0.071064 | 0 | 0.179795 | 0 | 0.001712 | 0.075814 | 0.004159 | 0 | 0 | 0.000475 | 0 | 0 | 1 | 0.02911 | false | 0 | 0.006849 | 0 | 0.058219 | 0.001712 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64a70ed160e992e374d323deae21b2f96b2a2c39 | 638 | py | Python | python/listnode/219.contains-duplicate-ii.py | Nobodylesszb/LeetCode | 0e902f6bff4834a93ce64cf9c57fd64297e63523 | [
"MIT"
] | null | null | null | python/listnode/219.contains-duplicate-ii.py | Nobodylesszb/LeetCode | 0e902f6bff4834a93ce64cf9c57fd64297e63523 | [
"MIT"
] | null | null | null | python/listnode/219.contains-duplicate-ii.py | Nobodylesszb/LeetCode | 0e902f6bff4834a93ce64cf9c57fd64297e63523 | [
"MIT"
] | null | null | null | # Given an array of integers and an integer k, find out whether there are two distinct indices i and j in the array such that nums[i] = nums[j] and the absolute difference between i and j is at most k.
# Example 1:
# Input: nums = [1,2,3,1], k = 3
# Output: true
# Example 2:
# Input: nums = [1,0,1,1], k = 1
# Output: true
# Example 3:
# Input: nums = [1,2,3,1,2,3], k = 2
# Output: false
class Solution:
def containsNearbyDuplicate(self,nums,k):
d = {}
for index,num in enumerate(nums):
if num in d and index -d[num] <=k:
return True
d[num] = index
return False
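# Hedged usage sketch (not part of the original submission): checking the
# dictionary / sliding-window solution against the three examples quoted above.
if __name__ == "__main__":
    s = Solution()
    assert s.containsNearbyDuplicate([1, 2, 3, 1], 3) is True
    assert s.containsNearbyDuplicate([1, 0, 1, 1], 1) is True
    assert s.containsNearbyDuplicate([1, 2, 3, 1, 2, 3], 2) is False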
| 25.52 | 201 | 0.597179 | 109 | 638 | 3.495413 | 0.458716 | 0.070866 | 0.07874 | 0.057743 | 0.068241 | 0.068241 | 0 | 0 | 0 | 0 | 0 | 0.043956 | 0.286834 | 638 | 24 | 202 | 26.583333 | 0.793407 | 0.57837 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64a73e1098cc1dff298ecb88f5712e1ebc04b918 | 23,120 | py | Python | network/utils.py | lukaspie/xpsdeeplearning | 0e5cf818cb0fe7bcfda707f58a1d2194301a9f24 | [
"MIT"
] | 1 | 2022-01-05T09:41:58.000Z | 2022-01-05T09:41:58.000Z | network/utils.py | lukaspie/xpsdeeplearning | 0e5cf818cb0fe7bcfda707f58a1d2194301a9f24 | [
"MIT"
] | null | null | null | network/utils.py | lukaspie/xpsdeeplearning | 0e5cf818cb0fe7bcfda707f58a1d2194301a9f24 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 9 14:10:44 2020.
@author: pielsticker
"""
import os
import pickle
import numpy as np
import json
from matplotlib import pyplot as plt
import matplotlib.colors as mcolors
import seaborn as sns
from docx import Document
from docx.enum.table import WD_TABLE_ALIGNMENT, WD_ROW_HEIGHT_RULE
from docx.enum.text import WD_ALIGN_PARAGRAPH
from docx.shared import Cm, Pt
#%%
class SpectraPlot:
"""A nx5 array of plots from a given data set."""
def __init__(self, data, annots):
"""
Initiate subplots in an n x 5 array where n = data.shape[0]/5.
Parameters
----------
data : array
A numpy array with two channels: binding energy and intensity.
annots : list
List of annotations.
Returns
-------
None.
"""
self.data = data
self.annots = annots
self.no_of_spectra = self.data.shape[0]
self.no_of_cols = 5
self.no_of_rows = int(self.no_of_spectra / self.no_of_cols)
if (self.no_of_spectra % self.no_of_cols) != 0:
self.no_of_rows += 1
self.fig, self.axs = plt.subplots(
nrows=self.no_of_rows, ncols=self.no_of_cols
)
plt.subplots_adjust(
left=0.125,
bottom=0.5,
right=4.8,
top=self.no_of_rows,
wspace=0.2,
hspace=0.2,
)
def plot(self):
"""
Populate the plots with the data.
Returns
-------
fig, axs
Matplotlib objects.
"""
for i in range(self.no_of_spectra):
row, col = int(i / self.no_of_cols), i % self.no_of_cols
x = self.data[i][:, 0]
y = self.data[i][:, 1]
annot = self.annots[i]
try:
self.axs[row, col].plot(x, y)
self.axs[row, col].invert_xaxis()
self.axs[row, col].set_xlim(np.max(x), np.min(x))
self.axs[row, col].set_xlabel("Binding energy (eV)")
self.axs[row, col].set_ylabel("Intensity (arb. units)")
self.axs[row, col].text(
0.025,
0.4,
annot,
horizontalalignment="left",
verticalalignment="top",
transform=self.axs[row, col].transAxes,
fontsize=12,
)
except IndexError:
self.axs[row].plot(x, y)
self.axs[row].invert_xaxis()
self.axs[row].set_xlim(np.max(x), np.min(x))
self.axs[row].set_xlabel("Binding energy (eV)")
self.axs[row].set_ylabel("Intensity (arb. units)")
self.axs[row].text(
0.025,
0.4,
annot,
horizontalalignment="left",
verticalalignment="top",
transform=self.axs[row].transAxes,
fontsize=12,
)
return self.fig, self.axs
class ClassDistribution:
"""Class for the distribution of classes in a dataset."""
def __init__(self, task, data_list):
"""
Calculate the distibutions of the labels.
If the task is "regression", the average distributions
are calculated. If the task is "classification", calculate how
many examples of each class are in the different data ses.
Save the distribution in a dict called 'cd'.
cd: Dictionary of the format {'all data': dict,
'training data': dict,
'validation data': dict,
'test data': dict}.
Each of the sub-dicts contains the distribution of the labels
in the data sub-set.
Parameters
----------
task : str
If task == 'regression', an average distribution is
calculated.
If task == 'classification' or 'multi_class_detection',
the distribution of the labels across the different data
sets is calculated.
data_list : list
List of numpy arrays containing labels.
Returns
-------
None.
"""
self.task = task
self.cd = {
"all data": {},
"training data": {},
"validation data": {},
"test data": {},
}
for i in range(data_list[0].shape[1]):
self.cd["all data"][str(i)] = 0
self.cd["training data"][str(i)] = 0
self.cd["validation data"][str(i)] = 0
self.cd["test data"][str(i)] = 0
if self.task == "classification":
for i, dataset in enumerate(data_list):
key = list(self.cd.keys())[i]
for j, datapoint in enumerate(dataset):
argmax_class = np.argmax(datapoint, axis=0)
self.cd[key][str(argmax_class)] += 1
elif self.task == "regression":
for i, dataset in enumerate(data_list):
key = list(self.cd.keys())[i]
average = list(np.mean(dataset, axis=0))
self.cd[key] = average
elif self.task == "multi_class_detection":
for i, dataset in enumerate(data_list):
key = list(self.cd.keys())[i]
for j, datapoint in enumerate(dataset):
non_zero_classes_args = np.where(datapoint > 0.0)[0]
for n in non_zero_classes_args:
self.cd[key][str(n)] += 1
def plot(self, labels):
"""
Plot the class distribution, using the labels list as the legend.
Parameters
----------
labels : list
List of label values for the legend.
Returns
-------
None.
"""
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
x = np.arange(len(self.cd.keys())) * 1.5
data = []
if self.task == "regression":
plt.title("Average distribution across the classes")
# Plot of the average label distribution in the different
# data sets.
for k, v in self.cd.items():
data.append(v)
data = np.transpose(np.array(data))
else:
plt.title("Class distribution")
for k, v in self.cd.items():
data_list = []
for key, value in v.items():
data_list.append(value)
data.append(data_list)
data = np.transpose(np.array(data))
for i in range(data.shape[0]):
ax.bar(x + i * 0.25, data[i], align="edge", width=0.2)
plt.legend(labels)
plt.xticks(ticks=x + 0.5, labels=list(self.cd.keys()))
plt.show()
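# Illustrative sketch (values assumed, not from the module): for a
# classification task with 3 classes and four identical one-hot label arrays,
# the counter simply tallies the argmax class per data subset, e.g.
#   import numpy as np
#   labels = np.eye(3)[[0, 0, 1, 2]]
#   ClassDistribution("classification", [labels] * 4).cd["all data"]
#   # -> {'0': 2, '1': 1, '2': 1}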
class TrainingGraphs:
"""Class for graphs with the result of the training in Keras."""
def __init__(self, history, fig_dir):
"""
Take a dictionary containing the results from training.
Parameters
----------
history : dict
A dictionary containing the results from the training of a
neural network in Keras.
fig_dir : str
The name of the directory where the figures shall be saved.
Returns
-------
None.
"""
self.history = history
self.fig_dir = fig_dir
def plot_metric(self, metric, title=None, ylabel=None, to_file=True):
"""
Plot the training and validation values of a metric
against the epochs.
Returns
-------
None.
"""
metric_cap = metric.capitalize()
try:
metric_history = self.history[metric]
fig, ax = plt.subplots()
ax.plot(metric_history, linewidth=3)
try:
val_key = "val_" + metric
ax.plot(self.history[val_key], linewidth=3)
except KeyError:
print(f"Validation {metric} was not logged.")
ax.set_title(metric_cap)
ax.set_ylabel(metric_cap)
if title:
ax.set_title(str(title))
if ylabel:
ax.set_ylabel(str(ylabel))
ax.set_xlabel("Epoch")
ax.legend(["Train", "Validation"])
if to_file:
fig_name = os.path.join(self.fig_dir, f"{metric}.png")
fig.savefig(fig_name)
except KeyError:
print(f"{metric_cap} was not logged during training.")
def plot_loss(self, to_file=True):
"""
Plot the training and validation loss against the epochs.
Returns
-------
None.
"""
self.plot_metric(metric="loss", to_file=to_file)
def plot_accuracy(self, to_file=True):
"""
Plot the training and validation accuracy against the epochs.
Returns
-------
None.
"""
self.plot_metric(
metric="accuracy",
ylabel="Classification accuracy",
to_file=to_file,
)
def plot_mse(self, to_file=True):
"""
Plot the training and validation mean squared error against the epochs.
Returns
-------
None.
"""
self.plot_metric(
metric="mse", title="MSE", ylabel="MSE", to_file=to_file
)
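# Hedged usage sketch (assuming `hist` is the History object returned by a
# Keras model.fit call and that "figures/" exists):
#   TrainingGraphs(hist.history, fig_dir="figures/").plot_loss()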
class WeightDistributions:
"""
Class to calculate weight distribution of a Bayesian model in keras.
"""
def __init__(self, bayesian_layers, fig_dir):
import warnings
warnings.filterwarnings("ignore")
self.bayesian_layers = bayesian_layers
self.names = [layer.name for layer in self.bayesian_layers]
self.fig_dir = fig_dir
def plot_weight_priors(self, to_file=True):
qm_vals = [
layer.kernel_prior.mean().numpy()
for layer in self.bayesian_layers
]
qs_vals = [
layer.kernel_prior.stddev().numpy()
for layer in self.bayesian_layers
]
return self.plot_distribution(
qm_vals, qs_vals, kind="prior", to_file=to_file
)
def plot_weight_posteriors(self, to_file=True):
qm_vals = [
layer.kernel_posterior.mean().numpy()
for layer in self.bayesian_layers
]
qs_vals = [
layer.kernel_posterior.stddev().numpy()
for layer in self.bayesian_layers
]
return self.plot_distribution(
qm_vals, qs_vals, kind="posterior", to_file=to_file
)
def plot_distribution(
self, qm_vals, qs_vals, kind="posterior", to_file=True
):
fig, _ = plt.subplots(figsize=(12, 6))
colors = iter(mcolors.TABLEAU_COLORS.keys())
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2)
for n, qm, qs in zip(self.names, qm_vals, qs_vals):
c = next(colors)
try:
sns.histplot(
np.reshape(qm, newshape=[-1]),
ax=ax1,
# bins=50,
label=n,
color=c,
kde=True,
stat="density",
)
sns.histplot(
np.reshape(qs, newshape=[-1]),
ax=ax2,
# bins=50,
label=n,
color=c,
kde=True,
stat="density",
)
except np.linalg.LinAlgError:
sns.histplot(
np.reshape(qm, newshape=[-1]),
ax=ax1,
# bins=50,
label=n,
color=c,
kde=False,
stat="density",
)
sns.histplot(
np.reshape(qs, newshape=[-1]),
ax=ax2,
# bins=50,
label=n,
color=c,
kde=False,
stat="density",
)
ax1.set_title(f"{kind.capitalize()}" + " weight means")
ax1.legend()
ax2.set_title(f"{kind.capitalize()}" + " weight standard deviations")
fig.tight_layout()
# plt.show()
return fig
class Report:
"""Report on the results of the training in keras."""
def __init__(self, dir_name=""):
"""
Initialize a docx document.
Load the data from the hyperparameters file.
Parameters
----------
dir_name : str, optional
The name of the directory where the report shall be saved.
The default is ''.
Returns
-------
None.
"""
self.document = Document()
style = self.document.styles["Normal"]
font = style.font
font.name = "Arial"
font.size = Pt(10)
# Get the data
root_dir = os.getcwd()
self.model_dir = os.path.join(*[root_dir, "runs", dir_name, "model",])
self.log_dir = os.path.join(*[root_dir, "runs", dir_name, "logs"])
self.fig_dir = os.path.join(*[root_dir, "runs", dir_name, "figures"])
(
self.name_data,
self.train_data,
self.model_summary,
) = self.get_hyperparams()
self.results = self.get_results()
self.class_dist = self.results["class_distribution"]
self.filename = os.path.join(self.log_dir, "report.docx")
self.create_document()
def create_document(self):
"""
Add data from the results to the report.
Returns
-------
None.
"""
self.document.add_heading("Training report", 0)
# Add the names and basic information.
self.document.add_heading("Data:", 1)
name_table = self.document.add_table(
rows=len(self.name_data.keys()), cols=2
)
for key, value in self.name_data.items():
j = int(list(self.name_data.keys()).index(key))
name_table.cell(j, 0).text = key + ":"
name_table.cell(j, 1).text = str(value)
self.document.add_heading("Distribution:", 1)
dist_table = self.document.add_table(
rows=len(self.class_dist.keys()) + 1,
cols=len(next(iter(self.class_dist.values()))) + 1,
)
dist_table.alignment = WD_TABLE_ALIGNMENT.CENTER
for i, name in enumerate(self.name_data["Labels"]):
dist_table.cell(0, i + 1).text = name
for item, param in self.class_dist.items():
j = int(list(self.class_dist.keys()).index(item)) + 1
dist_table.cell(j, 0).text = item
for key, value in enumerate(self.class_dist[item]):
k = int(key) + 1
dist_table.cell(j, k).text = str(np.round(value, 3))
self.document.add_page_break()
# Add information about the training parameters
self.document.add_heading("Training parameters:", 1)
train_table = self.document.add_table(
rows=len(self.train_data.keys()), cols=2
)
for key, value in self.train_data.items():
j = int(list(self.train_data.keys()).index(key))
train_table.cell(j, 0).text = key + ":"
train_table.cell(j, 1).text = str(value)
# Add the model architecture
self.document.add_heading("Model architecture", 1)
par = self.document.add_paragraph(self.model_summary)
par.paragraph_format.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY
self.document.add_page_break()
# Add loss and accuracy values.
self.document.add_heading("Loss & accuracy", 1)
loss_file = os.path.join(self.fig_dir, "loss.png")
self.document.add_picture(loss_file, width=Cm(12))
last_paragraph = self.document.paragraphs[-1]
last_paragraph.alignment = WD_ALIGN_PARAGRAPH.CENTER
try:
acc_file = os.path.join(self.fig_dir, "accuracy.png")
self.document.add_picture(acc_file, width=Cm(12))
last_paragraph = self.document.paragraphs[-1]
last_paragraph.alignment = WD_ALIGN_PARAGRAPH.CENTER
except FileNotFoundError:
pass
# Add results on the test data.
self.document.add_heading("Results", 1)
result_table = self.document.add_table(rows=2, cols=2)
result_table.cell(0, 0).text = "Test loss:"
result_table.cell(0, 1).text = str(
np.round(self.results["test_loss"], decimals=3)
)
try:
result_table.cell(1, 0).text = "Test accuracy:"
result_table.cell(1, 1).text = str(
np.round(self.results["test_accuracy"], decimals=3)
)
for row in result_table.rows:
for cell in row.cells:
cell.paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
except KeyError:
pass
self.document.add_page_break()
# Add predictions on random data.
self.document.add_heading("Predictions for 5 random examples", 1)
self.document.add_heading("Training data", 2)
r = np.random.randint(0, self.results["y_train"].shape[0] - 5)
pred_train_5 = self.results["pred_train"][r : r + 5, :]
y_train_5 = self.results["y_train"][r : r + 5, :]
p = self.document.add_paragraph()
p.paragraph_format.space_before = Pt(12)
p.paragraph_format.space_after = None
run = p.add_run()
run.text = "Predictions:"
run.font.underline = True
_ = self.add_result_table(pred_train_5)
p = self.document.add_paragraph()
p.paragraph_format.space_before = Pt(12)
p.paragraph_format.space_after = None
run = p.add_run()
run.text = "Correct labels:"
run.font.underline = True
self.add_result_table(y_train_5)
self.document.add_heading("Test data", 2)
s = np.random.randint(0, self.results["y_test"].shape[0] - 5)
pred_test_5 = self.results["pred_test"][s : s + 5, :]
y_test_5 = self.results["y_test"][s : s + 5, :]
p = self.document.add_paragraph()
p.paragraph_format.space_before = Pt(12)
p.paragraph_format.space_after = None
run = p.add_run()
run.text = "Predictions:"
run.font.underline = True
self.add_result_table(pred_test_5)
p = self.document.add_paragraph()
p.paragraph_format.space_before = Pt(12)
p.paragraph_format.space_after = None
run = p.add_run()
run.text = "Correct labels:"
run.font.underline = True
self.add_result_table(y_test_5)
def add_result_table(self, data_array):
"""
Store and display the results from training.
Parameters
----------
data_array : ndarray
Array with the results from training.
Returns
-------
None.
"""
new_table = self.document.add_table(
rows=data_array.shape[0] + 1, cols=data_array.shape[1]
)
for row in new_table.rows:
row.height = Cm(0.5)
row.height_rule = WD_ROW_HEIGHT_RULE.EXACTLY
for i, name in enumerate(self.name_data["Labels"]):
new_table.cell(0, i).text = name
new_table.cell(0, i).paragraphs[
0
].alignment = WD_ALIGN_PARAGRAPH.CENTER
if data_array.dtype == "float32":
a = np.around(data_array, decimals=4)
row_sums = a.sum(axis=1)
data_array = a / row_sums[:, np.newaxis]
data_array = np.around(data_array, decimals=2)
for i in range(data_array.shape[0]):
for j in range(data_array.shape[1]):
new_table.cell(i + 1, j).text = str(data_array[i, j])
new_table.cell(i + 1, j).paragraphs[
0
].alignment = WD_ALIGN_PARAGRAPH.CENTER
def get_hyperparams(self):
"""
Load the hyperparameters of the training from the JSON file.
Returns
-------
name_data : dict
Basic information about the experiment.
train_data : dict
Information about the training parameters.
model_summary : str
Summary of the model in str format.
"""
hyperparam_file_name = os.path.join(
self.log_dir, "hyperparameters.json"
)
with open(hyperparam_file_name, "r") as json_file:
data_dict = json.load(json_file)
name_data = {
"Name": data_dict["exp_name"],
"Time created": data_dict["time"],
"No. of classes": data_dict["num_of_classes"],
"Labels": data_dict["labels"],
"Total no. of samples": data_dict["no_of_examples"],
"Train-test-split": data_dict["train_test_split"],
"Train-val-split": data_dict["train_val_split"],
"No. of training samples": data_dict["No. of training samples"],
"No. of validation samples": data_dict[
"No. of validation samples"
],
"No. of test samples": data_dict["No. of test samples"],
"Shape of each sample": data_dict["Shape of each sample"],
}
train_data = {
"Optimizer": data_dict["optimizer"],
"Learning rate": data_dict["learning_rate"],
"Loss function": data_dict["loss"],
"Epochs trained": data_dict["epochs_trained"],
"Batch size": data_dict["batch_size"],
}
model_summary = data_dict["model_summary"]
return name_data, train_data, model_summary
def get_results(self):
"""
Load the results from the pickle file.
Results include e.g. the test data and the predictions.
Returns
-------
data : dict
Dictionary of numpy arrays with the results.
"""
file_name = os.path.join(self.log_dir, "results.pkl")
with open(file_name, "rb") as pickle_file:
data = pickle.load(pickle_file)
return data
def write(self):
"""
Store the document in the logs folder.
Returns
-------
None.
"""
self.document.save(self.filename)
print("Report saved!")
#%%
if __name__ == "__main__":
dir_name = "20210226_16h07m_Fe_4_classes_linear_comb_new_noise"
rep = Report(dir_name)
data = rep.get_results()
summary = rep.model_summary
rep.write()
| 31.033557 | 80 | 0.530882 | 2,739 | 23,120 | 4.323111 | 0.147864 | 0.030403 | 0.03167 | 0.01858 | 0.414745 | 0.343214 | 0.285111 | 0.253019 | 0.193058 | 0.154886 | 0 | 0.014253 | 0.356661 | 23,120 | 744 | 81 | 31.075269 | 0.781834 | 0.160856 | 0 | 0.263889 | 0 | 0 | 0.086225 | 0.003917 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043981 | false | 0.00463 | 0.027778 | 0 | 0.097222 | 0.006944 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64a76d80d9db98ea61ff9254492492045784c0fa | 7,743 | py | Python | pyzbar/wrapper.py | spmallick/pyzbar | 372003b5fc129e15bcf35d464118ea43432a56b8 | [
"MIT"
] | null | null | null | pyzbar/wrapper.py | spmallick/pyzbar | 372003b5fc129e15bcf35d464118ea43432a56b8 | [
"MIT"
] | null | null | null | pyzbar/wrapper.py | spmallick/pyzbar | 372003b5fc129e15bcf35d464118ea43432a56b8 | [
"MIT"
] | 2 | 2018-11-21T14:17:48.000Z | 2020-02-26T22:21:59.000Z | """Low-level wrapper around zbar's interface
"""
import platform
import sys
from ctypes import (
cdll, c_ubyte, c_char_p, c_int, c_uint, c_ulong, c_void_p, Structure,
CFUNCTYPE, POINTER
)
from ctypes.util import find_library
from enum import IntEnum, unique
from pathlib import Path
# Types
c_ubyte_p = POINTER(c_ubyte)
c_uint_p = POINTER(c_uint)
c_ulong_p = POINTER(c_ulong)
"""unsigned char* type
"""
# Defines and enums
@unique
class ZBarSymbol(IntEnum):
NONE = 0 # /**< no symbol decoded */
PARTIAL = 1 # /**< intermediate status */
EAN2 = 2 # /**< GS1 2-digit add-on */
EAN5 = 5 # /**< GS1 5-digit add-on */
EAN8 = 8 # /**< EAN-8 */
UPCE = 9 # /**< UPC-E */
ISBN10 = 10 # /**< ISBN-10 (from EAN-13). @since 0.4 */
UPCA = 12 # /**< UPC-A */
EAN13 = 13 # /**< EAN-13 */
ISBN13 = 14 # /**< ISBN-13 (from EAN-13). @since 0.4 */
COMPOSITE = 15 # /**< EAN/UPC composite */
I25 = 25 # /**< Interleaved 2 of 5. @since 0.4 */
DATABAR = 34 # /**< GS1 DataBar (RSS). @since 0.11 */
DATABAR_EXP = 35 # /**< GS1 DataBar Expanded. @since 0.11 */
CODABAR = 38 # /**< Codabar. @since 0.11 */
CODE39 = 39 # /**< Code 39. @since 0.4 */
PDF417 = 57 # /**< PDF417. @since 0.6 */
QRCODE = 64 # /**< QR Code. @since 0.10 */
CODE93 = 93 # /**< Code 93. @since 0.11 */
CODE128 = 128 # /**< Code 128 */
@unique
class ZBarConfig(IntEnum):
CFG_ENABLE = 0 # /**< enable symbology/feature */
CFG_ADD_CHECK = 1 # /**< enable check digit when optional */
CFG_EMIT_CHECK = 2 # /**< return check digit when present */
CFG_ASCII = 3 # /**< enable full ASCII character set */
CFG_NUM = 4 # /**< number of boolean decoder configs */
CFG_MIN_LEN = 0x20 # /**< minimum data length for valid decode */
CFG_MAX_LEN = 0x21 # /**< maximum data length for valid decode */
CFG_UNCERTAINTY = 0x40 # /**< required video consistency frames */
CFG_POSITION = 0x80 # /**< enable scanner to collect position data */
CFG_X_DENSITY = 0x100 # /**< image scanner vertical scan density */
CFG_Y_DENSITY = 0x101 # /**< image scanner horizontal scan density */
# Structs
class zbar_image_scanner(Structure):
"""Opaque C++ class with private implementation
"""
pass
class zbar_image(Structure):
"""Opaque C++ class with private implementation
"""
pass
# Globals populated in load_libzbar
LIBZBAR = None
"""ctypes.CDLL
"""
EXTERNAL_DEPENDENCIES = []
"""Sequence of instances of ctypes.CDLL
"""
def load_libzbar():
"""Loads the zbar shared library and its dependencies.
"""
global LIBZBAR
global EXTERNAL_DEPENDENCIES
if not LIBZBAR:
if 'Windows' == platform.system():
# Possible scenarios here
# 1. Run from source, DLLs are in pyzbar directory
# cdll.LoadLibrary() imports DLLs in repo root directory
# 2. Wheel install into CPython installation
# cdll.LoadLibrary() imports DLLs in package directory
# 3. Wheel install into virtualenv
# cdll.LoadLibrary() imports DLLs in package directory
# 4. Frozen
# cdll.LoadLibrary() imports DLLs alongside executable
# 'libzbar-64.dll' and 'libzbar-32.dll' have a dependent DLL
# 'libiconv.dll' and 'libiconv-2.dll' respectively.
if sys.maxsize > 2**32:
# 64-bit
fname = 'libzbar-64.dll'
dependencies = ['libiconv.dll']
else:
# 32-bit
fname = 'libzbar-32.dll'
dependencies = ['libiconv-2.dll']
def load(dir):
# Load dependencies before loading libzbar dll
deps = [
cdll.LoadLibrary(str(dir.joinpath(dep)))
for dep in dependencies
]
libzbar = cdll.LoadLibrary(str(dir.joinpath(fname)))
return deps, libzbar
try:
loaded_dependencies, libzbar = load(Path(''))
except OSError as e:
loaded_dependencies, libzbar = load(Path(__file__).parent)
else:
# Assume a shared library on the path
path = find_library('zbar')
if not path:
raise ImportError('Unable to find zbar shared library')
libzbar = cdll.LoadLibrary(path)
loaded_dependencies = []
LIBZBAR = libzbar
EXTERNAL_DEPENDENCIES = [LIBZBAR] + loaded_dependencies
return LIBZBAR
# Function signatures
def zbar_function(fname, restype, *args):
"""Returns a foreign function exported by `zbar`.
Args:
fname (:obj:`str`): Name of the exported function as string.
restype (:obj:): Return type - one of the `ctypes` primitive C data
types.
*args: Arguments - a sequence of `ctypes` primitive C data types.
Returns:
ctypes.CFUNCTYPE instance: A wrapper around the function.
"""
prototype = CFUNCTYPE(restype, *args)
return prototype((fname, load_libzbar()))
zbar_version = zbar_function(
'zbar_version',
c_int,
c_uint_p, # major,
c_uint_p, # minor
)
zbar_set_verbosity = zbar_function(
'zbar_set_verbosity',
None,
c_int
)
zbar_image_scanner_create = zbar_function(
'zbar_image_scanner_create',
POINTER(zbar_image_scanner)
)
zbar_image_scanner_destroy = zbar_function(
'zbar_image_scanner_destroy',
None,
POINTER(zbar_image_scanner)
)
zbar_parse_config = zbar_function(
'zbar_parse_config',
c_int,
c_char_p, # config_string,
POINTER(c_int), # symbology - values in ZBarSymbol
POINTER(c_int), # config - values in ZBarConfig
POINTER(c_int), # value
)
zbar_image_scanner_set_config = zbar_function(
'zbar_image_scanner_set_config',
c_int,
POINTER(zbar_image_scanner), # scanner
c_int, # symbology - values in ZBarSymbol
c_int, # config - values in ZBarConfig
c_int # value
)
zbar_image_create = zbar_function(
'zbar_image_create',
POINTER(zbar_image)
)
zbar_image_destroy = zbar_function(
'zbar_image_destroy',
None,
POINTER(zbar_image)
)
zbar_image_set_format = zbar_function(
'zbar_image_set_format',
None,
POINTER(zbar_image),
c_uint
)
zbar_image_set_size = zbar_function(
'zbar_image_set_size',
None,
POINTER(zbar_image),
c_uint, # width
c_uint # height
)
zbar_image_set_data = zbar_function(
'zbar_image_set_data',
None,
POINTER(zbar_image),
c_void_p, # data
c_ulong, # raw_image_data_length
c_void_p # A function pointer(!)
)
zbar_scan_image = zbar_function(
'zbar_scan_image',
c_int,
POINTER(zbar_image_scanner),
POINTER(zbar_image)
)
zbar_image_first_symbol = zbar_function(
'zbar_image_first_symbol',
POINTER(c_int), # values in ZBarSymbol
POINTER(zbar_image)
)
zbar_symbol_get_data_length = zbar_function(
'zbar_symbol_get_data_length',
c_uint,
POINTER(c_int) # values in ZBarSymbol
)
zbar_symbol_get_data = zbar_function(
'zbar_symbol_get_data',
c_char_p,
POINTER(c_int) # values in ZBarSymbol
)
zbar_symbol_next = zbar_function(
'zbar_symbol_next',
POINTER(c_int),
POINTER(c_int) # values in ZBarSymbol
)
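# Hedged usage sketch (not part of the original wrapper): querying the zbar
# library version through the binding defined above. Assumes a zbar shared
# library is installed where load_libzbar() can find it.
if __name__ == '__main__':
    from ctypes import byref
    major, minor = c_uint(0), c_uint(0)
    zbar_version(byref(major), byref(minor))
    print('Using zbar version {0}.{1}'.format(major.value, minor.value))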
| 28.784387 | 76 | 0.592923 | 915 | 7,743 | 4.791257 | 0.292896 | 0.063641 | 0.058394 | 0.043111 | 0.314325 | 0.157391 | 0.060675 | 0.040602 | 0 | 0 | 0 | 0.028953 | 0.304146 | 7,743 | 268 | 77 | 28.891791 | 0.784707 | 0.332042 | 0 | 0.26257 | 0 | 0 | 0.084948 | 0.030468 | 0 | 0 | 0.005246 | 0 | 0 | 1 | 0.01676 | false | 0.011173 | 0.039106 | 0 | 0.268156 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64a8df9ffa0c33a95bf84e16379536e77b819f43 | 1,281 | py | Python | connector/write_rnd.py | lodrantl/pimenk | 7871c872d1d93c2e2bb17d48c891696887dbf3b4 | [
"Apache-2.0"
] | null | null | null | connector/write_rnd.py | lodrantl/pimenk | 7871c872d1d93c2e2bb17d48c891696887dbf3b4 | [
"Apache-2.0"
] | 2 | 2016-10-12T15:43:59.000Z | 2016-10-12T15:46:38.000Z | connector/write_rnd.py | lodrantl/pimenk | 7871c872d1d93c2e2bb17d48c891696887dbf3b4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import argparse
import time
import random
from configparser import ConfigParser
from influxdb import SeriesHelper, InfluxDBClient
parser = argparse.ArgumentParser()
parser.add_argument('config', help='path to the config file')
args = parser.parse_args()
config_parser = ConfigParser()
config_parser.read(args.config)
config = config_parser['DEFAULT']
myclient = InfluxDBClient(
config['influx_remote_host'],
int(config['influx_remote_port']),
config['influx_remote_user'],
config['influx_remote_password'],
'pm',
config['influx_remote_https'] == 'true',
True,
timeout=30
)
myclient.create_database('pm')
myclient.create_retention_policy('pm_policy', 'INF', 3, default=True)
myclient.create_retention_policy('event_policy', 'INF', 3, default=False)
class PMSeriesHelper(SeriesHelper):
class Meta:
client = myclient
series_name = 'particulates'
fields = ['pm_25', 'pm_10']
tags = ['sensor_id']
bulk_size = 1
autocommit = True
def store(data):
PMSeriesHelper(sensor_id=config['sensor_id'], pm_25=data[0], pm_10=data[1])
while True:
x = random.randint(250, 500) / 10
y = random.randint(150, 400) / 10
#print(x, y)
store([x, y])
time.sleep(1)
| 23.722222 | 79 | 0.693208 | 163 | 1,281 | 5.257669 | 0.484663 | 0.070012 | 0.105018 | 0.067678 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.03128 | 0.176425 | 1,281 | 53 | 80 | 24.169811 | 0.781043 | 0.02498 | 0 | 0 | 0 | 0 | 0.165196 | 0.017642 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025641 | false | 0.025641 | 0.128205 | 0 | 0.205128 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64a8e9e329731638734656b698ff0ab9bec0827e | 2,084 | py | Python | kyu_5/number_of_trailing_zeros_of_n/test_zeros.py | pedrocodacyorg2/codewars | ba3ea81125b6082d867f0ae34c6c9be15e153966 | [
"Unlicense"
] | 1 | 2022-02-12T05:56:04.000Z | 2022-02-12T05:56:04.000Z | kyu_5/number_of_trailing_zeros_of_n/test_zeros.py | pedrocodacyorg2/codewars | ba3ea81125b6082d867f0ae34c6c9be15e153966 | [
"Unlicense"
] | 182 | 2020-04-30T00:51:36.000Z | 2021-09-07T04:15:05.000Z | kyu_5/number_of_trailing_zeros_of_n/test_zeros.py | pedrocodacyorg2/codewars | ba3ea81125b6082d867f0ae34c6c9be15e153966 | [
"Unlicense"
] | 4 | 2020-04-29T22:04:20.000Z | 2021-07-13T20:04:14.000Z | # Created by Egor Kostan.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
# ALGORITHMS MATHEMATICS NUMBERS
import unittest
import allure
from kyu_5.number_of_trailing_zeros_of_n.zeros import zeros
from utils.log_func import print_log
@allure.epic('5 kyu')
@allure.parent_suite('Novice')
@allure.suite("Algorithms")
@allure.sub_suite("Unit Tests")
@allure.feature("Math")
@allure.story('Number of trailing zeros of N!')
@allure.tag('ALGORITHMS', 'MATHEMATICS', 'NUMBERS')
@allure.link(url='https://www.codewars.com/kata/52f787eb172a8b4ae1000a34/train/python',
name='Source/Kata')
class ZerosTestCase(unittest.TestCase):
"""
Testing zeros function
"""
def test_zeros(self):
"""
Testing 'zeros' program that should calculate the number
of trailing zeros in a factorial of a given number.
:return:
"""
allure.dynamic.title("Testing zeros function")
allure.dynamic.severity(allure.severity_level.NORMAL)
allure.dynamic.description_html('<h3>Codewars badge:</h3>'
'<img src="https://www.codewars.com/users/myFirstCode'
'/badges/large">'
'<h3>Test Description:</h3>'
"<p></p>")
with allure.step("Enter test number and verify the result"):
test_data = [
(0, 0, "Testing with n = 0"),
(6, 1, "Testing with n = 6"),
(10, 2, "Testing with n = 10"),
(12, 2, "Testing with n = 12"),
(30, 7, "Testing with n = 30"),
]
for data in test_data:
number = data[0]
expected = data[1]
message = data[2]
print_log(message=message,
number=number,
expected=expected)
self.assertEqual(expected,
zeros(number))
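# A minimal sketch of the function under test (the kata's actual zeros.py is
# not shown here): the trailing zeros of n! equal the multiplicity of the
# factor 5, counted via repeated integer division (Legendre's formula).
def _trailing_zeros_sketch(n: int) -> int:
    count = 0
    while n:
        n //= 5
        count += n
    return count
# e.g. _trailing_zeros_sketch(30) == 7, matching the (30, 7) case above.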
| 32.061538 | 94 | 0.540787 | 225 | 2,084 | 4.937778 | 0.448889 | 0.049505 | 0.054005 | 0.056706 | 0.043204 | 0.043204 | 0 | 0 | 0 | 0 | 0 | 0.033479 | 0.340691 | 2,084 | 64 | 95 | 32.5625 | 0.775109 | 0.136756 | 0 | 0 | 0 | 0 | 0.258046 | 0 | 0 | 0 | 0 | 0 | 0.025641 | 1 | 0.025641 | false | 0 | 0.102564 | 0 | 0.153846 | 0.051282 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64a9349d4758e8a5e3292009a5229618d221633a | 1,404 | py | Python | fairness/visualise_target_distribution_datasets_joosje.py | JSGoedhart/fairness-comparison | f6fcb7f39f15bb63aeab03ef24e41d0ffe353bb8 | [
"Apache-2.0"
] | null | null | null | fairness/visualise_target_distribution_datasets_joosje.py | JSGoedhart/fairness-comparison | f6fcb7f39f15bb63aeab03ef24e41d0ffe353bb8 | [
"Apache-2.0"
] | 1 | 2021-11-15T17:52:04.000Z | 2021-11-15T17:52:04.000Z | fairness/visualise_target_distribution_datasets_joosje.py | JSGoedhart/fairness-comparison | f6fcb7f39f15bb63aeab03ef24e41d0ffe353bb8 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import pandas as pd
import os
import seaborn as sns
import matplotlib.pyplot as plt
from fairness.data.objects.list import DATASETS, get_dataset_names
sns.set_context(rc={"figure.figsize": (8, 4)})
sns.set(style = 'white', font_scale = 1.5)
colors = ['#2ECC71' , '#FFEB3B'] # Green, yellow
current_path = os.getcwd()
preprocessed_path = os.path.join(current_path, 'fairness', 'data', 'preprocessed')
results_path = os.path.join(current_path, 'fairness', 'results', 'figs_joosje', 'data_distributions')
for dataset_obj in DATASETS:
for sens_attr in dataset_obj.get_sensitive_attributes():
dataset = dataset_obj.get_dataset_name()
target = dataset_obj.get_class_attribute()
df = pd.read_csv(os.path.join(preprocessed_path, dataset + '_numerical-binsensitive.csv'), sep = ',', usecols = [target, sens_attr])
print(dataset)
print(df.shape)
if dataset == 'german':
df[target] = df[target].replace({1.0: 1.0, 2.0: 0.0})
if 'propublica' in dataset:
if 'violent' not in dataset:
df[target] = df[target].replace({1.0:0.0, 0.0:1.0})
sns.catplot(x = sens_attr, y = target, kind = 'bar', data = df, palette = colors, ci = None, edgecolor = 'black')
sns.despine(top = False, right = False, left = False, bottom = False)
plt.xlabel('A')
plt.ylabel('Y')
plt.ylim(0, 1.0)
plt.savefig(os.path.join(results_path, dataset + '_' + sens_attr))
plt.close() | 33.428571 | 134 | 0.698718 | 214 | 1,404 | 4.443925 | 0.457944 | 0.012618 | 0.042061 | 0.029443 | 0.121977 | 0.121977 | 0.121977 | 0 | 0 | 0 | 0 | 0.022463 | 0.143875 | 1,404 | 42 | 135 | 33.428571 | 0.768719 | 0.009259 | 0 | 0 | 0 | 0 | 0.117266 | 0.019424 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.193548 | 0 | 0.193548 | 0.064516 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64ac4e0c696328a0793c73c2ffb9c229acc0aada | 2,508 | py | Python | examples/kddcup2021/WikiKG90M/feature/dump_feat/1_rrt_feat.py | zbmain/PGL | dbded6a1543248b0a33c05eb476ddc513401a774 | [
"Apache-2.0"
] | 1,389 | 2019-06-11T03:29:20.000Z | 2022-03-29T18:25:43.000Z | examples/kddcup2021/WikiKG90M/feature/dump_feat/1_rrt_feat.py | zbmain/PGL | dbded6a1543248b0a33c05eb476ddc513401a774 | [
"Apache-2.0"
] | 232 | 2019-06-21T06:52:10.000Z | 2022-03-29T08:20:31.000Z | examples/kddcup2021/WikiKG90M/feature/dump_feat/1_rrt_feat.py | zbmain/PGL | dbded6a1543248b0a33c05eb476ddc513401a774 | [
"Apache-2.0"
] | 229 | 2019-06-20T12:13:58.000Z | 2022-03-25T12:04:48.000Z | # -*- coding: utf-8 -*-
########################################################################
#
# Copyright (c) 2021 Baidu.com, Inc. All Rights Reserved
#
# Author: suweiyue(suweiyue@baidu.com)
# Date: 2021/06/03 23:12:20
#
########################################################################
"""
Comment.
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import sys
import argparse
import logging
import numpy as np
import pickle
from tqdm import tqdm
import math
from multiprocessing import Pool
output_path = "feature_output"
test_num = 500000000
base_dir = "dataset"
val_t_correct_index = np.load(
base_dir + "/wikikg90m_kddcup2021/processed/val_t_correct_index.npy",
mmap_mode="r")
train_hrt = np.load(
base_dir + "/wikikg90m_kddcup2021/processed/train_hrt.npy", mmap_mode="r")
val_hr = np.load(
base_dir + "/wikikg90m_kddcup2021/processed/val_hr.npy", mmap_mode="r")
val_t_candidate = np.load(
base_dir + "/wikikg90m_kddcup2021/processed/val_t_candidate.npy",
mmap_mode="r")
test_hr = np.load(
base_dir + "/wikikg90m_kddcup2021/processed/test_hr.npy",
mmap_mode="r")[:test_num]
test_t_candidate = np.load(
base_dir + "/wikikg90m_kddcup2021/processed/test_t_candidate.npy",
mmap_mode="r")[:test_num]
prob_dir = output_path
r2t_prob = pickle.load(open(prob_dir + "/r2t_prob.pkl", "rb"))
t2r_prob = pickle.load(open(prob_dir + "/t2r_prob.pkl", "rb"))
print("load data done")
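# The 1315 x 1315 matrix below scores relation pairs: rrt[r1, r2] sums, over
# every tail t reachable from r1, r2t_prob[r1][t] * t2r_prob[t][r2] -- i.e. how
# often relation r1 and relation r2 share tail entities (1315 is presumably the
# relation count in WikiKG90M). Explanatory comment, not in the original script.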
rrt = np.zeros((1315, 1315))
for i in tqdm(range(1315)):
for t in r2t_prob[i]:
prob = r2t_prob[i][t]
for r in t2r_prob[t]:
prob2 = t2r_prob[t][r]
rrt[i, r] += prob * prob2
#np.save("%s/test_feats/rrt_new.npy" % output_path, rrt)
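# The per-candidate feature computed below is sum_r2 rrt[r1, r2] * r2t_prob[r2][tail]:
# how strongly the query relation r1 co-occurs with relations that also emit
# this candidate tail. (Explanatory comment, not in the original script.)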
def get_rrt_feat(t_candidate, hr, path):
rrt_feat = np.zeros(t_candidate.shape, dtype=np.float16)
for i in tqdm(range(t_candidate.shape[0])):
r1 = hr[i, 1]
for j in range(t_candidate.shape[1]):
tail = t_candidate[i, j]
if tail in t2r_prob:
for r2 in t2r_prob[tail]:
prob = rrt[r1, r2] * r2t_prob[r2][tail]
rrt_feat[i, j] += prob
np.save(path, rrt_feat)
get_rrt_feat(val_t_candidate, val_hr,
"%s/valid_feats/rrt_feat.npy" % output_path)
print("valid done")
get_rrt_feat(test_t_candidate, test_hr,
"%s/test_feats/rrt_feat.npy" % output_path)
print("test done")
| 30.216867 | 78 | 0.637959 | 368 | 2,508 | 4.057065 | 0.271739 | 0.073677 | 0.040188 | 0.052244 | 0.359009 | 0.316142 | 0.267917 | 0.166109 | 0.101139 | 0 | 0 | 0.048293 | 0.182616 | 2,508 | 82 | 79 | 30.585366 | 0.68 | 0.08134 | 0 | 0.067797 | 0 | 0 | 0.201308 | 0.159271 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016949 | false | 0 | 0.20339 | 0 | 0.220339 | 0.067797 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64ae4132f882d01178b0d9e0b77c964a7b55c91a | 959 | py | Python | tests/test_db_access_in_repr.py | bdauvergne/pytest-django | 66205b3d6ac21e65fbd3d95f1f541db30a596e53 | [
"BSD-3-Clause"
] | 967 | 2015-01-06T14:36:22.000Z | 2022-03-29T21:07:03.000Z | tests/test_db_access_in_repr.py | bdauvergne/pytest-django | 66205b3d6ac21e65fbd3d95f1f541db30a596e53 | [
"BSD-3-Clause"
] | 743 | 2015-01-02T12:20:13.000Z | 2022-03-25T17:13:05.000Z | tests/test_db_access_in_repr.py | bdauvergne/pytest-django | 66205b3d6ac21e65fbd3d95f1f541db30a596e53 | [
"BSD-3-Clause"
] | 308 | 2015-01-08T11:40:23.000Z | 2022-03-23T02:53:14.000Z | def test_db_access_with_repr_in_report(django_testdir) -> None:
django_testdir.create_test_module(
"""
import pytest
from .app.models import Item
def test_via_db_blocker(django_db_setup, django_db_blocker):
with django_db_blocker.unblock():
Item.objects.get(name='This one is not there')
def test_via_db_fixture(db):
Item.objects.get(name='This one is not there')
"""
)
result = django_testdir.runpytest_subprocess("--tb=auto")
result.stdout.fnmatch_lines([
"tpkg/test_the_test.py FF",
"E *DoesNotExist: Item matching query does not exist.",
"tpkg/test_the_test.py:8: ",
'self = *RuntimeError*Database access not allowed*',
"E *DoesNotExist: Item matching query does not exist.",
"* 2 failed*",
])
assert "INTERNALERROR" not in str(result.stdout) + str(result.stderr)
assert result.ret == 1
| 34.25 | 73 | 0.637122 | 124 | 959 | 4.693548 | 0.508065 | 0.036082 | 0.034364 | 0.041237 | 0.323024 | 0.264605 | 0.264605 | 0.264605 | 0.120275 | 0 | 0 | 0.004196 | 0.254432 | 959 | 27 | 74 | 35.518519 | 0.80979 | 0 | 0 | 0.142857 | 0 | 0 | 0.390523 | 0.109477 | 0 | 0 | 0 | 0 | 0.142857 | 1 | 0.071429 | false | 0 | 0 | 0 | 0.071429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64b9710adc87f43f75a6a4f22eff06a3622b0bf8 | 411 | py | Python | interface_blockchain/product/forms.py | kultimovn/interface_blockchain | 5e827d7241200c1aed1ca551649a1ef651297032 | [
"MIT"
] | null | null | null | interface_blockchain/product/forms.py | kultimovn/interface_blockchain | 5e827d7241200c1aed1ca551649a1ef651297032 | [
"MIT"
] | null | null | null | interface_blockchain/product/forms.py | kultimovn/interface_blockchain | 5e827d7241200c1aed1ca551649a1ef651297032 | [
"MIT"
] | null | null | null | from django import forms
from .models import Product, Good
class GoodForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(GoodForm, self).__init__(*args, **kwargs)
class Meta:
model = Good
fields = ('name', 'options', 'description', 'image', 'tmp_responsibility', 'tmp_amount')
widgets = {
'tmp_responsibility': forms.HiddenInput(),
}
| 27.4 | 96 | 0.620438 | 42 | 411 | 5.809524 | 0.666667 | 0.081967 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.243309 | 411 | 14 | 97 | 29.357143 | 0.784566 | 0 | 0 | 0 | 0 | 0 | 0.177616 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.181818 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64b97f2e2ba70fea221b6d73b9b60cadc991d3e9 | 9,219 | py | Python | shark/shark/report/warehouse_wise_stock_ageing/warehouse_wise_stock_ageing.py | umaepoch/Shark | 2ebf715efba796f96c2d9807bbe930e354606492 | [
"MIT"
] | null | null | null | shark/shark/report/warehouse_wise_stock_ageing/warehouse_wise_stock_ageing.py | umaepoch/Shark | 2ebf715efba796f96c2d9807bbe930e354606492 | [
"MIT"
] | null | null | null | shark/shark/report/warehouse_wise_stock_ageing/warehouse_wise_stock_ageing.py | umaepoch/Shark | 2ebf715efba796f96c2d9807bbe930e354606492 | [
"MIT"
] | null | null | null | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
from operator import itemgetter
import frappe
from frappe import _
from frappe.utils import cint, date_diff, flt
from six import iteritems
from erpnext.stock.doctype.serial_no.serial_no import get_serial_nos
def execute(filters=None):
columns = get_columns(filters)
item_details = get_fifo_queue(filters)
to_date = filters["to_date"]
_func = itemgetter(1)
data = []
for item, item_dict in iteritems(item_details):
earliest_age, latest_age = 0, 0
fifo_queue = sorted(filter(_func, item_dict["fifo_queue"]), key=_func)
details = item_dict["details"]
if not fifo_queue: continue
average_age = get_average_age(fifo_queue, to_date)
earliest_age = date_diff(to_date, fifo_queue[0][1])
latest_age = date_diff(to_date, fifo_queue[-1][1])
range1, range2, range3,range4,range5,above_range5 = get_range_age(filters, fifo_queue, to_date, item_dict)
row = [details.name, details.item_name,
details.description, details.item_group, details.brand]
if filters.get("show_warehouse_wise_stock"):
row.append(details.warehouse)
row.extend([item_dict.get("total_qty"), average_age,
range1, range2, range3, range4,range5,above_range5,
earliest_age, latest_age, details.stock_uom])
data.append(row)
chart_data = get_chart_data(data, filters)
return columns, data, None, chart_data
def get_average_age(fifo_queue, to_date):
batch_age = age_qty = total_qty = 0.0
for batch in fifo_queue:
batch_age = date_diff(to_date, batch[1])
if isinstance(batch[0], (int, float)):
age_qty += batch_age * batch[0]
total_qty += batch[0]
else:
age_qty += batch_age * 1
total_qty += 1
return flt(age_qty / total_qty, 2) if total_qty else 0.0
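# Worked example (illustrative, not in the original report): a fifo_queue of
# [[5, '2021-01-01'], [10, '2021-01-11']] evaluated with to_date '2021-01-21'
# gives (20 * 5 + 10 * 10) / 15 = 13.33 days of average age.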
def get_range_age(filters, fifo_queue, to_date, item_dict):
range1 = range2 = range3 = range4 = range5 = above_range5 = 0.0
for item in fifo_queue:
age = date_diff(to_date, item[1])
qty = flt(item[0]) if not item_dict["has_serial_no"] else 1.0
if age <= filters.range1:
range1 += qty
elif age <= filters.range2:
range2 += qty
elif age <= filters.range3:
range3 += qty
elif age <= filters.range4:
range4 += qty
elif age <= filters.range5:
range5 += qty
else:
above_range5 += qty
return range1, range2, range3, range4,range5,above_range5
def get_columns(filters):
range_columns = []
setup_ageing_columns(filters, range_columns)
columns = [
{
"label": _("Item Code"),
"fieldname": "item_code",
"fieldtype": "Link",
"options": "Item",
"width": 100
},
{
"label": _("Item Name"),
"fieldname": "item_name",
"fieldtype": "Data",
"width": 100
},
{
"label": _("Description"),
"fieldname": "description",
"fieldtype": "Data",
"width": 200
},
{
"label": _("Item Group"),
"fieldname": "item_group",
"fieldtype": "Link",
"options": "Item Group",
"width": 100
},
{
"label": _("Brand"),
"fieldname": "brand",
"fieldtype": "Link",
"options": "Brand",
"width": 100
}]
if filters.get("show_warehouse_wise_stock"):
columns +=[{
"label": _("Warehouse"),
"fieldname": "warehouse",
"fieldtype": "Link",
"options": "Warehouse",
"width": 100
}]
columns.extend([
{
"label": _("Available Qty"),
"fieldname": "qty",
"fieldtype": "Float",
"width": 100
},
{
"label": _("Average Age"),
"fieldname": "average_age",
"fieldtype": "Float",
"width": 100
}])
columns.extend(range_columns)
columns.extend([
{
"label": _("Earliest"),
"fieldname": "earliest",
"fieldtype": "Int",
"width": 80
},
{
"label": _("Latest"),
"fieldname": "latest",
"fieldtype": "Int",
"width": 80
},
{
"label": _("UOM"),
"fieldname": "uom",
"fieldtype": "Link",
"options": "UOM",
"width": 100
}
])
return columns
def get_fifo_queue(filters, sle=None):
item_details = {}
transferred_item_details = {}
serial_no_batch_purchase_details = {}
if sle == None:
sle = get_stock_ledger_entries(filters)
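# Explanatory comment (not in the original): incoming qty is appended to each
# item's FIFO queue as [qty, posting_date] (or [serial_no, posting_date] for
# serialised stock); outgoing qty pops from the front of the queue, so whatever
# remains is the unconsumed stock whose age the report buckets into ranges.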
for d in sle:
key = (d.name, d.warehouse) if filters.get('show_warehouse_wise_stock') else d.name
item_details.setdefault(key, {"details": d, "fifo_queue": []})
fifo_queue = item_details[key]["fifo_queue"]
transferred_item_key = (d.voucher_no, d.name, d.warehouse)
transferred_item_details.setdefault(transferred_item_key, [])
if d.voucher_type == "Stock Reconciliation":
d.actual_qty = flt(d.qty_after_transaction) - flt(item_details[key].get("qty_after_transaction", 0))
serial_no_list = get_serial_nos(d.serial_no) if d.serial_no else []
if d.actual_qty > 0:
if transferred_item_details.get(transferred_item_key):
batch = transferred_item_details[transferred_item_key][0]
fifo_queue.append(batch)
transferred_item_details[transferred_item_key].pop(0)
else:
if serial_no_list:
for serial_no in serial_no_list:
if serial_no_batch_purchase_details.get(serial_no):
fifo_queue.append([serial_no, serial_no_batch_purchase_details.get(serial_no)])
else:
serial_no_batch_purchase_details.setdefault(serial_no, d.posting_date)
fifo_queue.append([serial_no, d.posting_date])
else:
fifo_queue.append([d.actual_qty, d.posting_date])
else:
if serial_no_list:
fifo_queue[:] = [serial_no for serial_no in fifo_queue if serial_no[0] not in serial_no_list]
else:
qty_to_pop = abs(d.actual_qty)
while qty_to_pop:
batch = fifo_queue[0] if fifo_queue else [0, None]
if 0 < flt(batch[0]) <= qty_to_pop:
# if batch qty > 0
# not enough or exactly same qty in current batch, clear batch
qty_to_pop -= flt(batch[0])
transferred_item_details[transferred_item_key].append(fifo_queue.pop(0))
else:
# all from current batch
batch[0] = flt(batch[0]) - qty_to_pop
transferred_item_details[transferred_item_key].append([qty_to_pop, batch[1]])
qty_to_pop = 0
item_details[key]["qty_after_transaction"] = d.qty_after_transaction
if "total_qty" not in item_details[key]:
item_details[key]["total_qty"] = d.actual_qty
else:
item_details[key]["total_qty"] += d.actual_qty
item_details[key]["has_serial_no"] = d.has_serial_no
return item_details
def get_stock_ledger_entries(filters):
return frappe.db.sql("""select
item.name, item.item_name, item_group, brand, description, item.stock_uom, item.has_serial_no,
actual_qty, posting_date, voucher_type, voucher_no, serial_no, batch_no, qty_after_transaction, warehouse
from `tabStock Ledger Entry` sle,
(select name, item_name, description, stock_uom, brand, item_group, has_serial_no
from `tabItem` {item_conditions}) item
where item_code = item.name and
company = %(company)s and
posting_date <= %(to_date)s and
is_cancelled != 1
{sle_conditions}
order by posting_date, posting_time, sle.creation, actual_qty""" #nosec
.format(item_conditions=get_item_conditions(filters),
sle_conditions=get_sle_conditions(filters)), filters, as_dict=True)
def get_item_conditions(filters):
conditions = []
if filters.get("item_code"):
conditions.append("item_code=%(item_code)s")
if filters.get("brand"):
conditions.append("brand=%(brand)s")
return "where {}".format(" and ".join(conditions)) if conditions else ""
def get_sle_conditions(filters):
conditions = []
if filters.get("warehouse"):
lft, rgt = frappe.db.get_value('Warehouse', filters.get("warehouse"), ['lft', 'rgt'])
conditions.append("""warehouse in (select wh.name from `tabWarehouse` wh
where wh.lft >= {0} and rgt <= {1})""".format(lft, rgt))
return "and {}".format(" and ".join(conditions)) if conditions else ""
def get_chart_data(data, filters):
if not data:
return []
labels, datapoints = [], []
if filters.get("show_warehouse_wise_stock"):
return {}
data.sort(key = lambda row: row[6], reverse=True)
if len(data) > 10:
data = data[:10]
for row in data:
labels.append(row[0])
datapoints.append(row[6])
return {
"data" : {
"labels": labels,
"datasets": [
{
"name": _("Average Age"),
"values": datapoints
}
]
},
"type" : "bar"
}
def setup_ageing_columns(filters, range_columns):
for i, label in enumerate(["0-{range1}".format(range1=filters["range1"]),
"{range1}-{range2}".format(range1=cint(filters["range1"])+ 1, range2=filters["range2"]),
"{range2}-{range3}".format(range2=cint(filters["range2"])+ 1, range3=filters["range3"]),
"{range3}-{range4}".format(range3=cint(filters["range3"])+ 1, range4=filters["range4"]),
"{range4}-{range5}".format(range4=cint(filters["range4"])+ 1, range5=filters["range5"]),
"{range5}-{above}".format(range5=cint(filters["range5"])+ 1, above=_("Above"))]):
add_column(range_columns, label="Aging Range ("+ label +")", fieldname='range' + str(i+1))
def add_column(range_columns, label, fieldname, fieldtype='Float', width=140):
range_columns.append(dict(
label=label,
fieldname=fieldname,
fieldtype=fieldtype,
width=width
)) | 29.082019 | 108 | 0.678599 | 1,260 | 9,219 | 4.711905 | 0.139683 | 0.037729 | 0.014149 | 0.021897 | 0.258548 | 0.183931 | 0.158329 | 0.052889 | 0.028971 | 0.013812 | 0 | 0.021165 | 0.174856 | 9,219 | 317 | 109 | 29.082019 | 0.759301 | 0.024732 | 0 | 0.157088 | 0 | 0.003831 | 0.209238 | 0.020812 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042146 | false | 0 | 0.02682 | 0.003831 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64bb99f7ba68e5c75620ffdbf50cc594972bec36 | 2,652 | py | Python | src/nasty_analysis/document/maxqda_coded_nasty.py | lschmelzeisen/nasty-analysis | 50e2f2d5f6b8b9649a8c8adba1d94b59f01a8dca | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2020-05-23T19:18:42.000Z | 2020-05-26T12:33:44.000Z | src/nasty_analysis/document/maxqda_coded_nasty.py | lschmelzeisen/nasty-analysis | 50e2f2d5f6b8b9649a8c8adba1d94b59f01a8dca | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/nasty_analysis/document/maxqda_coded_nasty.py | lschmelzeisen/nasty-analysis | 50e2f2d5f6b8b9649a8c8adba1d94b59f01a8dca | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | #
# Copyright 2019-2020 Lukas Schmelzeisen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import csv
from datetime import datetime
from pathlib import Path
from typing import Iterator, Mapping, MutableMapping
from elasticsearch_dsl import Date, Float, Keyword, Text
from nasty_data import BaseDocument
from nasty_utils import DecompressingTextIOWrapper
from typing_extensions import Final
_INDEX_OPTIONS: Final[str] = "offsets"
_INDEX_PHRASES: Final[bool] = False
_INDEX_TERM_VECTOR: Final[str] = "with_positions_offsets"
class MaxqdaCodedNastyDocument(BaseDocument):
document_group = Keyword()
code_identifier = Keyword()
lang = Keyword()
created_at = Date()
code = Keyword()
segment = Text(
index_options=_INDEX_OPTIONS,
index_phrases=_INDEX_PHRASES,
term_vector=_INDEX_TERM_VECTOR,
analyzer="standard",
)
coverage = Float()
@classmethod
def prepare_doc_dict(cls, doc_dict: MutableMapping[str, object]) -> None:
super().prepare_doc_dict(doc_dict)
doc_dict.pop("Farbe")
doc_dict.pop("Kommentar")
doc_dict["document_group"] = doc_dict.pop("Dokumentgruppe")
doc_dict["created_at"] = datetime.strptime(
doc_dict.pop("Dokumentname"), "%d.%m.%Y %H:%M:%S"
)
doc_dict["_id"] = (
str(doc_dict["code_identifier"]) + "-" + str(doc_dict.pop("i"))
)
doc_dict["code"] = doc_dict.pop("Code")
doc_dict["segment"] = doc_dict.pop("Segment")
doc_dict["coverage"] = float(doc_dict.pop("Abdeckungsgrad %"))
def load_document_dicts_from_maxqda_coded_nasty_csv(
file: Path,
code_identifier: str,
lang: str,
progress_bar: bool = True,
) -> Iterator[Mapping[str, object]]:
with DecompressingTextIOWrapper(
file, encoding="UTF-8", warn_uncompressed=False, progress_bar=progress_bar
) as fin:
reader = csv.DictReader(fin)
for i, document_dict in enumerate(reader):
document_dict["i"] = i
document_dict["code_identifier"] = code_identifier
document_dict["lang"] = lang
yield document_dict
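# Hedged usage sketch (hypothetical file name and code identifier, not from the
# package): iterating the MAXQDA CSV export and preparing each row for the
# document class defined above.
#   for doc in load_document_dicts_from_maxqda_coded_nasty_csv(
#           Path("maxqda-export.csv"), code_identifier="run-1", lang="en"):
#       MaxqdaCodedNastyDocument.prepare_doc_dict(dict(doc))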
| 34 | 82 | 0.691931 | 335 | 2,652 | 5.268657 | 0.444776 | 0.075354 | 0.045326 | 0.01813 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006179 | 0.206637 | 2,652 | 77 | 83 | 34.441558 | 0.8327 | 0.211161 | 0 | 0 | 0 | 0 | 0.100674 | 0.010597 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.148148 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64bd439f7c8735ddf6edf662567de783df1b760f | 6,188 | py | Python | mpinterfaces/nanoparticle.py | yw-fang/MPInterfaces | ca2e43b590fdfbcf87a116c5c758e54cb7cb2d2e | [
"MIT"
] | null | null | null | mpinterfaces/nanoparticle.py | yw-fang/MPInterfaces | ca2e43b590fdfbcf87a116c5c758e54cb7cb2d2e | [
"MIT"
] | 12 | 2016-11-07T23:46:01.000Z | 2018-08-24T19:00:12.000Z | mpinterfaces/nanoparticle.py | joshgabriel/MPInterfaces | 2799ae161fa94c78842092fb24ef468607afa465 | [
"MIT"
] | null | null | null | # coding: utf-8
# Copyright (c) Henniggroup.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
"""
Wulff construction to create the nanoparticle
"""
from six.moves import range
import itertools
from math import gcd
from functools import reduce
import numpy as np
from pymatgen.core.structure import Structure, Molecule
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.coord_utils import in_coord_list
from mpinterfaces import get_struct_from_mp
from mpinterfaces.default_logger import get_default_logger
logger = get_default_logger(__name__)
class Nanoparticle(Molecule):
"""
Construct nanoparticle using wulff construction
"""
def __init__(self, structure, rmax=15, hkl_family=((1, 0, 0), (1, 1, 1)),
surface_energies=(28, 25)):
self.structure = structure
self.rmax = rmax
self.hkl_family = list(hkl_family)
self.surface_energies = list(surface_energies)
spherical_neighbors = self.structure.get_sites_in_sphere(
[0.0, 0.0, 0.0], self.rmax)
recp_lattice = self.structure.lattice.reciprocal_lattice_crystallographic
self.recp_lattice = recp_lattice.scale(1)
self.set_miller_family()
Molecule.__init__(self, [sn[0].species_and_occu
for sn in spherical_neighbors],
[sn[0].coords for sn in spherical_neighbors],
charge=0)
def set_miller_family(self):
"""
get all miller indices for the given maximum index
get the list of indices that correspond to the given family
of indices
"""
recp_structure = Structure(self.recp_lattice, ["H"], [[0, 0, 0]])
analyzer = SpacegroupAnalyzer(recp_structure, symprec=0.001)
symm_ops = analyzer.get_symmetry_operations()
max_index = max(max(m) for m in self.hkl_family)
r = list(range(-max_index, max_index + 1))
r.reverse()
miller_indices = []
self.all_equiv_millers = []
self.all_surface_energies = []
for miller in itertools.product(r, r, r):
if any([i != 0 for i in miller]):
d = abs(reduce(gcd, miller))
miller_index = tuple([int(i / d) for i in miller])
for op in symm_ops:
for i, u_miller in enumerate(self.hkl_family):
if in_coord_list(u_miller, op.operate(miller_index)):
self.all_equiv_millers.append(miller_index)
self.all_surface_energies.append(
self.surface_energies[i])
def get_normals(self):
"""
get the normal to the plane (h,k,l)
"""
normals = []
for hkl in self.all_equiv_millers:
normal = self.recp_lattice.matrix[0, :] * hkl[0] + \
self.recp_lattice.matrix[1, :] * hkl[1] + \
self.recp_lattice.matrix[2, :] * hkl[2]
normals.append(normal / np.linalg.norm(normal))
return normals
def get_centered_molecule(self):
center = self.center_of_mass
new_coords = np.array(self.cart_coords) - center
return Molecule(self.species_and_occu, new_coords,
charge=self._charge,
spin_multiplicity=self._spin_multiplicity,
site_properties=self.site_properties)
def create(self):
"""
        creates the nanoparticle by chopping off the corners normal to the
        specified surfaces.
        the distance to the surface from the center of the particle =
        normalized surface energy * max radius
"""
mol = self.get_centered_molecule()
normalized_surface_energies = \
np.array(self.all_surface_energies) / float(
max(self.all_surface_energies))
surface_normals = self.get_normals()
remove_sites = []
for i, site in enumerate(mol):
for j, normal in enumerate(surface_normals):
n = np.array(normal)
n = n / np.linalg.norm(n)
if np.dot(site.coords, n) + self.rmax * \
normalized_surface_energies[j] <= 0:
remove_sites.append(i)
break
self.remove_sites(remove_sites)
# new_sites = [site for k, site in enumerate(mol) if k not in remove_sites]
# return Molecule.from_sites(new_sites)
if __name__ == '__main__':
    # nanoparticle settings
# max radius in angstroms
rmax = 15
# surface families to be chopped off
surface_families = [(1, 0, 0), (1, 1, 1)]
# could be in any units, will be normalized
surface_energies = [28, 25]
    # caution: set the structure wrt which the miller indices are specified
# use your own API key
structure = get_struct_from_mp('PbS')
    # primitive ---> conventional cell
sa = SpacegroupAnalyzer(structure)
structure_conventional = sa.get_conventional_standard_structure()
nanoparticle = Nanoparticle(structure_conventional, rmax=rmax,
hkl_family=surface_families,
surface_energies=surface_energies)
nanoparticle.create()
nanoparticle.to(fmt='xyz', filename='nanoparticle.xyz')
"""
Wulff construction using the ASE package
works only for cubic systems and doesn't support multiatom basis
from ase.cluster import wulff_construction
from pymatgen.io.aseio import AseAtomsAdaptor
symbol = 'Pt'
surfaces = [ (1,0,0), (1,1,1) ]
surface_energies = [1, 1]
size = 200 #number of atoms
structure = "fcc"
latticeconstant = 5.0
atoms = wulff_construction(symbol, surfaces, surface_energies, size, structure,
rounding='closest', latticeconstant=latticeconstant,
debug=False, maxiter=100)
#convert to pymatgen structure
pgen_structure = AseAtomsAdaptor().get_structure(atoms)
pgen_structure.to(fmt='poscar', filename='POSCAR_pt_nano.vasp')
"""
| 36.833333 | 83 | 0.626212 | 747 | 6,188 | 4.97992 | 0.302544 | 0.060484 | 0.004032 | 0.023656 | 0.027957 | 0.012903 | 0.01129 | 0.01129 | 0 | 0 | 0 | 0.015176 | 0.286522 | 6,188 | 167 | 84 | 37.053892 | 0.827407 | 0.136231 | 0 | 0 | 0 | 0 | 0.006993 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054348 | false | 0 | 0.130435 | 0 | 0.217391 | 0.01087 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64bd7c10430f6a161fe1d2e13a9a45bf6a6cda3c | 1,262 | py | Python | src/day07.py | birdman74/advent-of-code-2021 | 190cd4110ef3553258a26c8521bdf372c006a77c | [
"Apache-2.0"
] | null | null | null | src/day07.py | birdman74/advent-of-code-2021 | 190cd4110ef3553258a26c8521bdf372c006a77c | [
"Apache-2.0"
] | null | null | null | src/day07.py | birdman74/advent-of-code-2021 | 190cd4110ef3553258a26c8521bdf372c006a77c | [
"Apache-2.0"
] | null | null | null | import os
from collections import Counter
MODULE_DIR = os.path.dirname(os.path.realpath(__file__))
PROJECT_DIR = os.path.join(MODULE_DIR, "..")
INPUT_SOURCE_DIR = os.path.join(PROJECT_DIR, "input")
def get_data_lines(input_file_name):
input_file = os.path.join(INPUT_SOURCE_DIR, input_file_name)
print(f"Input file: {input_file}")
data_file = open(input_file)
return data_file.read().split("\n")
def do_the_thing(input_file_name, part_num):
data_lines = get_data_lines(input_file_name)
# hp = [16, 1, 2, 0, 4, 2, 7, 1, 2, 14]
hp = list(map(int, data_lines[0].split(",")))
left = min(hp)
right = max(hp)
span = right - left + 1
if part_num == 1:
gas_totals = [sum([abs(x-y) for x in hp]) for y in range(left, right + 1)]
else:
gas_totals = [sum([sum(range(abs(x-y) + 1)) for x in hp]) for y in range(left, right + 1)]
optimal_position = gas_totals.index(min(gas_totals))
print(f"Optimal position: {optimal_position}\nGas expended: {min(gas_totals)}\n#################################\n")
def day_7_do(input_file_name):
do_the_thing(input_file_name, 1)
def day_7_do_2(input_file_name):
do_the_thing(input_file_name, 2)
day_7_do("day07.txt")
day_7_do_2("day07.txt")
| 25.755102 | 120 | 0.659271 | 215 | 1,262 | 3.572093 | 0.311628 | 0.140625 | 0.135417 | 0.058594 | 0.264323 | 0.264323 | 0.169271 | 0.169271 | 0.169271 | 0.075521 | 0 | 0.028763 | 0.173534 | 1,262 | 48 | 121 | 26.291667 | 0.707574 | 0.029319 | 0 | 0 | 0 | 0 | 0.129191 | 0.06296 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.071429 | 0 | 0.25 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64bdb2fbf30aa7f6c7c89de13064542fa9d7162e | 1,460 | py | Python | bin/check_files_yml.py | synesthesiam/voice2json-profiles | 4618d121d8d64985deb43d14e2f01b1e12743a18 | [
"MIT"
] | 10 | 2020-01-01T09:08:50.000Z | 2022-01-09T00:18:33.000Z | bin/check_files_yml.py | synesthesiam/voice2json-profiles | 4618d121d8d64985deb43d14e2f01b1e12743a18 | [
"MIT"
] | null | null | null | bin/check_files_yml.py | synesthesiam/voice2json-profiles | 4618d121d8d64985deb43d14e2f01b1e12743a18 | [
"MIT"
] | 3 | 2020-07-25T03:20:21.000Z | 2022-01-23T03:56:16.000Z | #!/usr/bin/env python3
"""Verifies sizes and sha256 sums for files.yml files."""
import os
import subprocess
import sys
from pathlib import Path
import yaml
def main():
"""Main entry point"""
for files_yaml_path in sys.argv[1:]:
profile_root = Path(files_yaml_path).parent
with open(files_yaml_path, "r") as files_yaml_file:
files_yaml = yaml.safe_load(files_yaml_file)
file_count = 0
for condition, files in files_yaml.items():
for file_path, file_info in files.items():
full_path = profile_root / file_path
# Check byte size
expected_bytes = int(file_info["bytes"])
actual_bytes = os.path.getsize(full_path)
assert (
actual_bytes == expected_bytes
), f"Expected size of {full_path} to be {expected_bytes}, got {actual_bytes}"
# Check sha256 sum
expected_sum = str(file_info["sha256"]).strip()
sum_result = subprocess.check_output(["sha256sum", str(full_path)]).decode().strip()
actual_sum = sum_result.split()[0]
assert (
actual_sum == expected_sum
), f"Expected sha256 sum of {full_path} to be {expected_sum}, got {actual_sum}"
file_count += 1
print(profile_root.name, file_count, "OK")
if __name__ == "__main__":
main()
| 31.06383 | 100 | 0.584247 | 181 | 1,460 | 4.425414 | 0.381215 | 0.078652 | 0.048689 | 0.029963 | 0.054931 | 0.054931 | 0 | 0 | 0 | 0 | 0 | 0.02004 | 0.316438 | 1,460 | 46 | 101 | 31.73913 | 0.782565 | 0.084247 | 0 | 0.068966 | 0 | 0 | 0.132175 | 0 | 0 | 0 | 0 | 0 | 0.068966 | 1 | 0.034483 | false | 0 | 0.172414 | 0 | 0.206897 | 0.034483 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64beb60d078cf10c3f14199bcdc7a0d43c7a432a | 1,370 | py | Python | witness.py | barryZZJ/dp-finder | ddf8e3589110b4b35920b437d605b45dd56291da | [
"MIT"
] | null | null | null | witness.py | barryZZJ/dp-finder | ddf8e3589110b4b35920b437d605b45dd56291da | [
"MIT"
] | null | null | null | witness.py | barryZZJ/dp-finder | ddf8e3589110b4b35920b437d605b45dd56291da | [
"MIT"
] | null | null | null | from abc import ABC
from collections import OrderedDict
class Witness(ABC):
def __init__(self, a1, a2, s, eps, method, **kwargs):
self.meth = method
self.a1 = a1
self.a2 = a2
self.s = s
self.eps = eps
self.kwargs = OrderedDict(kwargs)
def get_witness(self):
d = OrderedDict({'a1': self.a1,
'a2': self.a2,
's': self.s,
'eps': self.eps})
return d
def get_full(self):
d = self.get_witness()
d.update(self.kwargs)
d['method'] = self.meth
return d
def get_keys(self):
return list(self.get_full().keys())
def __lt__(self, other):
return self.eps < other.eps
def __repr__(self):
return self.__str__()
def __str__(self):
d = self.get_full()
l = []
for k, v in d.items():
l.append(f'{k}={v}')
return ' '.join(l)
if __name__ == '__main__':
from dataLoader import DataLoader
from dataVisualizer import DataVisualizer
import pandas as pd
w=Witness(1,2,3,3,"dpfinder",pa=1,pb=2)
w2 = Witness(3,4,5,5,'statdp',epsm=3)
df = pd.DataFrame([w.get_full(), w2.get_full()])
# df1.to_excel("test.xls")
# dl = DataLoader()
# dl._push('SVT', w, w2)
# dv = DataVisualizer(dl)
# dv.to_excel(filename='test.xls') | 24.909091 | 57 | 0.550365 | 189 | 1,370 | 3.783069 | 0.365079 | 0.048951 | 0.022378 | 0.036364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026233 | 0.30438 | 1,370 | 55 | 58 | 24.909091 | 0.724029 | 0.089051 | 0 | 0.05 | 0 | 0 | 0.035398 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.175 | false | 0 | 0.125 | 0.075 | 0.475 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64c0a446da789593ff960351a0ce0f86e665e35a | 6,873 | py | Python | TicTacToe/classes/TicTacToe.py | camilleAmaury/ReinforcementLearning | c5d9ec3f17ca02e84ea3786fa2048b35864fc724 | [
"CNRI-Python",
"Info-ZIP"
] | null | null | null | TicTacToe/classes/TicTacToe.py | camilleAmaury/ReinforcementLearning | c5d9ec3f17ca02e84ea3786fa2048b35864fc724 | [
"CNRI-Python",
"Info-ZIP"
] | null | null | null | TicTacToe/classes/TicTacToe.py | camilleAmaury/ReinforcementLearning | c5d9ec3f17ca02e84ea3786fa2048b35864fc724 | [
"CNRI-Python",
"Info-ZIP"
] | null | null | null | import numpy as np
from tqdm import tqdm
class TicTacToe(object):
# Constructor of the TicTacToe class
# <Params name="player1" type="Agent">The first player</Params>
# <Params name="player2" type="Agent">The second player</Params>
# <Params name="rewards" type="object">
# <Contains key="win_reward" type="float">The amount of reward for a win</Contains>
# <Contains key="lose_reward" type="float">The amount of reward for a defeat</Contains>
# <Contains key="draw_reward" type="float">The amount of reward for a draw</Contains>
# <Contains key="error_reward" type="float">The amount of reward to make an already done move</Contains>
# <Contains key="null_reward" type="float">The amount of reward for another agent error</Contains>
# </Params>
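    # <Example>
    #   Illustrative only (the values and agent names below are assumptions, not
    #   from the original source):
    #   rewards = {"win_reward": 1.0, "lose_reward": -1.0, "draw_reward": 0.5,
    #              "error_reward": -5.0, "null_reward": 0.0}
    #   game = TicTacToe(agent_a, agent_b, rewards)
    # </Example>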
def __init__(self, player1, player2, rewards):
super(TicTacToe, self).__init__()
# Initial board state, {0 = void, 1 = player 1, 2 = player 2}
self.board = np.array([0,0,0,0,0,0,0,0,0], dtype=int)
self.players = np.array([player1, player2])
np.random.shuffle(self.players)
self.markers = {"0":"v", "1":"x", "2":"o"}
self.rewards = rewards
    # Method which re-instantiates the game
def reset_env(self):
self.board = np.array([0,0,0,0,0,0,0,0,0])
np.random.shuffle(self.players)
    # Method which launches a training game
    # <Params name="verbose" type="int">The higher the verbose level, the more logs you will get</Params>
def train_game(self, verbose=0):
for i in range(self.board.shape[0]):
turn = i%2
agent_turn = self.players[turn]
agent_value = turn+1
agent_not_turn = self.players[agent_value%2]
            # agent takes an action
action = agent_turn.step_train(self.board)
if self.board[action] == 0:
# correct action
self.board[action] = agent_value
# checks for victory
if not self.has_win(agent_value):
if self.is_board_full(verbose):
agent_turn.update(self.rewards["draw_reward"])
agent_not_turn.update(self.rewards["draw_reward"])
break
else:
agent_turn.update(self.rewards["win_reward"])
agent_not_turn.update(self.rewards["lose_reward"])
break
else:
                # the player chose a wrong action (already taken)
agent_turn.update(self.rewards["error_reward"])
agent_not_turn.update(self.rewards["null_reward"])
break
    # Method which launches a game
    # <Params name="verbose" type="int">The higher the verbose level, the more logs you will get</Params>
def run_game(self, verbose=0):
for i in range(self.board.shape[0]):
turn = i%2
agent_turn = self.players[turn]
agent_value = turn+1
agent_not_turn = self.players[agent_value%2]
            # agent takes an action
action = agent_turn.step(self.board)
if self.board[action] == 0:
# correct action
self.board[action] = agent_value
if verbose==2:
print(self)
# checks for victory
if not self.has_win(agent_value):
if self.is_board_full(verbose):
agent_turn.end_game(1)
agent_not_turn.end_game(1)
break
else:
agent_turn.end_game(0)
agent_not_turn.end_game(2)
if verbose >= 1:
print("Player {} wins :\n{}".format(agent_turn, self))
break
else:
                # the player chose a wrong action (already taken)
if verbose >= 1:
print("Player {} choosed a wrong action : end".format(agent_turn))
agent_turn.end_game(3)
agent_not_turn.end_game(4)
break
    # Method used to run multiple games and train RL agents
    # <Params name="epochs" type="int">The number of games to train on</Params>
    # <Params name="verbose" type="int">The higher the verbose level, the more logs you will get</Params>
def train(self, epochs=1, verbose=0):
for _ in tqdm(range(epochs), miniters=10000):
self.train_game(verbose)
self.reset_env()
    # Method used to run multiple games
    # <Params name="games" type="int">The number of games to play</Params>
    # <Params name="verbose" type="int">The higher the verbose level, the more logs you will get</Params>
def play(self, games=1, verbose=0):
for _ in tqdm(range(games), miniters=10000):
self.run_game(verbose)
self.reset_env()
# Method used to check whether the board is full or not
# <Params name="verbose" type="int">The more verbose is high, the more logs you will have</Params>
def is_board_full(self, verbose):
cond = self.board[self.board == 0].shape[0] == 0
if cond and verbose >= 1:
print("Game is finished with no winner")
return cond
    # Method which checks whether the given player has won
# <Params name="marker" type="string">The marker representing the agent</Params>
def has_win(self, marker):
cond = False
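        # Board index layout used by the checks below (flat array of 9 cells):
        #   0 | 1 | 2
        #   3 | 4 | 5
        #   6 | 7 | 8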
# row
cond = cond or (self.board[0] == marker and self.board[1] == marker and self.board[2] == marker) or \
(self.board[3] == marker and self.board[4] == marker and self.board[5] == marker) or \
(self.board[6] == marker and self.board[7] == marker and self.board[8] == marker)
# column
cond = cond or (self.board[0] == marker and self.board[3] == marker and self.board[6] == marker) or \
(self.board[1] == marker and self.board[4] == marker and self.board[7] == marker) or \
(self.board[2] == marker and self.board[5] == marker and self.board[8] == marker)
# diagonals
cond = cond or (self.board[0] == marker and self.board[4] == marker and self.board[8] == marker) or \
(self.board[2] == marker and self.board[4] == marker and self.board[6] == marker)
return cond
def __str__(self):
s = "_____________\n"
for i in range(self.board.shape[0]):
if i % 3 == 0 and i != 0:
s += "|\n_____________\n"
s += "| {} ".format(self.markers[str(self.board[i])])
s += "|\n_____________"
return s
def __repr__(self):
return "<Object TicTacToe, Agents:[{},{}]>".format(self.players[0], self.players[1])
| 45.217105 | 112 | 0.559144 | 897 | 6,873 | 4.142698 | 0.16388 | 0.092034 | 0.055974 | 0.077503 | 0.636168 | 0.581539 | 0.535791 | 0.458019 | 0.422766 | 0.344187 | 0 | 0.021697 | 0.322712 | 6,873 | 152 | 113 | 45.217105 | 0.776584 | 0.279645 | 0 | 0.381443 | 0 | 0 | 0.050672 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103093 | false | 0 | 0.020619 | 0.010309 | 0.175258 | 0.041237 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64c0d45dc9b180f6c1dd6f97a24cb3227649d735 | 9,780 | py | Python | poem/cv_mim/action_recognition/train_with_encoder_base.py | DionysisChristopoulos/google-research | 7f59ef421beef32ca16c2a7215be74f7eba01a0f | [
"Apache-2.0"
] | 23,901 | 2018-10-04T19:48:53.000Z | 2022-03-31T21:27:42.000Z | poem/cv_mim/action_recognition/train_with_encoder_base.py | DionysisChristopoulos/google-research | 7f59ef421beef32ca16c2a7215be74f7eba01a0f | [
"Apache-2.0"
] | 891 | 2018-11-10T06:16:13.000Z | 2022-03-31T10:42:34.000Z | poem/cv_mim/action_recognition/train_with_encoder_base.py | admariner/google-research | 7cee4b22b925581d912e8d993625c180da2a5a4f | [
"Apache-2.0"
] | 6,047 | 2018-10-12T06:31:02.000Z | 2022-03-31T13:59:28.000Z | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encoder-based action recognition training base code."""
import math
import os
import time
from absl import flags
from absl import logging
import tensorflow as tf
from tensorflow_addons import optimizers as tfa_optimizers
from poem.core import pipeline_utils
from poem.cv_mim import algorithms
from poem.cv_mim import pipelines
from poem.cv_mim import utils
from poem.cv_mim.action_recognition import models
FLAGS = flags.FLAGS
flags.DEFINE_string('log_dir_path', None, 'Path to save checkpoints and logs.')
flags.mark_flag_as_required('log_dir_path')
flags.DEFINE_string('encoder_checkpoint_path', None,
'Path to load the encoder checkpoint.')
flags.mark_flag_as_required('encoder_checkpoint_path')
flags.DEFINE_enum('encoder_algorithm_type', 'DISENTANGLE',
algorithms.SUPPORTED_ALGORITHM_TYPES,
'Type of the algorithm used for training the encoder.')
flags.DEFINE_integer('encoder_pose_embedding_dim', 32,
'Dimension of the pose embedding.')
flags.DEFINE_integer(
'encoder_view_embedding_dim', 32,
'Dimension of the view embedding if encoder_algorithm_type is DISENTANGLE.')
flags.DEFINE_enum('encoder_embedder_type', 'POINT', ['POINT', 'GAUSSIAN'],
'Type of the encoder embedder.')
flags.DEFINE_string(
'encoder_output_activation', 'embedder',
'Activation name of the encoder output to be used as the input.')
flags.DEFINE_integer('encoder_output_dim', 32,
'Dimension of the encoder features.')
flags.DEFINE_list('input_tables', None,
'A list of input tf.Example table pattern.')
flags.mark_flag_as_required('input_tables')
flags.DEFINE_list('batch_sizes', None,
'A list of batch size for each input table.')
flags.mark_flag_as_required('batch_sizes')
flags.DEFINE_string('keypoint_profile_name_2d', 'LEGACY_2DCOCO13',
'Profile name for input 2D keypoints.')
flags.DEFINE_boolean('compile', True,
'Compiles functions for faster tf training.')
flags.DEFINE_integer(
'shuffle_buffer_size', 1157,
'Input shuffle buffer size (PennAction: 1157; NTU-RGBD: 32726).')
flags.DEFINE_float('learning_rate', 5e-3, 'Initial learning rate.')
flags.DEFINE_enum('classifier_type', 'CONVNET',
models.SUPPORTED_CLASSIFIER_TYPES, 'Type of the classifier.')
flags.DEFINE_integer(
'downsample_rate', 2,
'Downsample rate of input videos (PennAction: 2; NTU-RGBD: 1).')
flags.DEFINE_integer(
'num_classes', 14,
'Number of action classes (PennAction: 14; NTU-RGBD: 49).')
flags.DEFINE_integer(
'num_frames', 663,
'Number of frames in each video (PennAction: 663; NTU-RGBD: 300).')
flags.DEFINE_integer('num_iterations', 1000000,
                     'Number of training iterations.')
logging.set_verbosity('info')
logging.set_stderrthreshold('info')
def run(input_dataset_class, common_module, keypoint_profiles_module,
input_example_parser_creator):
"""Runs training pipeline.
Args:
input_dataset_class: An input dataset class that matches input table type.
common_module: A Python module that defines common flags and constants.
keypoint_profiles_module: A Python module that defines keypoint profiles.
input_example_parser_creator: A function handle for creating data parser
function. If None, uses the default parser creator.
"""
log_dir_path = FLAGS.log_dir_path
pipeline_utils.create_dir_and_save_flags(flags, log_dir_path,
'all_flags.train_with_encoder.json')
# Setup summary writer.
summary_writer = tf.summary.create_file_writer(
os.path.join(log_dir_path, 'train_logs'), flush_millis=10000)
# Setup configuration.
keypoint_profile_2d = keypoint_profiles_module.create_keypoint_profile_or_die(
FLAGS.keypoint_profile_name_2d)
# Setup model.
model = algorithms.get_algorithm(
algorithm_type=FLAGS.encoder_algorithm_type,
pose_embedding_dim=FLAGS.encoder_pose_embedding_dim,
view_embedding_dim=FLAGS.encoder_view_embedding_dim,
embedder_type=FLAGS.encoder_embedder_type)
checkpoint = tf.train.Checkpoint(model=model)
checkpoint.restore(FLAGS.encoder_checkpoint_path).expect_partial()
encoder = model.encoder
classifier = models.get_temporal_classifier(
FLAGS.classifier_type,
input_shape=(math.ceil(FLAGS.num_frames / FLAGS.downsample_rate),
FLAGS.encoder_output_dim),
num_classes=FLAGS.num_classes)
ema_classifier = tf.keras.models.clone_model(classifier)
optimizer = tf.keras.optimizers.Adam(learning_rate=FLAGS.learning_rate)
optimizer = tfa_optimizers.MovingAverage(optimizer)
global_step = optimizer.iterations
ckpt_manager, _, _ = utils.create_checkpoint(
log_dir_path,
optimizer=optimizer,
model=classifier,
ema_model=ema_classifier,
global_step=global_step)
# Setup the training dataset.
dataset = pipelines.create_dataset_from_tables(
FLAGS.input_tables, [int(x) for x in FLAGS.batch_sizes],
num_instances_per_record=1,
shuffle=True,
drop_remainder=True,
num_epochs=None,
keypoint_names_2d=keypoint_profile_2d.keypoint_names,
num_classes=FLAGS.num_classes,
num_frames=FLAGS.num_frames,
shuffle_buffer_size=FLAGS.shuffle_buffer_size,
common_module=common_module,
dataset_class=input_dataset_class,
input_example_parser_creator=input_example_parser_creator)
loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
def train_one_iteration(inputs):
"""Trains the model for one iteration.
Args:
inputs: A dictionary for training inputs.
Returns:
loss: The training loss for this iteration.
"""
_, side_outputs = pipelines.create_model_input(
inputs, common_module.MODEL_INPUT_KEYPOINT_TYPE_2D_INPUT,
keypoint_profile_2d)
keypoints_2d = side_outputs[common_module.KEY_PREPROCESSED_KEYPOINTS_2D]
keypoints_2d = tf.squeeze(keypoints_2d, axis=1)
features = keypoints_2d[:, ::FLAGS.downsample_rate, Ellipsis]
labels = inputs[common_module.KEY_CLASS_TARGETS]
labels = tf.squeeze(labels, axis=1)
batch_size, num_frames, num_joints, feature_dim = features.shape
features = tf.reshape(features, (-1, num_joints, feature_dim))
_, features = encoder(features, training=False)
features = features[FLAGS.encoder_output_activation]
features = tf.reshape(features, (batch_size, num_frames, -1))
if (FLAGS.encoder_output_activation == 'embedder') and (
FLAGS.encoder_algorithm_type != algorithms.TYPE_ALGORITHM_ALIGN):
features, _ = tf.split(
features,
num_or_size_splits=[
FLAGS.encoder_pose_embedding_dim, FLAGS.encoder_view_embedding_dim
],
axis=-1)
with tf.GradientTape() as tape:
outputs = classifier(features, training=True)
regularization_loss = sum(classifier.losses)
crossentropy_loss = loss_object(labels, outputs)
total_loss = crossentropy_loss + regularization_loss
trainable_variables = classifier.trainable_variables
grads = tape.gradient(total_loss, trainable_variables)
optimizer.apply_gradients(zip(grads, trainable_variables))
for grad, trainable_variable in zip(grads, trainable_variables):
tf.summary.scalar(
'summarize_grads/' + trainable_variable.name,
tf.linalg.norm(grad),
step=global_step)
return dict(
total_loss=total_loss,
crossentropy_loss=crossentropy_loss,
regularization_loss=regularization_loss)
if FLAGS.compile:
train_one_iteration = tf.function(train_one_iteration)
record_every_n_steps = min(5, FLAGS.num_iterations)
save_ckpt_every_n_steps = min(500, FLAGS.num_iterations)
with summary_writer.as_default():
with tf.summary.record_if(global_step % record_every_n_steps == 0):
start = time.time()
for inputs in dataset:
if global_step >= FLAGS.num_iterations:
break
model_losses = train_one_iteration(inputs)
duration = time.time() - start
start = time.time()
for name, loss in model_losses.items():
tf.summary.scalar('train/' + name, loss, step=global_step)
tf.summary.scalar('train/learning_rate', optimizer.lr, step=global_step)
tf.summary.scalar('train/batch_time', duration, step=global_step)
tf.summary.scalar('global_step/sec', 1 / duration, step=global_step)
if global_step % record_every_n_steps == 0:
logging.info('Iter[{}/{}], {:.6f}s/iter, loss: {:.4f}'.format(
global_step.numpy(), FLAGS.num_iterations, duration,
model_losses['total_loss'].numpy()))
# Save checkpoint.
if global_step % save_ckpt_every_n_steps == 0:
utils.assign_moving_average_vars(classifier, ema_classifier,
optimizer)
ckpt_manager.save(checkpoint_number=global_step)
logging.info('Checkpoint saved at step %d.', global_step.numpy())
| 37.760618 | 80 | 0.720143 | 1,251 | 9,780 | 5.353317 | 0.258193 | 0.031208 | 0.021502 | 0.007765 | 0.122742 | 0.05271 | 0.031059 | 0.008959 | 0 | 0 | 0 | 0.011641 | 0.191922 | 9,780 | 258 | 81 | 37.906977 | 0.835759 | 0.129346 | 0 | 0.04023 | 0 | 0 | 0.176199 | 0.029011 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011494 | false | 0 | 0.068966 | 0 | 0.086207 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64c210806c5cc219f6c4e3fa272f46558ce9bbfc | 1,216 | py | Python | misc/get_magnitude.py | cinjon/ml-capsules-inverted-attention-routing | 978b0f58eba1007bcef0b6cb045f3d2040f76a31 | [
"AML"
] | null | null | null | misc/get_magnitude.py | cinjon/ml-capsules-inverted-attention-routing | 978b0f58eba1007bcef0b6cb045f3d2040f76a31 | [
"AML"
] | null | null | null | misc/get_magnitude.py | cinjon/ml-capsules-inverted-attention-routing | 978b0f58eba1007bcef0b6cb045f3d2040f76a31 | [
"AML"
] | null | null | null | import os
import json
import argparse
import numpy as np
from glob import glob
from multiprocessing import Pool
parser = argparse.ArgumentParser()
parser.add_argument(
"--root",
default="/misc/kcgscratch1/ChoGroup/resnick/spaceofmotion/flows",
type=str)
parser.add_argument(
"--store_dir",
default="/misc/kcgscratch1/ChoGroup/resnick/spaceofmotion",
type=str)
args = parser.parse_args()
def f(folder_path):
print(os.path.basename(folder_path[:-1]))
magnitude_list = []
paths = sorted(glob(os.path.join(folder_path, "*.npy")))
for path in paths:
flow = np.load(path)
u = flow[:, :, 0]
v = flow[:, :, 1]
# rad = np.sqrt(np.square(u) + np.square(v))
# rad_max = np.max(rad)
# epsilon = 1e-5
# u = u / (rad_max + epsilon)
# v = v / (rad_max + epsilon)
rad = np.sqrt(np.square(u) + np.square(v))
magnitude_list.append(np.average(rad))
return [os.path.basename(folder_path[:-1]), np.array(magnitude_list)]
if __name__ == "__main__":
p = Pool(30)
magnitude = p.map(f, sorted(glob(os.path.join(args.root, "*/"))))
np.save(os.path.join(args.store_dir, "new_magnitude.npy"), magnitude)
| 29.658537 | 73 | 0.629934 | 169 | 1,216 | 4.390533 | 0.402367 | 0.040431 | 0.040431 | 0.080863 | 0.328841 | 0.274933 | 0.072776 | 0.072776 | 0.072776 | 0 | 0 | 0.010384 | 0.208059 | 1,216 | 40 | 74 | 30.4 | 0.760125 | 0.11102 | 0 | 0.129032 | 0 | 0 | 0.140465 | 0.094884 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0 | 0.193548 | 0 | 0.258065 | 0.032258 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64c2188244333a65c7be325fa85c1fffbc0b6a2e | 1,171 | py | Python | libs/core/setup.py | ggsdc/corn | 4c17c46a70f95b8882bcb6a55ef7daa1f69e0456 | [
"MIT"
] | 2 | 2020-07-09T20:58:47.000Z | 2020-07-20T20:40:46.000Z | libs/core/setup.py | baobabsoluciones/cornflow | bd7cae22107e5fe148704d5f41d4f58f9c410b40 | [
"Apache-2.0"
] | 2 | 2022-03-31T08:42:10.000Z | 2022-03-31T12:05:23.000Z | libs/core/setup.py | ggsdc/corn | 4c17c46a70f95b8882bcb6a55ef7daa1f69e0456 | [
"MIT"
] | null | null | null | import setuptools
with open("README.rst") as fh:
long_description = fh.read()
required = []
with open("requirements.txt", "r") as fh:
required.append(fh.read().splitlines())
setuptools.setup(
name="cornflow-core",
version="0.0.3a11",
author="baobab soluciones",
author_email="sistemas@baobabsoluciones.es",
description="REST API flask backend components used by cornflow and other REST APIs",
long_description=long_description,
long_description_content_type="text/x-rst",
url="https://github.com/baobabsoluciones/cornflow",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 2 - Pre-Alpha",
],
python_requires=">=3.7",
include_package_data=True,
install_requires=required,
entry_points={
"console_scripts": [
"generate_from_schema = cornflow_core.cli.generate_from_schema:generate_from_schema",
"schema_from_models = cornflow_core.cli.schema_from_models:schema_from_models",
]
},
)
| 32.527778 | 97 | 0.680615 | 134 | 1,171 | 5.738806 | 0.634328 | 0.078023 | 0.070221 | 0.078023 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009544 | 0.194705 | 1,171 | 35 | 98 | 33.457143 | 0.805938 | 0 | 0 | 0 | 0 | 0 | 0.458582 | 0.121264 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.03125 | 0 | 0.03125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64c313cfd9b762378f88f2ab75f1f1485ab4f677 | 17,890 | py | Python | quex/output/analyzer/lexeme_converter.py | smmckay/quex-mirror | 7d75ed560e9f3a591935e59243188676eecb112a | [
"MIT"
] | null | null | null | quex/output/analyzer/lexeme_converter.py | smmckay/quex-mirror | 7d75ed560e9f3a591935e59243188676eecb112a | [
"MIT"
] | null | null | null | quex/output/analyzer/lexeme_converter.py | smmckay/quex-mirror | 7d75ed560e9f3a591935e59243188676eecb112a | [
"MIT"
] | null | null | null | """PURPOSE: Converters: Lexemes towards encodings UTF8/UTF16/UTF32.
During lexical analysis, matches deliver lexemes in the encoding of the buffer.
The functions develop converters for those lexemes to standard encodings, so
that they can easily be reflected.
Let 'Character' be a character in the buffer's encoding, 'Unicode' its
correspondence in UCS, and 'Code Sequence' be the representation of 'Unicode'
in the target encoding (be it UTF8, UTF16, or UTF32). Then the process
of conversion of a 'Character' to the target encoding can be described
by two steps
(1) Unicode = Character +/- offset.
(2) Code Sequence = f(Unicode)
Here, the range of 'Character' is split into contiguous regions where 'offset'
and 'f(Unicode)' are the same. Thus, the character conversion is preceded
by a search for the range in which the character belongs.
(C) 2006-2017 Frank-Rene Schaefer
"""
import os
import sys
sys.path.append(os.environ["QUEX_PATH"])
from quex.engine.misc.string_handling import blue_print
from quex.engine.misc.interval_handling import Interval
from quex.engine.misc.tools import typed
import quex.engine.misc.error as error
from quex.engine.state_machine.transformation.state_split import EncodingTrafoBySplit
import quex.output.core.state.transition_map.core as transition_map
from quex.blackboard import setup as Setup, \
Lng
from quex.constants import INTEGER_MAX
from operator import attrgetter
from copy import copy
def do():
"""RETURNS: list of (content, file_name)
where 'content' is the content to be written into 'file_name'.
"""
if not Setup.converter_only_f:
source_name = "lexeme"
else:
if Setup.converter_source_name: source_name = Setup.converter_source_name
else: source_name = Setup.buffer_encoding.name
header_txt = Lng.template_converter_header()
implementation_txt = blue_print(Lng.template_converter_implementation(), [
("$$CONVERTER_HEADER$$", Lng.file_name_converter_header(source_name)),
("$$CHARACTER_CONVERTERS$$", _character_converters()),
("$$STRING_CONVERTERS$$", _string_converters())])
if Setup.converter_only_f:
implementation_txt = implementation_txt.replace("QUEX_TYPE_LEXATOM",
Setup.lexatom.type)
implementation_txt = implementation_txt.replace("QUEX_INLINE",
Lng.INLINE)
implementation_txt = Lng.Match_QUEX_NAME_lexeme.sub("QUEX_NAME(%s_" % source_name,
implementation_txt)
header_txt = header_txt.replace("QUEX_TYPE_LEXATOM",
Setup.lexatom.type)
header_txt = header_txt.replace("QUEX_INLINE",
Lng.INLINE)
header_txt = Lng.Match_QUEX_NAME_lexeme.sub("QUEX_NAME(%s_" % source_name,
header_txt)
return [
(header_txt, Lng.file_name_converter_header(source_name)),
(implementation_txt, Lng.file_name_converter_implementation(source_name)),
]
def _character_converters():
if isinstance(Setup.buffer_encoding, EncodingTrafoBySplit):
encoding_name = Lng.SAFE_IDENTIFIER(Setup.adapted_encoding_name())
return Lng.template_converter_character_functions_standard(encoding_name)
else:
return _table_character_converters(Setup.buffer_encoding)
def _string_converters():
drain_encoding_list = [
("utf8", "uint8_t", 4),
("utf16", "uint16_t", 2),
("utf32", "uint32_t", 1),
("char", "char", 4),
("pretty_char", "char", 4),
("wchar_t", "wchar_t", 4)
]
string_template = Lng.template_converter_string_functions()
def prepare(template, name, code_unit_type, max_code_unit_n):
result = blue_print(string_template, [
["$$DRAIN_ENCODING$$", name],
["$$DRAIN_CODE_UNIT_TYPE$$", code_unit_type],
["$$DRAIN_ENCODING_MAX_CODE_UNIT_N$$", str(max_code_unit_n)],
])
if name == "wchar_t":
result = "\n".join([
"#if ! defined(QUEX_OPTION_WCHAR_T_DISABLED_EXT)",
result,
"#endif"
])
return result
txt = [
prepare(string_template, name, code_unit_type, max_code_unit_n)
for name, code_unit_type, max_code_unit_n in drain_encoding_list
]
return "\n".join(txt)
def _table_character_converters(unicode_trafo_info):
"""
PURPOSE: Writes converters for conversion towards UTF8/UTF16/UCS2/UCS4.
UnicodeTrafoInfo:
Provides the information about the relation of character codes in a particular
coding to unicode character codes:
# Codec Values Unicode Values
[
(Source0_Begin, Source0_End, TargetInterval0_Begin),
(Source1_Begin, Source1_End, TargetInterval1_Begin),
(Source2_Begin, Source2_End, TargetInterval2_Begin),
...
]
"""
encoding_name = Lng.SAFE_IDENTIFIER(unicode_trafo_info.name)
if encoding_name in ("utf32", "unicode"):
source_interval_begin = 0
lexatom_size_in_byte = Setup.lexatom.size_in_byte
if lexatom_size_in_byte == -1: lexatom_size_in_byte = 4
source_interval_end = min(256**lexatom_size_in_byte, 0x200000)
target_interval_begin = 0
unicode_trafo_info = [
(source_interval_begin, source_interval_end, target_interval_begin)
]
utf8_function_body = ConverterWriterUTF8().do(unicode_trafo_info)
utf16_function_body = ConverterWriterUTF16().do(unicode_trafo_info)
utf32_function_body = ConverterWriterUTF32().do(unicode_trafo_info)
return blue_print(Lng.template_converter_character_functions(), [
["$$BODY_UTF8$$", utf8_function_body],
["$$BODY_UTF16$$", utf16_function_body],
["$$BODY_UTF32$$", utf32_function_body]])
class ConversionInfo:
"""A given interval in the character encoding corresponds to a certain byte
formatting range in the target encoding, where all bytes are formatted
the same way.
-- The codec interval is determined by:
.codec_interval_begin
.codec_interval_size
-- The byte formatting range is determined by its index.
.code_unit_n
-- In order to know where to start, the unicode offset that corresponds
to the codec interval must be specified:
.codec_interval_begin_unicode
Figure:
Source Codec
ci_begin
|
................[xxxxxxxxxxxxxxx]................
|--- ci_size -->|
belongs to
Unicode |<---- byte formatting range ---->|
| |--- ci_size-->| |
...........[+++++++++++++++++++++++++++|xxxxxxxxxxxxxx|++++++][
|
ci_begin_unicode
The codec interval always lies inside a single utf8 range.
"""
def __init__(self, CodeUnitN, CI_Begin_in_Unicode, CI_Begin, CI_Size=-1):
self.code_unit_n = CodeUnitN
self.codec_interval_begin = CI_Begin
self.codec_interval_size = CI_Size
self.codec_interval_begin_unicode = CI_Begin_in_Unicode
def __repr__(self):
return "[%i] at %08X: Codec Interval [%X,%X)" % \
(self.code_unit_n,
self.codec_interval_begin_unicode,
self.codec_interval_begin,
self.codec_interval_begin + self.codec_interval_size)
class ConverterWriter:
def __init__(self):
self.code_unit_n_occurrence_set = set([])
def do(self, UnicodeTrafoInfo, ProvidedConversionInfoF=False):
"""Creates code for a conversion to target encoding according to the conversion_table.
"""
        # 'ProvidedConversionInfoF' is only to be used for Unit Tests
if ProvidedConversionInfoF: conversion_table = UnicodeTrafoInfo
else: conversion_table = self.get_conversion_table(UnicodeTrafoInfo)
assert all(isinstance(entry, ConversionInfo) for entry in conversion_table)
# Make sure that the conversion table is sorted
conversion_table.sort(key=attrgetter("codec_interval_begin"))
def action(ci):
return "{ %s %s }" % \
(self.get_offset_code(ci),
self.jump_to_output_formatter(ci.code_unit_n))
if len(conversion_table) == 1:
ci = conversion_table[0]
txt = [ " %s" % self.get_offset_code(ci) ]
txt.extend(self.unicode_to_output(ci.code_unit_n))
else:
tm = [
(Interval(ci.codec_interval_begin, ci.codec_interval_begin + ci.codec_interval_size),
action(ci))
for ci in conversion_table
]
txt = []
transition_map.do(txt, tm, AssertBorderF=False)
txt.append(self.unicode_to_output_all_ranges())
return "\n".join(txt)
@typed(Info=ConversionInfo)
def get_offset_code(self, Info):
"""RETURNS: Code to implement code conversion to UNICODE by adding or
subtracting an offset.
"""
offset = Info.codec_interval_begin_unicode - Info.codec_interval_begin
return "%s" % Lng.ASSIGN("offset", "(int32_t)(%s)" % offset)
def get_conversion_table(self, UnicodeTrafoInfo):
"""The UnicodeTrafoInfo tells what ranges in the codec are mapped to
what ranges in unicode. The codec (e.g. UTF8/UTF16) has ranges of
different byte formatting.
This function identifies ranges in the codec that:
(1) map linearly to unicode
(2) belong to the same byte format range.
The result is a list of objects that identify those ranges in the codec
and their relation to unicode. See definition of class ConversionInfo
for a detailed description and a nice picture.
"""
trafo_info = copy(UnicodeTrafoInfo)
border_list = self.get_byte_format_range_border_list()
L = len(border_list)
# Sort transform info database according to target range
result = []
        trafo_info.sort(key=lambda entry: entry[2])
# Unicode Transformation Info -- A list of the following:
for source_interval_begin, source_interval_end, target_interval_begin in trafo_info:
# How does the target interval has to be split according to utf8-ranges?
begin_i = 0
while source_interval_begin >= border_list[begin_i]:
begin_i += 1
begin_i -= 1
# 'i' now stands on the first utf8_range that touches the source interval
info = ConversionInfo(begin_i+1, source_interval_begin, target_interval_begin)
# NOTE: size of target interval = size of source interval
remaining_size = source_interval_end - source_interval_begin
for i in range(begin_i, L-1):
remaining_utf8_range_size = border_list[i+1] - source_interval_begin
if remaining_utf8_range_size <= 0: break
info.codec_interval_size = min(remaining_utf8_range_size, remaining_size)
## print i, "%X: %x, %x" % (border_list[i+1], remaining_utf8_range_size, remaining_size)
result.append(info)
source_interval_begin = border_list[i+1]
target_interval_begin += info.codec_interval_size
remaining_size -= info.codec_interval_size
i += 1
if remaining_size <= 0: break
info = ConversionInfo(i+1, source_interval_begin, target_interval_begin)
if remaining_size != 0:
info.codec_interval_size = remaining_size
result.append(info)
result.sort(key=attrgetter("codec_interval_begin"))
return result
def jump_to_output_formatter(self, CodeUnitN):
assert CodeUnitN >= 1
assert CodeUnitN <= 4
self.code_unit_n_occurrence_set.add(CodeUnitN)
return Lng.GOTO_STRING("code_unit_n_%i" % CodeUnitN)
def unicode_to_output_all_ranges(self):
if max(self.code_unit_n_occurrence_set) > self.max_code_unit_n():
error.note("The lexer's functionality and robustness IS NOT affected by the following:\n"
"Optionally provided helper functions for conversion of lexemes towards\n"
"UTF8, UTF16 and UTF32 malfunction in case of usage beyond unicode.\n")
txt = []
for code_unit_n in sorted(self.code_unit_n_occurrence_set):
txt.append(Lng.LABEL_PLAIN("code_unit_n_%i" % code_unit_n))
txt.extend(self.unicode_to_output(code_unit_n))
return "\n".join(txt)
def unicode_to_output(self, CodeUnitN):
txt = [ Lng.ASSIGN("unicode", "(uint32_t)(%s)" % Lng.OP("(int32_t)input", "+", "offset")) ]
txt.extend(self.get_output_formatter(CodeUnitN))
txt.append(Lng.PURE_RETURN)
return [ " %s" % line for line in txt ]
class ConverterWriterUTF8(ConverterWriter):
def max_code_unit_n(self):
return 4
def get_output_formatter(self, CodeUnitN):
last_but_two = Lng.OP("0x80", "|",
"(%s)" % Lng.OP("(%s)" % Lng.OP("unicode", "&", "(uint32_t)0x3FFFF"),
">>", "12"))
last_but_one = Lng.OP("0x80", "|",
"(%s)" % Lng.OP("(%s)" % Lng.OP("unicode", "&", "(uint32_t)0xFFF"),
">>", "6"))
last = Lng.OP("0x80", "|",
"(%s)" % Lng.OP("unicode", "&", "(uint32_t)0x3F"))
rvalue_list = {
1: [
"unicode",
],
2: [
Lng.OP("0xC0", "|", "(%s)" % Lng.OP("unicode", ">>", "6")),
last,
],
3: [
Lng.OP("0xE0", "|", "(%s)" % Lng.OP("unicode", ">>", "12")),
last_but_one,
last,
],
4: [
Lng.OP("0xF0", "|", "(%s)" % Lng.OP("unicode", ">>", "18")),
last_but_two,
last_but_one,
last,
]
}[CodeUnitN]
return [
"%s" % Lng.INCREMENT_ITERATOR_THEN_ASSIGN("*output_pp", "(uint8_t)(%s)" % rvalue)
for rvalue in rvalue_list
]
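    # Worked example (illustrative, not from the original source): for
    # unicode = 0x20AC the three-code-unit branch produces
    #   0xE0 | (0x20AC >> 12)           = 0xE2
    #   0x80 | ((0x20AC & 0xFFF) >> 6)  = 0x82
    #   0x80 | (0x20AC & 0x3F)          = 0xAC
    # i.e. the UTF-8 sequence 0xE2 0x82 0xAC.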
def get_byte_format_range_border_list(self):
"""UTF8 covers the following regions with the corresponding numbers of bytes:
0x00000000 - 0x0000007F: 1 byte - 0xxxxxxx
0x00000080 - 0x000007FF: 2 bytes - 110xxxxx 10xxxxxx
0x00000800 - 0x0000FFFF: 3 bytes - 1110xxxx 10xxxxxx 10xxxxxx
0x00010000 - 0x001FFFFF: 4 bytes - 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
0x00200000 - 0x03FFFFFF: 5 bytes ... (not for unicode)
0x04000000 - 0x7FFFFFFF:
The range borders are, therefore, as mentioned in the return value.
"""
return [ 0x0, 0x00000080, 0x00000800, 0x00010000, 0x00200000, 0x04000000, 0x80000000, INTEGER_MAX]
class ConverterWriterUTF16(ConverterWriter):
def max_code_unit_n(self):
return 2
def get_output_formatter(self, CodeUnitN):
UnicodeMinus0x10000 = "(%s)" % Lng.OP("unicode", "-", "0x10000")
Offset_10bit_high = "(uint16_t)(%s)" % Lng.OP(UnicodeMinus0x10000, ">>", 10)
Offset_10bit_low = "(uint16_t)(%s)" % Lng.OP(UnicodeMinus0x10000, "&", "0x3FF")
return {
1: [
Lng.INCREMENT_ITERATOR_THEN_ASSIGN("*output_pp", "(uint16_t)(unicode)"),
],
2: [
Lng.INCREMENT_ITERATOR_THEN_ASSIGN("*output_pp",
"(uint16_t)(%s)" % Lng.OP("0xD800", "|" , Offset_10bit_high)),
Lng.INCREMENT_ITERATOR_THEN_ASSIGN("*output_pp",
"(uint16_t)(%s)" % Lng.OP("0xDC00", "|" , Offset_10bit_low)),
]
}[CodeUnitN]
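    # Worked example (illustrative, not from the original source): for
    # unicode = 0x1F600, UnicodeMinus0x10000 = 0xF600, so the two code units are
    #   0xD800 | (0xF600 >> 10)   = 0xD83D
    #   0xDC00 | (0xF600 & 0x3FF) = 0xDE00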
def get_byte_format_range_border_list(self):
"""UCS4 covers the whole range of unicode (extend 0x10FFFF to INTEGER_MAX to be nice)."""
return [ 0x0, 0x10000, INTEGER_MAX]
class ConverterWriterUTF32(ConverterWriter):
def max_code_unit_n(self):
return 1
def get_output_formatter(self, CodeUnitN):
return {
1: [ Lng.INCREMENT_ITERATOR_THEN_ASSIGN("*output_pp", "unicode") ]
}[CodeUnitN]
def get_byte_format_range_border_list(self):
"""UCS4 covers the whole range of unicode (extend 0x10FFFF to INTEGER_MAX to be nice)."""
return [ 0x0, INTEGER_MAX]
| 41.896956 | 110 | 0.578927 | 1,968 | 17,890 | 4.991362 | 0.192581 | 0.022804 | 0.021073 | 0.010995 | 0.268553 | 0.220605 | 0.13784 | 0.114833 | 0.079711 | 0.051308 | 0 | 0.03498 | 0.325657 | 17,890 | 426 | 111 | 41.995305 | 0.779261 | 0.253102 | 0 | 0.194444 | 0 | 0 | 0.093684 | 0.011214 | 0 | 0 | 0.011526 | 0 | 0.015873 | 1 | 0.095238 | false | 0 | 0.047619 | 0.02381 | 0.253968 | 0.015873 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64c6ded8ce35837ed8c633aa32ede13e3f7ab314 | 9,500 | py | Python | code/reasoningtool/QueryBioLink.py | dkoslicki/NCATS | 5d8e38d5868d830d8f8c215b8c649e3979ca03fb | [
"MIT"
] | 2 | 2018-02-26T19:16:26.000Z | 2020-12-16T03:31:58.000Z | code/reasoningtool/QueryBioLink.py | dkoslicki/NCATS | 5d8e38d5868d830d8f8c215b8c649e3979ca03fb | [
"MIT"
] | 17 | 2017-12-09T01:13:20.000Z | 2018-03-06T02:17:32.000Z | code/reasoningtool/QueryBioLink.py | dkoslicki/NCATS | 5d8e38d5868d830d8f8c215b8c649e3979ca03fb | [
"MIT"
] | null | null | null | ''' This module defines the class QueryBioLink. QueryBioLink class is designed
to communicate with Monarch APIs and their corresponding data sources. The
available methods include:
* query phenotype for disease
* query disease for gene
* query gene for disease
* query phenotype for gene
* query gene for pathway
* query label for disease
* query label for phenotype
* query anatomy for gene
* query gene for anatomy
* query anatomy for phenotype
'''
__author__ = 'Zheng Liu'
__copyright__ = 'Oregon State University'
__credits__ = ['Zheng Liu', 'Stephen Ramsey', 'Yao Yao']
__license__ = 'MIT'
__version__ = '0.1.0'
__maintainer__ = ''
__email__ = ''
__status__ = 'Prototype'
import requests
import sys
class QueryBioLink:
TIMEOUT_SEC = 120
API_BASE_URL = 'https://api.monarchinitiative.org/api/bioentity'
HANDLER_MAP = {
'get_phenotypes_for_disease': 'disease/{disease_id}/phenotypes',
'get_diseases_for_gene': 'gene/{gene_id}/diseases',
'get_genes_for_disease': 'disease/{disease_id}/genes',
'get_phenotypes_for_gene': 'gene/{gene_id}/phenotypes?exclude_automatic_assertions=true&unselect_evidence=true',
'get_genes_for_pathway': 'pathway/{pathway_id}/genes&unselect_evidence=true',
'get_label_for_disease': 'disease/{disease_id}',
'get_label_for_phenotype': 'phenotype/{phenotype_id}',
'get_anatomies_for_gene': 'gene/{gene_id}/expression/anatomy',
'get_genes_for_anatomy': 'anatomy/{anatomy_id}/genes',
'get_anatomies_for_phenotype': 'phenotype/{phenotype_id}/anatomy',
'get_synonyms_for_disease': '{disease_id}/associations'
}
@staticmethod
def __access_api(handler):
url = QueryBioLink.API_BASE_URL + '/' + handler
try:
res = requests.get(url,
timeout=QueryBioLink.TIMEOUT_SEC)
except requests.exceptions.Timeout:
print(url, file=sys.stderr)
print('Timeout in QueryBioLink for URL: ' + url, file=sys.stderr)
return None
status_code = res.status_code
if status_code != 200:
print(url, file=sys.stderr)
print('Status code ' + str(status_code) + ' for url: ' + url, file=sys.stderr)
return None
return res.json()
@staticmethod
def get_label_for_disease(disease_id):
handler = QueryBioLink.HANDLER_MAP['get_label_for_disease'].format(disease_id=disease_id)
results = QueryBioLink.__access_api(handler)
result_str = 'UNKNOWN'
if results is not None:
result_str = results['label']
return result_str
@staticmethod
def get_phenotypes_for_disease_desc(disease_id):
handler = QueryBioLink.HANDLER_MAP['get_phenotypes_for_disease'].format(disease_id=disease_id)
results = QueryBioLink.__access_api(handler)
ret_dict = dict()
if results is None:
return ret_dict
res_list = results['objects']
if len(res_list) > 200:
print('Number of phenotypes found for disease: ' + disease_id + ' is: ' + str(len(res_list)), file=sys.stderr)
for phenotype_id_str in res_list:
phenotype_label_str = QueryBioLink.get_label_for_phenotype(phenotype_id_str)
ret_dict[phenotype_id_str] = phenotype_label_str
return ret_dict
@staticmethod
def get_diseases_for_gene_desc(gene_id):
        '''for a given NCBI Entrez Gene ID, returns a ``dict`` of DOID/OMIM disease identifiers for the gene
        :returns: a ``dict`` mapping ``str`` disease ontology identifiers to their ``str`` labels
'''
handler = QueryBioLink.HANDLER_MAP['get_diseases_for_gene'].format(gene_id=gene_id)
results = QueryBioLink.__access_api(handler)
ret_data = dict()
if results is None:
return ret_data
ret_list = results['objects']
if len(ret_list) > 200:
print('Number of diseases found for gene ' + gene_id + ' is: ' + str(len(ret_list)), file=sys.stderr)
for disease_id in ret_list:
if 'DOID:' in disease_id or 'OMIM:' in disease_id:
ret_data[disease_id] = QueryBioLink.get_label_for_disease(disease_id)
return ret_data
@staticmethod
def get_genes_for_disease_desc(disease_id):
handler = QueryBioLink.HANDLER_MAP['get_genes_for_disease'].format(disease_id=disease_id)
results = QueryBioLink.__access_api(handler)
ret_list = []
if results is None:
return ret_list
ret_list = results['objects']
if len(ret_list) > 100:
print('number of genes found for disease ' + disease_id + ' is: ' + str(len(ret_list)), file=sys.stderr)
return ret_list
@staticmethod
def get_label_for_phenotype(phenotype_id_str):
handler = QueryBioLink.HANDLER_MAP['get_label_for_phenotype'].format(phenotype_id=phenotype_id_str)
results = QueryBioLink.__access_api(handler)
result_str = 'UNKNOWN'
if results is not None:
result_str = results['label']
return result_str
@staticmethod
def get_phenotypes_for_gene(gene_id):
handler = QueryBioLink.HANDLER_MAP['get_phenotypes_for_gene'].format(gene_id=gene_id)
results = QueryBioLink.__access_api(handler)
ret_list = []
if results is None:
return ret_list
ret_list = results['objects']
if len(ret_list) > 200:
print('Warning, got ' + str(len(ret_list)) + ' phenotypes for gene ' + gene_id, file=sys.stderr)
return ret_list
@staticmethod
def get_phenotypes_for_gene_desc(ncbi_entrez_gene_id):
phenotype_id_set = QueryBioLink.get_phenotypes_for_gene(ncbi_entrez_gene_id)
ret_dict = dict()
for phenotype_id_str in phenotype_id_set:
phenotype_label_str = QueryBioLink.get_label_for_phenotype(phenotype_id_str)
if 'HP:' in phenotype_id_str:
ret_dict[phenotype_id_str] = phenotype_label_str
return ret_dict
@staticmethod
def get_anatomies_for_gene(gene_id):
'''for a given NCBI Entrez Gene ID, returns a ``dict`` of Anatomy IDs and labels for the gene
:returns: a ``dict`` of <anatomy_ID, label>
'''
handler = QueryBioLink.HANDLER_MAP['get_anatomies_for_gene'].format(gene_id=gene_id)
results = QueryBioLink.__access_api(handler)
ret_dict = dict()
if results is None:
return ret_dict
res_dict = results['associations']
ret_dict = dict(map(lambda r: (r['object']['id'], r['object']['label']), res_dict))
if len(ret_dict) > 200:
print('Warning, got {} anatomies for gene {}'.format(len(ret_dict), gene_id), file=sys.stderr)
return ret_dict
@staticmethod
def get_genes_for_anatomy(anatomy_id):
'''for a given Anatomy ID, returns a ``list`` of Gene ID for the anatomy
:returns: a ``list`` of gene ID
'''
handler = QueryBioLink.HANDLER_MAP['get_genes_for_anatomy'].format(anatomy_id=anatomy_id)
results = QueryBioLink.__access_api(handler)
ret_list = []
if results is None:
return ret_list
res_dict = results['associations']
ret_list = list(map(lambda r: r['subject']['id'], res_dict))
if len(ret_list) > 200:
print('Warning, got {} genes for anatomy {}'.format(len(ret_list), anatomy_id), file=sys.stderr)
return ret_list
@staticmethod
def get_anatomies_for_phenotype(phenotype_id):
'''for a given phenotype ID, returns a ``dict`` of Anatomy IDs and labels for the phenotype
:returns: a ``dict`` of <anatomy_ID, label>
'''
handler = QueryBioLink.HANDLER_MAP['get_anatomies_for_phenotype'].format(phenotype_id=phenotype_id)
results = QueryBioLink.__access_api(handler)
ret_dict = dict()
if results is None:
return ret_dict
ret_dict = dict(map(lambda r: (r['id'], r['label']), results))
if len(ret_dict) > 200:
print('Warning, got {} anatomies for phenotype {}'.format(len(ret_dict), phenotype_id), file=sys.stderr)
return ret_dict
if __name__ == '__main__':
print(QueryBioLink.get_phenotypes_for_disease_desc('OMIM:605543'))
print(QueryBioLink.get_genes_for_disease_desc('OMIM:XXXXXX'))
print(QueryBioLink.get_genes_for_disease_desc('OMIM:605543'))
print(QueryBioLink.get_phenotypes_for_gene_desc('NCBIGene:1080')) # test for issue #22
print(QueryBioLink.get_diseases_for_gene_desc('NCBIGene:407053'))
print(QueryBioLink.get_diseases_for_gene_desc('NCBIGene:100048912'))
print(QueryBioLink.get_phenotypes_for_gene_desc('NCBIGene:4750'))
print(QueryBioLink.get_phenotypes_for_gene('NCBIGene:4750'))
print(QueryBioLink.get_diseases_for_gene_desc('NCBIGene:4750'))
print(QueryBioLink.get_diseases_for_gene_desc('NCBIGene:1111111'))
print(QueryBioLink.get_label_for_disease('DOID:1498'))
print(QueryBioLink.get_label_for_disease('OMIM:605543'))
print(QueryBioLink.get_label_for_phenotype('HP:0000003'))
print(QueryBioLink.get_anatomies_for_gene('NCBIGene:407053'))
print(QueryBioLink.get_genes_for_anatomy('UBERON:0000006'))
print(QueryBioLink.get_anatomies_for_phenotype('HP:0000003'))
| 39.915966 | 124 | 0.666211 | 1,191 | 9,500 | 4.97649 | 0.126784 | 0.029526 | 0.05399 | 0.044036 | 0.714864 | 0.55863 | 0.509701 | 0.453518 | 0.353805 | 0.338451 | 0 | 0.016346 | 0.233684 | 9,500 | 237 | 125 | 40.084388 | 0.797802 | 0.108842 | 0 | 0.443787 | 0 | 0 | 0.192974 | 0.096308 | 0 | 0 | 0 | 0 | 0.005917 | 1 | 0.065089 | false | 0 | 0.011834 | 0 | 0.218935 | 0.159763 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64c72ead265362a6fd2cf5c37958f0fa44622e3f | 7,319 | py | Python | tb_auto_publsh/publish_product.py | sinalma/taobao_tools | 222800ccd808ef4bbbcbcffc6b3f4fde133d8685 | [
"MIT"
] | null | null | null | tb_auto_publsh/publish_product.py | sinalma/taobao_tools | 222800ccd808ef4bbbcbcffc6b3f4fde133d8685 | [
"MIT"
] | null | null | null | tb_auto_publsh/publish_product.py | sinalma/taobao_tools | 222800ccd808ef4bbbcbcffc6b3f4fde133d8685 | [
"MIT"
] | null | null | null | import os
from selenium import webdriver
driver = webdriver.Chrome("D:/Programming/python/chromedriver.exe")
driver.maximize_window()
from os import path
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import UnexpectedAlertPresentException
import time,unittest, re
from selenium.webdriver.common.keys import Keys
sleepTime = 0.1
product = {}
param = {}
def loadData():
global product,param
f = open(r"F:/Python/Code/tb_auto_publsh/product.txt",encoding='UTF-8')
tmpProduct = f.readline()
product = convertToDict(tmpProduct)
f = open(r"F:/Python/Code/tb_auto_publsh/param.txt",encoding='UTF-8')
tmpParam = f.readline()
param = convertToDict(tmpParam)
# convert a string representation into a dictionary
def convertToDict(str):
str = str.replace("{","").replace("}","")
str = str.split(',')
tmpDict = {}
for idx1 in range(0,len(str)):
keyValue = str[idx1].replace("'","").split(':')
tmpDict[keyValue[0]] = keyValue[1]
return tmpDict
def loginWithScan():
driver.get("https://www.taobao.com/")
time.sleep(2)
driver.find_element_by_link_text(param['loginTrigger']).click()
time.sleep(6)
def inputDelay(obj,value):
    for idx in range(0,len(value)):
        obj.find_element_by_tag_name('input').send_keys(value[idx])
time.sleep(0.1)
def choiceParam(id,value):
excitation = driver.find_element_by_id(id)
excitation.find_element_by_class_name('content').click()
driver.find_element_by_xpath("//*[@title='"+value+"']").click()
if excitation.find_element_by_link_text(value):
return
else:
choiceParam(id,value)
time.sleep(1)
# retry counter used by writeParam
maxCount = 5
def writeParam(id,value):
global maxCount
maxCount -= 1
if maxCount < 0 :
return
param = driver.find_element_by_id(id)
for idx in range(0,len(value)):
param.find_element_by_tag_name('input').send_keys(value[idx])
time.sleep(0.2)
time.sleep(1)
text = param.find_element_by_tag_name('input').get_attribute('value')
if len(text) <= 0 or text != value:
param.find_element_by_tag_name('input').clear()
writeParam(id,value)
time.sleep(1)
else:
maxCount = 5
def writeDoubleParam(id,value1,value2):
size = driver.find_element_by_id(id)
size_xy = size.find_elements_by_class_name('sell-o-measurement-operand')
size_x_input = size_xy[0].find_element_by_tag_name('input')
size_x_input.send_keys(value1)
size_y_input = size_xy[1].find_element_by_tag_name('input')
size_y_input.send_keys(value2)
def setCatogory():
driver.get('https://upload.taobao.com/auction/sell.jhtml?spm=a313o.201708ban.category.d48.64f0197aLZBDbE&mytmenu=wym')
time.sleep(1)
    # choose the main category
driver.find_element_by_id('J_SearchKeyWord').send_keys(param['createCategory'])
time.sleep(1)
driver.find_element_by_id('J_SearchButton').click()
time.sleep(1)
driver.find_element_by_id('J_CatePubBtn').click()
time.sleep(1)
def publishProd():
setCatogory()
# set title
driver.find_element_by_id('title').send_keys(product['title'])
oriPlace = driver.find_element_by_id('struct-globalStock')
oriPlace.find_element_by_xpath("//input[@aria-checked='true']").send_keys(Keys.SPACE)
oriPlace.find_element_by_xpath("//input[@aria-checked='false']").click()
# .find_element_by_tag_name('input').click()
# mods = oriPlace.find_elements_by_class_name('tabNest-radio-info')
# mods[1].find_element_by_class_name('next-radio').find_element_by_tag_name('input').click()
# next-radio-inner press
# radios = oriPlace.find_element_by_class_name('info-content').find_element_by_class_name('next-radio-inner').click()
# oriPlaceRadios = oriPlace.find_elements_by_class_name('tabNest-radio-info')
# oriPlaceRadios[1].find_element_by_tag_name('input').click()
# oriPlaceRadios[1].find_element_by_link_text(product['originPlace']).send_keys(keys.space)
# oriPlace.find_element_by_link_text(product['originPlace']).click()
# oriPlaceRadios[1].find_element_by_link_text(product['originPlace']).click()
# set left module param
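    # the struct-p-* ids below appear to be Taobao's per-category attribute field identifiers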
writeParam('struct-p-20000',product['brand'])
writeDoubleParam('struct-p-148060595',product['sizeX'],product['sizeY'])
writeParam('struct-p-10016',product['model'])
writeParam('struct-p-29112',product['installMethod'])
writeParam('struct-p-192254056',product['temperature'])
writeParam('struct-p-186826808',product['lineLength'])
writeParam('struct-p-191164129',product['encodeType'])
writeParam('struct-p-195174015',product['rotation'])
# set right module param
# choiceParam('struct-p-195270003',product['axlehead'])
writeParam('struct-p-122216515',product['scene'])
writeParam('struct-p-147908493',product['weight'])
writeParam('struct-p-159198215',product['power'])
writeParam('struct-p-192190064',product['torque'])
writeParam('struct-p-180944594',product['voltage'])
writeParam('struct-p-195206008',product['electric'])
writeParam('struct-p-195206009',product['speed'])
writeParam('struct-p-191164130',product['gear'])
choiceParam('struct-p-159662152',product['protectlevel'])
choiceParam('struct-p-21299',product['place'])
choiceParam('struct-p-192256056',product['excitation'])
def getPage():
driver.get('https://shop70362492.taobao.com/category-1056421148.htm?spm=a1z10.1-c.0.0.19475140cHJ39v&search=y&catName=%B0%B2%B4%A8%CB%C5%B7%FE')
productLines = driver.find_elements_by_class_name('item3line1')
print(productLines)
for idx in range(0,len(productLines)):
products = productLines[idx]
products = products.find_elements_by_class_name('item')
for idx2 in range(0,len(products)):
product = products[idx2]
text = product.find_elements_by_class_name('item-name')
print(text[0].text)
def publishProd_l():
setCatogory()
# set title
driver.find_element_by_id('title').send_keys(product['title'])
# set left module param
choiceParam('struct-p-21299',product['place'])
writeParam('struct-p-20000',product['brand'])
writeDoubleParam('struct-p-148060595',product['sizeX'],product['sizeY'])
choiceParam('struct-p-192256056',product['excitation'])
writeParam('struct-p-10016',product['model'])
writeParam('struct-p-29112',product['installMethod'])
writeParam('struct-p-192254056',product['temperature'])
writeParam('struct-p-186826808',product['lineLength'])
writeParam('struct-p-191164129',product['encodeType'])
writeParam('struct-p-195174015',product['rotation'])
# set right module param
# choiceParam('struct-p-195270003',product['axlehead'])
writeParam('struct-p-147908493',product['weight'])
choiceParam('struct-p-159662152',product['protectlevel'])
writeParam('struct-p-159198215',product['power'])
writeParam('struct-p-192190064',product['torque'])
writeParam('struct-p-180944594',product['voltage'])
writeParam('struct-p-195206008',product['electric'])
writeParam('struct-p-195206009',product['speed'])
writeParam('struct-p-191164130',product['gear'])
loadData()
loginWithScan()
time.sleep(2)
publishProd()
| 35.357488 | 148 | 0.704741 | 933 | 7,319 | 5.353698 | 0.239014 | 0.054655 | 0.078078 | 0.041842 | 0.606607 | 0.564164 | 0.466867 | 0.419419 | 0.378378 | 0.335135 | 0 | 0.067068 | 0.13827 | 7,319 | 206 | 149 | 35.529126 | 0.724909 | 0.134445 | 0 | 0.404255 | 0 | 0.014184 | 0.25 | 0.032161 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078014 | false | 0 | 0.049645 | 0 | 0.148936 | 0.014184 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64cc5aac32b4fc2a5556f4d550730a0acdbd74d3 | 5,781 | py | Python | sailor/assetcentral/workorder.py | gecko17/project-sailor | 7a35eeec2a6a8ec9bc998e39e8ffad4703cec5d7 | [
"Apache-2.0"
] | 19 | 2021-03-02T12:27:48.000Z | 2022-03-31T15:24:41.000Z | sailor/assetcentral/workorder.py | gecko17/project-sailor | 7a35eeec2a6a8ec9bc998e39e8ffad4703cec5d7 | [
"Apache-2.0"
] | 75 | 2021-03-04T16:58:47.000Z | 2022-03-31T08:31:06.000Z | sailor/assetcentral/workorder.py | gecko17/project-sailor | 7a35eeec2a6a8ec9bc998e39e8ffad4703cec5d7 | [
"Apache-2.0"
] | 2 | 2021-11-08T12:45:05.000Z | 2022-02-27T18:42:13.000Z | """
Workorder module can be used to retrieve Workorder information from AssetCentral.
Classes are provided for individual Workorders as well as groups of Workorders (WorkorderSet).
"""
from sailor import _base
from ..utils.timestamps import _string_to_timestamp_parser
from .constants import VIEW_WORKORDERS
from .utils import (AssetcentralEntity, _AssetcentralField, AssetcentralEntitySet,
_ac_application_url, _ac_fetch_data)
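# each _AssetcentralField below maps a sailor-facing attribute name (first
# argument) to the raw AssetCentral field name (second argument), optionally
# with query/extraction helpers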
_WORKORDER_FIELDS = [
_AssetcentralField('name', 'internalId'),
_AssetcentralField('type_description', 'workOrderTypeDescription'),
_AssetcentralField('priority_description', 'priorityDescription'),
_AssetcentralField('status_text', 'statusDescription'),
_AssetcentralField('short_description', 'shortDescription'),
_AssetcentralField('equipment_name', 'equipmentName'),
_AssetcentralField('location', 'location'),
_AssetcentralField('plant', 'plant'),
_AssetcentralField('start_date', 'startDate',
query_transformer=_base.masterdata._qt_date),
_AssetcentralField('end_date', 'endDate',
query_transformer=_base.masterdata._qt_date),
_AssetcentralField('long_description', 'longDescription'),
_AssetcentralField('id', 'workOrderID'),
_AssetcentralField('equipment_id', 'equipmentId'),
_AssetcentralField('model_id', 'modelId'),
_AssetcentralField('type', 'workOrderType'),
_AssetcentralField('_status', 'status'),
_AssetcentralField('_priority', 'priority'),
_AssetcentralField('_workcenter', 'workCenter'),
_AssetcentralField('_is_internal', 'isInternal'),
_AssetcentralField('_created_by', 'createdBy'),
_AssetcentralField('_created_on', 'creationDateTime'),
_AssetcentralField('_lastChangedBy', 'lastChangedBy'),
_AssetcentralField('_changed_on', 'lastChangeDateTime'),
_AssetcentralField('_basic_start_date', 'basicStartDate', get_extractor=_string_to_timestamp_parser(unit='ms')),
_AssetcentralField('_basic_end_date', 'basicEndDate', get_extractor=_string_to_timestamp_parser(unit='ms')),
_AssetcentralField('_actual_start_date', 'actualStartDate',
get_extractor=_string_to_timestamp_parser(unit='ms')),
_AssetcentralField('_actual_end_date', 'actualEndDate', get_extractor=_string_to_timestamp_parser(unit='ms')),
_AssetcentralField('_progress_status', 'progressStatus'),
_AssetcentralField('_progress_status_description', 'progressStatusDescription'),
_AssetcentralField('_root_equipment_id', 'rootEquipmentId'),
_AssetcentralField('_root_equipment_name', 'rootEquipmentName'),
_AssetcentralField('_person_responsible', 'personResponsible'),
_AssetcentralField('_location_id', 'locationId'),
_AssetcentralField('_coordinates', 'coordinates'),
_AssetcentralField('_source', 'source'),
_AssetcentralField('_source_id', 'sourceId'),
_AssetcentralField('_operator_id', 'operatorId'),
_AssetcentralField('_is_source_active', 'isSourceActive'),
_AssetcentralField('_asset_core_equipment_id', 'assetCoreEquipmentId'),
_AssetcentralField('_operator', 'operator'),
]
@_base.add_properties
class Workorder(AssetcentralEntity):
"""AssetCentral Workorder Object."""
_field_map = {field.our_name: field for field in _WORKORDER_FIELDS}
class WorkorderSet(AssetcentralEntitySet):
"""Class representing a group of Workorders."""
_element_type = Workorder
_method_defaults = {
'plot_distribution': {
'by': 'equipment_name',
},
}
def find_workorders(*, extended_filters=(), **kwargs) -> WorkorderSet:
"""Fetch Workorders from AssetCentral with the applied filters, return a WorkorderSet.
This method supports the usual filter criteria, i.e.
- Any named keyword arguments applied as equality filters, i.e. the name of the Workorder property is checked
against the value of the keyword argument. If the value of the keyword argument is an iterable (e.g. a list)
then all objects matching any of the values in the iterable are returned.
Parameters
----------
extended_filters
See :ref:`filter`.
**kwargs
See :ref:`filter`.
Examples
--------
Find all Workorders with name 'MyWorkorder'::
find_workorders(name='MyWorkorder')
Find all Workorders which either have the name 'MyWorkorder' or the name 'MyOtherWorkorder'::
find_workorders(name=['MyWorkorder', 'MyOtherWorkorder'])
Find all workorders with very high priority::
find_workorders(priority = 20)
If multiple named arguments are provided then *all* conditions have to match.
Example
-------
    Find all workorders with very high priority (20) and progress status 'pending' (15)::
        find_workorders(priority = 20, progressStatus = 15)
    The ``extended_filters`` parameter can be used to specify filters that cannot be expressed as an equality. Each
    extended filter needs to be provided as a string; multiple filters can be passed as a list of strings. As above,
    all filter criteria need to match. Extended filters can be freely combined with named arguments; here, too, all
    filter criteria need to match for a Workorder to be returned.
Example
-------
Find all Workorders with start date higher than 2020-01-01::
find_workorders(extended_filters=['start_date >= "2020-01-01"'])
"""
unbreakable_filters, breakable_filters = \
_base.parse_filter_parameters(kwargs, extended_filters, Workorder._field_map)
endpoint_url = _ac_application_url() + VIEW_WORKORDERS
object_list = _ac_fetch_data(endpoint_url, unbreakable_filters, breakable_filters)
return WorkorderSet([Workorder(obj) for obj in object_list])
| 43.141791 | 116 | 0.733437 | 589 | 5,781 | 6.872666 | 0.36163 | 0.020751 | 0.020998 | 0.028409 | 0.141057 | 0.132411 | 0.104743 | 0.060277 | 0.060277 | 0.031621 | 0 | 0.005371 | 0.162602 | 5,781 | 133 | 117 | 43.466165 | 0.83082 | 0.331431 | 0 | 0.030303 | 0 | 0 | 0.284406 | 0.027202 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015152 | false | 0 | 0.060606 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64cc7a5f5c43f80efef521c2171660053264a4c7 | 796 | py | Python | widgets2/pixmap.py | gnthibault/PyQt6-Tutorial-Examples | c54819f74154de923df0bdfaa302d62b4bad890b | [
"BSD-2-Clause"
] | 38 | 2021-05-18T07:47:54.000Z | 2022-03-31T13:10:41.000Z | widgets2/pixmap.py | gnthibault/PyQt6-Tutorial-Examples | c54819f74154de923df0bdfaa302d62b4bad890b | [
"BSD-2-Clause"
] | 3 | 2021-08-03T03:49:42.000Z | 2021-09-09T08:09:23.000Z | widgets2/pixmap.py | gnthibault/PyQt6-Tutorial-Examples | c54819f74154de923df0bdfaa302d62b4bad890b | [
"BSD-2-Clause"
] | 16 | 2021-06-12T11:25:58.000Z | 2022-03-05T07:43:10.000Z | #!/usr/bin/python
"""
ZetCode PyQt6 tutorial
In this example, we display an image
on the window.
Author: Jan Bodnar
Website: zetcode.com
"""
from PyQt6.QtWidgets import (QWidget, QHBoxLayout,
QLabel, QApplication)
from PyQt6.QtGui import QPixmap
import sys
class Example(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
hbox = QHBoxLayout(self)
pixmap = QPixmap('sid.jpg')
lbl = QLabel(self)
lbl.setPixmap(pixmap)
hbox.addWidget(lbl)
self.setLayout(hbox)
self.move(300, 200)
self.setWindowTitle('Sid')
self.show()
def main():
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec())
if __name__ == '__main__':
main()
| 15.307692 | 50 | 0.614322 | 94 | 796 | 5.031915 | 0.617021 | 0.038055 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015385 | 0.265075 | 796 | 51 | 51 | 15.607843 | 0.793162 | 0.167085 | 0 | 0 | 0 | 0 | 0.027481 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.291667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64ce6f6efb2fdb2724c45647b09d052115673dcc | 3,423 | py | Python | scripts/get_dataset100.py | AShedko/UIR8_im2gps | f4a7adb7e632b42a306ebdfbffb8a3843d43a594 | [
"MIT"
] | 1 | 2020-11-12T11:46:30.000Z | 2020-11-12T11:46:30.000Z | scripts/get_dataset100.py | AShedko/UIR8_im2gps | f4a7adb7e632b42a306ebdfbffb8a3843d43a594 | [
"MIT"
] | null | null | null | scripts/get_dataset100.py | AShedko/UIR8_im2gps | f4a7adb7e632b42a306ebdfbffb8a3843d43a594 | [
"MIT"
] | null | null | null |
import math
import os
import random
import sys
from multiprocessing.pool import ThreadPool
import urllib.request
import file_utils
import pandas as pd
ds = pd.read_csv("../data/simplemaps-worldcities-basic.csv")
# Heuristically select from DB cities likely to be on google maps
cities = ds[(ds["pop"] > 20000) & ds.iso3.isin(
    ["USA", "GBR", "FRA", "JPN", "POL", "AUS", "ARG", "KOR"])].sample(100)[["city_ascii", "lat", "lng"]]
cities.city_ascii = cities.city_ascii.apply(lambda x: x.replace(" ", "_"))
cities = {city[0]: (city[1], city[2]) for city in cities.values}
# radius of the Earth (km)
R = 6378.1
# radius (km) around the city centre within which images are sampled
IMAGE_RADIUS = 10
# number of images to download from each city
NUM_IMAGES_PER_CITY = 100
# size of failed-download image
FAILED_DOWNLOAD_IMAGE_SIZE = 3464
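# (Street View appears to return a fixed-size placeholder image when no imagery
# is available, so this byte size is used to detect failed downloads)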
# place key in a file in the Geo-Localization directory
# as the only text in the file on one line
KEY_FILEPATH = "/home/ashedko/Projects/UIR/im2gps/LittlePlaNet/api_key.key"
API_KEY = file_utils.load_key(KEY_FILEPATH)
GOOGLE_URL = ("http://maps.googleapis.com/maps/api/streetview?"
"size=256x256&fov=120&pitch=10&key=" + API_KEY)
IMAGES_DIR = '../data/cities/'
def download_images_for_city(city, lat, lon):
print('downloading images of {}'.format(city))
num_imgs = 0
misses = 0
cur_directory = os.path.join(IMAGES_DIR, city)
if not os.path.exists(cur_directory):
os.makedirs(cur_directory)
while num_imgs < NUM_IMAGES_PER_CITY:
# randomly select latitude and longitude in the city
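        # pick a random bearing and distance, then apply the spherical
        # destination-point formula to offset the city centre by at most
        # IMAGE_RADIUS km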
        brng = math.radians(random.uniform(0, 360))  # random bearing in [0, 360) degrees, converted to radians
d = random.uniform(0, IMAGE_RADIUS)
lat_rad = math.radians(lat) # current lat point converted to radians
lon_rad = math.radians(lon) # current long point converted to radians
rand_lat = math.asin(math.sin(lat_rad)*math.cos(d/R) +
math.cos(lat_rad)*math.sin(d/R)*math.cos(brng))
rand_lon = lon_rad + math.atan2(math.sin(brng)*math.sin(d/R)*math.cos(lat_rad),
math.cos(d/R)-math.sin(lat_rad)*math.sin(rand_lat))
rand_lat = math.degrees(rand_lat)
rand_lon = math.degrees(rand_lon)
# download image
filename = 'lat-{}-lon-{}.jpg'.format(round(rand_lat, 4), round(rand_lon, 4))
filepath = os.path.join(cur_directory, filename)
        url = GOOGLE_URL + "&location=" + str(rand_lat) + "," + str(rand_lon) + "&heading=" + str(math.degrees(brng))
res = urllib.request.urlretrieve(url, filepath)
# check if the downloaded image was invalid and if so remove it
if os.path.isfile(filepath):
size = os.path.getsize(filepath)
if size == FAILED_DOWNLOAD_IMAGE_SIZE:
os.remove(filepath)
misses += 1
else:
num_imgs += 1
print('invalid photo of {} downloaded {} times'.format(city, misses))
# file_utils.upload_directory_to_aws(cur_directory)
def download_images():
# download images for each city in a different thread
num_threads = 8
pool = ThreadPool(num_threads)
for city, (lat, lon) in cities.items():
pool.apply_async(download_images_for_city, (city, lat, lon))
pool.close()
pool.join()
if __name__ == '__main__':
download_images()
| 36.414894 | 103 | 0.646217 | 488 | 3,423 | 4.366803 | 0.368852 | 0.022994 | 0.023463 | 0.01267 | 0.075551 | 0.066166 | 0.05725 | 0 | 0 | 0 | 0 | 0.020183 | 0.232837 | 3,423 | 93 | 104 | 36.806452 | 0.791318 | 0.189308 | 0 | 0 | 0 | 0 | 0.130206 | 0.049531 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032787 | false | 0 | 0.131148 | 0 | 0.163934 | 0.032787 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64cf331bf3aec4cd73fb15b8c3551c47a61671fb | 1,501 | py | Python | mistral/auth/keystone.py | soda-research/mistral | 550a3de9c2defc7ce26336cb705d9c8d87bbaddd | [
"Apache-2.0"
] | 205 | 2015-06-21T11:51:47.000Z | 2022-03-05T04:00:04.000Z | mistral/auth/keystone.py | soda-research/mistral | 550a3de9c2defc7ce26336cb705d9c8d87bbaddd | [
"Apache-2.0"
] | 21 | 2015-04-14T22:41:53.000Z | 2019-02-20T09:30:10.000Z | mistral/auth/keystone.py | soda-research/mistral | 550a3de9c2defc7ce26336cb705d9c8d87bbaddd | [
"Apache-2.0"
] | 110 | 2015-06-14T03:34:38.000Z | 2021-11-11T12:12:56.000Z | # Copyright 2016 - Brocade Communications Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from mistral import auth
from mistral import exceptions as exc
CONF = cfg.CONF
class KeystoneAuthHandler(auth.AuthHandler):
def authenticate(self, req):
# Note(nmakhotkin): Since we have deferred authentication,
# need to check for auth manually (check for corresponding
        # headers according to keystonemiddleware docs).
identity_status = req.headers.get('X-Identity-Status')
service_identity_status = req.headers.get('X-Service-Identity-Status')
if (identity_status == 'Confirmed' or
service_identity_status == 'Confirmed'):
return
if req.headers.get('X-Auth-Token'):
msg = 'Auth token is invalid: %s' % req.headers['X-Auth-Token']
else:
msg = 'Authentication required'
raise exc.UnauthorizedException(msg)
| 34.906977 | 78 | 0.691539 | 193 | 1,501 | 5.341969 | 0.580311 | 0.058196 | 0.037827 | 0.040737 | 0.054316 | 0.054316 | 0 | 0 | 0 | 0 | 0 | 0.00692 | 0.229847 | 1,501 | 42 | 79 | 35.738095 | 0.884948 | 0.507662 | 0 | 0 | 0 | 0 | 0.182825 | 0.034626 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.1875 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64d17a7f9f6e04f18e7fe0677f5b3c4e404da64e | 1,587 | py | Python | integrationtests/utils_tests.py | landcast/flaskseed | 15b73fef7345e8c05c4b6efe26c889f9818fabe3 | [
"Apache-2.0"
] | null | null | null | integrationtests/utils_tests.py | landcast/flaskseed | 15b73fef7345e8c05c4b6efe26c889f9818fabe3 | [
"Apache-2.0"
] | 1 | 2018-10-21T14:28:46.000Z | 2018-10-21T14:28:46.000Z | integrationtests/utils_tests.py | landcast/flaskseed | 15b73fef7345e8c05c4b6efe26c889f9818fabe3 | [
"Apache-2.0"
] | null | null | null | import json
import re
import subprocess
import sys
import unittest
sys.path.append('.')
from integrationtests import TestBase, json_header, server_location
class CourseTest(TestBase):
def test_course(self):
url = f'{server_location}/test/add_account_single_session'
json_data = "'" + json.dumps({
"state": 1,
"account_name": 'litao',
"account_no": "0123456789"
}) + "'"
cmd = f'''
curl -sS -i -H '{json_header}' -X POST --data {json_data} {url}
'''
print(cmd)
status_code, output = subprocess.getstatusoutput(cmd)
print(output)
self.assertTrue('200 OK' in output, 'expect http status return 200')
url = f'{server_location}/test/add_account_nested_session'
json_data = "'" + json.dumps({
"state": 1,
"account_name": 'tom',
"account_no": "1111111111"
}) + "'"
cmd = f'''
curl -sS -i -H '{json_header}' -X POST --data {json_data} {url}
'''
print(cmd)
status_code, output = subprocess.getstatusoutput(cmd)
print(output)
self.assertTrue('200 OK' in output, 'expect http status return 200')
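        # extract the JSON body (text between the outermost braces) from the raw
        # curl output, which also includes the HTTP response headers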
json_str = re.findall(r"\{(.*)\}", output, re.S)
json_res = json.loads('{' + json_str[0].replace('\n', '') + '}')
self.assertEqual(json_res['db_session_id'],
json_res['nested_db_session_id'],
'not using same db_session')
if __name__ == "__main__":
unittest.main()
| 31.117647 | 76 | 0.558286 | 180 | 1,587 | 4.688889 | 0.405556 | 0.037915 | 0.023697 | 0.042654 | 0.545024 | 0.545024 | 0.545024 | 0.469194 | 0.469194 | 0.372038 | 0 | 0.031475 | 0.299307 | 1,587 | 50 | 77 | 31.74 | 0.727518 | 0 | 0 | 0.47619 | 0 | 0.047619 | 0.321991 | 0.061752 | 0 | 0 | 0 | 0 | 0.071429 | 1 | 0.02381 | false | 0 | 0.142857 | 0 | 0.190476 | 0.095238 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64d2c172df3283a327a114c985bcc7b09bf329f7 | 5,309 | py | Python | scripts/evalutate_excell_data_condition_study.py | llucid-97/AdaS | 298beca98d5b432460c9f268364c0fe7ce8323a6 | [
"MIT"
] | 1 | 2020-06-12T17:14:31.000Z | 2020-06-12T17:14:31.000Z | scripts/evalutate_excell_data_condition_study.py | llucid-97/AdaS | 298beca98d5b432460c9f268364c0fe7ce8323a6 | [
"MIT"
] | 1 | 2020-08-12T21:10:48.000Z | 2020-08-12T21:10:48.000Z | scripts/evalutate_excell_data_condition_study.py | llucid-97/AdaS | 298beca98d5b432460c9f268364c0fe7ce8323a6 | [
"MIT"
] | 3 | 2020-06-17T21:51:16.000Z | 2020-07-23T03:26:13.000Z | """
MIT License
Copyright (c) 2020 Mahdi S. Hosseini
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
lr_method = 'Slope'
excel_name = '.xlsx'
df = pd.read_excel(excel_name)
df = df.T
loss_data = df.iloc[1::9, 1]
input_S_data = df.iloc[2::9, :]
output_S_data = df.iloc[3::9, :]
input_rank_data = df.iloc[4::9, :]
output_rank_data = df.iloc[5::9, :]
input_condition_data = df.iloc[6::9, :]
output_condition_data = df.iloc[7::9, :]
learning_rate_data = df.iloc[8::9, :]
acc_data = df.iloc[9::9, 1]
# input_rank_data = df.iloc[1::8, :]
# output_rank_data = df.iloc[2::8, :]
# fc_rank_data = df.iloc[3::8, :]
# lr_val = df.iloc[4::8, :]
# slope_conv_data = df.iloc[5::8, :]
# slope_fc_data = df.iloc[6::8, :]
# acc_data = df.iloc[7::8, 1]
# loss_data = df.iloc[8::8, 1]
plt.figure(1, figsize=(20, 8.5))
plt.suptitle('plot title')
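# one subplot per layer, laid out on a near-square grid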
for iteration_layer in range(input_rank_data.shape[1]):
plt.subplot(np.ceil(np.sqrt(input_rank_data.shape[1])), np.ceil(
np.sqrt(input_rank_data.shape[1])), iteration_layer+1)
plt.plot(np.array(range(1, output_rank_data.shape[0] + 1)), np.asarray(
output_rank_data.iloc[:, iteration_layer]), color='b')
plt.plot(np.array(range(1, input_rank_data.shape[0] + 1)), np.asarray(
input_rank_data.iloc[:, iteration_layer]), color='k')
# plt.plot(np.array(range(1, rank_accelerate_data.shape[0] + 1)), np.asarray(rank_accelerate_data.iloc[:, iteration_layer]), color='r')
# plt.plot(np.array(range(1, rank_velocity_data.shape[0] + 1)), np.asarray(rank_velocity_data.iloc[:, iteration_layer]), color='m')
plt.plot(np.array(range(1, learning_rate_data.shape[0] + 1)), np.asarray(
learning_rate_data.iloc[:, iteration_layer]), color='c')
plt.plot(
np.array(range(1, acc_data.shape[0] + 1)), np.asarray(acc_data), color='g')
plt.ylabel('Tensor Rank')
plt.xlabel('Epoch')
plt.title('Layer '+str(iteration_layer+1))
    plt.gca().legend(('Output Rank', 'Input Rank',
                      'learning rate', 'Test Accuracy'), prop={"size": 5})
plt.grid(True)
plt.ylim((-.2, 1))
plt.xlim((0, 200))
# max_rank = (input_rank_data.values + output_rank_data.values)/2
# max_rank = np.max(max_rank, axis=0)
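# per-layer maximum rank reached over all epochs (elementwise max of input and output ranks)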
max_rank_in = np.max(input_rank_data.values, axis=0)
max_rank_out = np.max(output_rank_data.values, axis=0)
max_rank = np.maximum(max_rank_in, max_rank_out)
# conv_arch = [64, 64, 128, 128, 256, 256, 256, 512, 512, 512, 512, 512, 512]
# np.minimum(np.round(np.multiply(max_rank, conv_arch)*1.5), conv_arch)
# plt.show()
plt.savefig(excel_name+'.png', dpi=300, bbox_inches='tight')
plt.close()
plt.figure(1, figsize=(20, 8.5))
plt.suptitle('plot title')
for iteration_layer in range(input_rank_data.shape[1]):
plt.subplot(np.ceil(np.sqrt(input_condition_data.shape[1])), np.ceil(
np.sqrt(input_condition_data.shape[1])), iteration_layer+1)
plt.plot(np.array(range(1, input_condition_data.shape[0] + 1)), np.asarray(
input_condition_data.iloc[:, iteration_layer]), color='b')
plt.plot(np.array(range(1, output_condition_data.shape[0] + 1)), np.asarray(
output_condition_data.iloc[:, iteration_layer]), color='k')
    plt.ylabel('Condition')
plt.xlabel('Epoch')
plt.title('Layer '+str(iteration_layer+1))
plt.gca().legend(('input-condition', 'output-condition'), prop={"size": 5})
plt.grid(True)
# plt.ylim((0, 100))
plt.xlim((0, 200))
plt.savefig(excel_name+'_condition.png', dpi=300, bbox_inches='tight')
plt.close()
plt.figure(4, figsize=(20, 8.5))
plt.suptitle('plot title')
for iteration_layer in range(input_rank_data.shape[1]):
ax = plt.subplot(np.ceil(np.sqrt(input_rank_data.shape[1])), np.ceil(
np.sqrt(input_rank_data.shape[1])), iteration_layer+1)
plt.plot(np.array(
range(1, input_condition_data.shape[0]+1)), np.asarray(loss_data), color='b')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.title('Layer '+str(iteration_layer+1))
plt.grid(True)
plt.ylim((1e-4, 1))
ax.set_yscale('log')
plt.xlim((0, 200))
# plt.show()
plt.savefig(excel_name+'_Loss.png', dpi=300, bbox_inches='tight')
plt.close()
| 41.476563 | 140 | 0.680542 | 864 | 5,309 | 4.027778 | 0.236111 | 0.045977 | 0.045977 | 0.036207 | 0.517529 | 0.455172 | 0.40977 | 0.317816 | 0.293103 | 0.280172 | 0 | 0.039128 | 0.162366 | 5,309 | 127 | 141 | 41.80315 | 0.743423 | 0.353927 | 0 | 0.36 | 0 | 0 | 0.072278 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.04 | 0 | 0.04 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64d4eca2a01366e3a22a289ad383de96073f8bac | 1,286 | py | Python | var/spack/repos/builtin/packages/netcdf-fortran/package.py | mrzv/spack | a0fb2838ea60f020179f480a2db1438da9d2e2ab | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2018-11-27T03:39:44.000Z | 2021-09-06T15:50:35.000Z | var/spack/repos/builtin/packages/netcdf-fortran/package.py | matzke1/spack | 9af44814b12639744926c56cdf16ac9e95490011 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/netcdf-fortran/package.py | matzke1/spack | 9af44814b12639744926c56cdf16ac9e95490011 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class NetcdfFortran(AutotoolsPackage):
"""Fortran interface for NetCDF4"""
homepage = "http://www.unidata.ucar.edu/software/netcdf"
url = "http://www.unidata.ucar.edu/downloads/netcdf/ftp/netcdf-fortran-4.4.3.tar.gz"
version('4.4.4', 'e855c789cd72e1b8bc1354366bf6ac72')
version('4.4.3', 'bfd4ae23a34635b273d3eb0d91cbde9e')
depends_on('netcdf')
# The default libtool.m4 is too old to handle NAG compiler properly:
# https://github.com/Unidata/netcdf-fortran/issues/94
patch('nag.patch', when='@:4.4.4%nag')
def configure_args(self):
return ['CPPFLAGS=-I' + self.spec['netcdf'].prefix.include]
@property
def libs(self):
libraries = ['libnetcdff']
# This package installs both shared and static libraries. Permit
# clients to query which one they want.
query_parameters = self.spec.last_query.extra_parameters
shared = 'shared' in query_parameters
return find_libraries(
libraries, root=self.prefix, shared=shared, recursive=True
)
| 32.974359 | 93 | 0.688958 | 162 | 1,286 | 5.425926 | 0.67284 | 0.013652 | 0.031854 | 0.040956 | 0.047782 | 0 | 0 | 0 | 0 | 0 | 0 | 0.059788 | 0.193624 | 1,286 | 38 | 94 | 33.842105 | 0.78785 | 0.341369 | 0 | 0 | 0 | 0.055556 | 0.302158 | 0.076739 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.055556 | 0.055556 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64d5db1f326dd6948ae5b6aa2e86f60f45afe760 | 2,293 | py | Python | tests/db/test_v5_to_v6.py | inmanta/inmanta-core | ae2153d57f124d00ad1b58e6d4bc6818364be4a8 | [
"Apache-2.0"
] | 6 | 2021-03-09T10:24:02.000Z | 2022-01-16T03:52:11.000Z | tests/db/test_v5_to_v6.py | inmanta/inmanta-core | ae2153d57f124d00ad1b58e6d4bc6818364be4a8 | [
"Apache-2.0"
] | 1,319 | 2020-12-18T08:52:29.000Z | 2022-03-31T18:17:32.000Z | tests/db/test_v5_to_v6.py | inmanta/inmanta-core | ae2153d57f124d00ad1b58e6d4bc6818364be4a8 | [
"Apache-2.0"
] | 4 | 2021-03-03T15:36:50.000Z | 2022-03-11T11:41:51.000Z | """
Copyright 2020 Inmanta
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contact: code@inmanta.com
"""
import os
from typing import AsyncIterator
import pytest
from asyncpg import Connection
from db.common import PGRestore
from inmanta.server.bootloader import InmantaBootloader
@pytest.fixture
@pytest.mark.slowtest
async def migrate_v5_to_v6(
hard_clean_db, hard_clean_db_post, postgresql_client: Connection, async_finalizer, server_config
) -> AsyncIterator[None]:
# Get old tables
with open(os.path.join(os.path.dirname(__file__), "dumps/v5.sql"), "r") as fh:
await PGRestore(fh.readlines(), postgresql_client).run()
ibl = InmantaBootloader()
await ibl.start()
# When the bootloader is started, it also executes the migration to v6
yield
await ibl.stop()
@pytest.mark.asyncio
async def test_add_on_delete_cascade_constraint(migrate_v5_to_v6, postgresql_client: Connection) -> None:
"""
Verify that the ON DELETE CASCADE constraint is set correctly on the substitute_compile_id column
of the compile table.
"""
# Assert values in substitute_compile_id column are correct
compiles = await postgresql_client.fetch("SELECT substitute_compile_id FROM public.compile")
assert all([c["substitute_compile_id"] is None for c in compiles])
# Assert that ON DELETE CASCADE is set the foreign key constraint compile_substitute_compile_id_fkey
constraints = await postgresql_client.fetch(
"""
SELECT pg_catalog.pg_get_constraintdef(r.oid, true) as condef
FROM pg_catalog.pg_constraint r
WHERE conname='compile_substitute_compile_id_fkey'
"""
)
assert len(constraints) == 1
assert "ON DELETE CASCADE" in constraints[0]["condef"]
| 34.742424 | 105 | 0.73659 | 318 | 2,293 | 5.160377 | 0.496855 | 0.036563 | 0.06947 | 0.0195 | 0.075564 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008663 | 0.194505 | 2,293 | 65 | 106 | 35.276923 | 0.879805 | 0.355866 | 0 | 0 | 0 | 0 | 0.096596 | 0.038638 | 0 | 0 | 0 | 0 | 0.12 | 1 | 0 | false | 0 | 0.24 | 0 | 0.24 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64d944653978f4300eb65aebe5ed000c73c7f7d4 | 6,840 | py | Python | is_data_handler.py | AlexanderFengler/nn_likelihoods | 2d0f63a63eb50f026b9492acba14708b23dfcaa4 | [
"MIT"
] | 2 | 2019-08-19T15:48:01.000Z | 2020-03-13T12:47:23.000Z | is_data_handler.py | AlexanderFengler/nn_likelihoods | 2d0f63a63eb50f026b9492acba14708b23dfcaa4 | [
"MIT"
] | null | null | null | is_data_handler.py | AlexanderFengler/nn_likelihoods | 2d0f63a63eb50f026b9492acba14708b23dfcaa4 | [
"MIT"
] | 6 | 2019-06-13T04:46:51.000Z | 2021-01-27T18:26:59.000Z | import os
import pickle
import numpy as np
import re
from string import ascii_letters
from datetime import datetime
import argparse
import gzip
def collect_datasets_is(folder = [],
model = [],
ndata = [],
nsubsample = []):
# Load in parameter recovery data
if machine == 'ccv':
if model == 'weibull' or model == 'weibull2':
param_recov_files = os.listdir('/users/afengler/data/kde/' + 'weibull_cdf' + '/parameter_recovery_data_binned_1_nbins_512_n_' + str(ndata) + '/')
param_recov_dat = pickle.load(open('/users/afengler/data/kde/' + 'weibull_cdf' + '/parameter_recovery_data_binned_1_nbins_512_n_' + str(ndata) + '/' + param_recov_files[0], 'rb'))
else:
param_recov_files = os.listdir('/users/afengler/data/kde/' + model + '/parameter_recovery_data_binned_1_nbins_512_n_' + str(ndata) + '/')
param_recov_dat = pickle.load(open('/users/afengler/data/kde/' + model + '/parameter_recovery_data_binned_1_nbins_512_n_' + str(ndata) + '/' + param_recov_files[0], 'rb'))
if machine == 'x7':
param_recov_files = os.listdir('/media/data_cifs/projects/prj_approx-bayes/projectABC/data/' + model + '/parameter_recovery_data_binned_1_nbins_512_n_' + str(ndata) + '/')
param_recov_dat = pickle.load(open('/media/data_cifs/projects/prj_approx-bayes/projectABC/data/' + model + '/parameter_recovery_data_binned_1_nbins_512_n_' + str(ndata) + '/' + param_recov_files[0], 'rb'))
n_data_substring = 'N_' + str(ndata)
is_dict = {}
is_dict['gt'] = []
is_dict['posterior_samples'] = []
is_dict['timings'] = []
is_dict['perplexities'] = []
is_dict['importance_weights'] = []
is_dict['effective_sample_size'] = []
is_dict['means'] = []
is_dict['maps'] = []
is_dict['data'] = []
files_ = os.listdir(folder)
cnt = 0
for file_ in files_:
if model + '_training_' in file_ and n_data_substring in file_ and 'summary' not in file_:
print(cnt)
print('Processing file: ', file_)
cnt += 1
# extract id
st = file_.find('_idx_')
fin = file_.find('_tdist')
idx = int(file_[st + len('_idx_'):fin])
tmp = pickle.load(gzip.open(folder + file_, 'rb'), encoding = 'latin1')
sub_idx = np.random.choice(tmp['posterior_samples'].shape[0], nsubsample, replace = False)
is_dict['gt'].append(tmp['gt_params'])
is_dict['posterior_samples'].append(tmp['posterior_samples'][sub_idx, :])
is_dict['timings'].append(tmp['timeToConvergence'])
is_dict['perplexities'].append(tmp['norm_perplexity'])
is_dict['importance_weights'].append(tmp['final_w'][sub_idx])
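            # Kish effective sample size 1 / sum(w_i^2), assuming final_w holds
            # normalised importance weights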
is_dict['effective_sample_size'].append(1 / np.sum(np.square(tmp['final_w'])))
is_dict['means'].append(np.mean(tmp['posterior_samples'], axis = 0))
is_dict['maps'].append(tmp['final_x'][np.argmax(tmp['log_likelihood']), :])
# Add data
is_dict['data'].append(param_recov_dat[1][0][idx, : , :])
print('Processed file: ', file_)
#print(model + '_training_' in file_)
is_dict['gt'] = np.stack(is_dict['gt'])
is_dict['posterior_samples'] = np.stack(is_dict['posterior_samples'])
is_dict['timings'] = np.array(is_dict['timings'])
is_dict['perplexities'] = np.array(is_dict['perplexities'])
is_dict['importance_weights'] = np.stack(is_dict['importance_weights'])
is_dict['means'] = np.stack(is_dict['means'])
is_dict['maps'] = np.stack(is_dict['maps'])
is_dict['data'] = np.stack(is_dict['data'])
if machine == 'ccv':
if model == 'weibull':
print('writing to file: ', '/users/afengler/data/eLIFE_exps/summaries/IS_summary_' + 'weibull_cdf' + \
'_' + n_data_substring + '.pickle')
pickle.dump(is_dict, open('/users/afengler/data/eLIFE_exps/summaries/IS_summary_' + 'weibull_cdf' + \
'_' + n_data_substring + '.pickle', 'wb'), protocol = 4)
else:
print('writing to file: ', '/users/afengler/data/eLIFE_exps/summaries/IS_summary_' + model + \
'_' + n_data_substring + '.pickle')
pickle.dump(is_dict, open('/users/afengler/data/eLIFE_exps/summaries/IS_summary_' + model + \
'_' + n_data_substring + '.pickle', 'wb'), protocol = 4)
if machine == 'x7':
print('writing to file: ', '/media/data_cifs/projects/prj_approx-bayes/projectABC/' + isfolder + '/' + 'IS_summary_' + \
model + '_' + n_data_substring + '.pickle')
pickle.dump(is_dict, open( '/media/data_cifs/projects/prj_approx-bayes/projectABC/' + isfolder + '/' + 'IS_summary_' + \
model + '_' + n_data_substring + '.pickle', 'wb'), protocol = 4)
return is_dict
if __name__ == "__main__":
# Currently available models = ['weibull', 'race_model_6', 'ornstein', 'full_ddm', 'ddm_seq2', 'ddm_par2', 'ddm_mic2']
CLI = argparse.ArgumentParser()
CLI.add_argument("--machine",
type = str,
default = 'x7')
CLI.add_argument("--method",
type = str,
default = 'ddm')
CLI.add_argument("--ndata",
type = int,
default = 1024)
CLI.add_argument("--nsubsample",
type = int,
default = 10000)
CLI.add_argument("--isfolder",
type = str,
default = 'eLIFE_exps')
args = CLI.parse_args()
print(args)
machine = args.machine
method = args.method
ndata = args.ndata
nsubsample = args.nsubsample
isfolder = args.isfolder
if machine == 'home':
is_sample_folder = '/Users/afengler/OneDrive/project_nn_likelihoods/data/' + isfolder + '/'
if method == 'weibull_cdf' or method == 'weibull_cdf2':
method = 'weibull'
if machine == 'ccv':
is_sample_folder = '/users/afengler/data/' + isfolder + '/'
if method == 'weibull_cdf' or method == 'weibull_cdf2':
method = 'weibull'
if machine == 'x7':
is_sample_folder = '/media/data_cifs/projects/prj_approx-bayes/projectABC/' + isfolder + '/'
print(is_sample_folder)
print('Started processing model: ', method, ' with ndata: ', ndata)
collect_datasets_is(folder = is_sample_folder,
model = method,
ndata = ndata,
nsubsample = nsubsample) | 44.705882 | 213 | 0.575585 | 781 | 6,840 | 4.723432 | 0.207426 | 0.063432 | 0.041475 | 0.043914 | 0.532665 | 0.499593 | 0.462185 | 0.391434 | 0.391434 | 0.364055 | 0 | 0.011722 | 0.276608 | 6,840 | 153 | 214 | 44.705882 | 0.733832 | 0.029825 | 0 | 0.175 | 0 | 0 | 0.280501 | 0.148394 | 0 | 0 | 0 | 0 | 0 | 1 | 0.008333 | false | 0 | 0.091667 | 0 | 0.108333 | 0.075 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64da409d0683b2127b58982d077ae000d035597d | 9,239 | py | Python | apps/api/tests/test_user.py | onyxhealth/safhir-vmi | 60ba90e9e8ba00347e0dc32e3061da5285df4ade | [
"Apache-2.0"
] | null | null | null | apps/api/tests/test_user.py | onyxhealth/safhir-vmi | 60ba90e9e8ba00347e0dc32e3061da5285df4ade | [
"Apache-2.0"
] | 9 | 2021-03-19T11:43:09.000Z | 2022-03-12T00:38:43.000Z | apps/api/tests/test_user.py | onyxhealth/safhir-vmi | 60ba90e9e8ba00347e0dc32e3061da5285df4ade | [
"Apache-2.0"
] | null | null | null | from datetime import date
from django.contrib.auth import get_user_model
from django.test import Client
from oauth2_provider.models import get_application_model, get_access_token_model
from apps.accounts.models import UserProfile
from .base import BaseTestCase
User = get_user_model()
Application = get_application_model()
AccessToken = get_access_token_model()
class UserTestCase(BaseTestCase):
def test_create_user_success(self):
client = Client()
response = client.post(
"/api/v1/user/",
{
"preferred_username": "james",
"given_name": "James",
"family_name": "Kirk",
"gender": "male",
"password": "tree garden jump fox",
"birthdate": "1952-01-03",
"nickname": "Jim",
"phone_number": "+15182345678",
"email": "jamess@example.com",
},
content_type='application/json',
Authorization="Bearer {}".format(self.token.token),
)
self.assertEqual(response.status_code, 201, response.content)
self.assertDictContainsSubset({
# "iss": "http://localhost:8000",
# "subject": "123456789012345",
"preferred_username": "james",
"given_name": "James",
"family_name": "Kirk",
"name": "James Kirk",
"gender": "male",
"birthdate": "1952-01-03",
"nickname": "Jim",
"phone_number": "+15182345678",
"email": "jamess@example.com",
"ial": '1',
# "id_assursance": [],
"document": [],
"address": []
}, response.json())
up = UserProfile.objects.get(subject=response.json()['sub'])
self.assertEqual(up.user.username, "james")
def test_read_user_success(self):
client = Client()
create_response = client.post(
"/api/v1/user/",
{
"preferred_username": "james",
"given_name": "James",
"family_name": "Kirk",
"gender": "male",
"password": "tree garden jump fox",
"birthdate": "1952-01-03",
"nickname": "Jim",
"phone_number": "+15182345678",
"email": "jamess@example.com",
},
content_type='application/json',
Authorization="Bearer {}".format(self.token.token),
)
response = client.get(
"/api/v1/user/{}/".format(create_response.json()['sub']),
Authorization="Bearer {}".format(self.token.token),
)
self.assertEqual(response.status_code, 200, response.content)
self.assertDictContainsSubset({
# "iss": "http://localhost:8000",
# "subject": "123456789012345",
"preferred_username": "james",
"given_name": "James",
"family_name": "Kirk",
"name": "James Kirk",
"gender": "male",
"birthdate": "1952-01-03",
"nickname": "Jim",
"phone_number": "+15182345678",
"email": "jamess@example.com",
"ial": '1',
# "id_assursance": [],
"document": [],
"address": []
}, response.json())
def test_update_user_success(self):
self.maxDiff = None
client = Client()
response = client.post(
"/api/v1/user/",
{
"preferred_username": "james",
"given_name": "James",
"family_name": "Kirk",
"gender": "male",
"password": "tree garden jump fox",
"birthdate": "1952-01-03",
"nickname": "Jim",
"phone_number": "+15182345678",
"email": "jamess@example.com",
},
content_type="application/json",
Authorization="Bearer {}".format(self.token.token),
)
update_response = client.put(
"/api/v1/user/{}/".format(response.json()['sub']),
{
"birthdate": "2233-03-22",
"family_name": "bob",
},
content_type="application/json",
Authorization="Bearer {}".format(self.token.token),
)
self.assertEqual(update_response.status_code, 200, update_response.content)
self.assertEqual("2233-03-22", update_response.json()['birthdate'])
up = UserProfile.objects.get(subject=response.json()['sub'])
self.assertEqual(up.birth_date, date(2233, 3, 22))
self.assertEqual(up.user.last_name, "bob")
def test_delete_user_success(self):
self.maxDiff = None
client = Client()
response = client.post(
"/api/v1/user/",
{
"preferred_username": "james",
"given_name": "James",
"family_name": "Kirk",
"gender": "male",
"password": "tree garden jump fox",
"birthdate": "1952-01-03",
"nickname": "Jim",
"phone_number": "+15182345678",
"email": "jamess@example.com",
},
Authorization="Bearer {}".format(self.token.token),
)
delete_response = client.delete(
"/api/v1/user/{}/".format(response.json()['sub']),
Authorization="Bearer {}".format(self.token.token),
)
self.assertEqual(delete_response.status_code, 204, delete_response.content)
with self.assertRaises(UserProfile.DoesNotExist):
UserProfile.objects.get(subject=response.json()['sub'])
def test_search_users(self):
"""The user API endpoint can be searched with the 'first_or_last_name' parameter."""
client = Client()
# There is currently 1 user (the one making the request)
response1 = client.get(
"/api/v1/user/",
Authorization="Bearer {}".format(self.token.token),
)
self.assertEqual(len(response1.json()), 1)
# Create some users
user1 = User.objects.create(first_name='One', last_name='Example', username='testuser1')
UserProfile.objects.create(user=user1)
user2 = User.objects.create(first_name='Two', last_name='Example', username='testuser2')
UserProfile.objects.create(user=user2)
user3 = User.objects.create(first_name='Three', last_name='Example', username='testuser3')
UserProfile.objects.create(user=user3)
with self.subTest('No search term'):
# GETting the user list page without a search term returns all users
response = client.get(
"/api/v1/user/",
Authorization="Bearer {}".format(self.token.token),
)
self.assertEqual(len(response.json()), 4)
with self.subTest('Empty search term'):
# GETting the user list page with an empty search term returns all users
response = client.get(
"/api/v1/user/?first_or_last_name=",
Authorization="Bearer {}".format(self.token.token),
)
self.assertEqual(len(response.json()), 4)
with self.subTest('Exact match'):
# GETting the user list page with a search term that matches 1 user
response = client.get(
"/api/v1/user/?first_or_last_name=one", # Note: the search is case-insensitive
Authorization="Bearer {}".format(self.token.token),
)
self.assertEqual(len(response.json()), 1)
self.assertEqual(response.json()[0]['given_name'], 'One')
self.assertDictContainsSubset(
{'given_name': 'One', 'family_name': 'Example', 'sub': user1.userprofile.sub},
response.json()[0]
)
with self.subTest('Multiple matches'):
# GETting the user list page with a search term that matches multiple users
response = client.get(
"/api/v1/user/?first_or_last_name=example",
Authorization="Bearer {}".format(self.token.token),
)
self.assertEqual(len(response.json()), 3)
self.assertEqual(
set(user['given_name'] for user in response.json()),
set(['One', 'Two', 'Three'])
)
self.assertEqual(
set(user['family_name'] for user in response.json()),
set(['Example', 'Example', 'Example'])
)
self.assertEqual(
set(user['sub'] for user in response.json()),
set([user1.userprofile.sub, user2.userprofile.sub, user3.userprofile.sub])
)
with self.subTest('No match'):
# GETting the user list page with a search term that matches no users
response = client.get(
"/api/v1/user/?first_or_last_name=jkfskjdfskjdnbfjshbvjhsbvsjd",
Authorization="Bearer {}".format(self.token.token),
)
self.assertEqual(len(response.json()), 0)
| 39.652361 | 98 | 0.535556 | 895 | 9,239 | 5.415642 | 0.175419 | 0.047039 | 0.024139 | 0.07778 | 0.667836 | 0.643078 | 0.623891 | 0.580978 | 0.580978 | 0.580978 | 0 | 0.036556 | 0.324927 | 9,239 | 232 | 99 | 39.823276 | 0.74058 | 0.075874 | 0 | 0.56 | 0 | 0 | 0.21141 | 0.019955 | 0 | 0 | 0 | 0 | 0.11 | 1 | 0.025 | false | 0.02 | 0.03 | 0 | 0.06 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64da7ea65369f204e730539cae624275e47b12c4 | 5,094 | py | Python | context/app/routes_auth.py | schwenk102/portal-ui | b40fd10e2d6568a9c419c06ba0759da295035cbd | [
"MIT"
] | null | null | null | context/app/routes_auth.py | schwenk102/portal-ui | b40fd10e2d6568a9c419c06ba0759da295035cbd | [
"MIT"
] | null | null | null | context/app/routes_auth.py | schwenk102/portal-ui | b40fd10e2d6568a9c419c06ba0759da295035cbd | [
"MIT"
] | null | null | null | from urllib.parse import urlencode
from flask import (
Blueprint, make_response, current_app, url_for,
request, redirect, render_template, session)
import requests
import globus_sdk
# This is mostly copy-and-paste from
# https://globus-sdk-python.readthedocs.io/en/stable/examples/three_legged_oauth/
blueprint = Blueprint('routes_auth', __name__, template_folder='templates')
def load_app_client():
return globus_sdk.ConfidentialAppAuthClient(
current_app.config['APP_CLIENT_ID'], current_app.config['APP_CLIENT_SECRET'])
def has_hubmap_group(nexus_token):
# Mostly copy-and-paste from
# https://github.com/hubmapconsortium/commons/blob/dc69f4/hubmap_commons/hm_auth.py#L347-L355
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': 'Bearer ' + nexus_token
}
params = {
'fields': 'id,name,description',
# I'm not sure what these do, and if they are necessary:
'for_all_identities': 'false',
'my_statuses': 'active'
}
response = requests.get(
'https://nexus.api.globusonline.org/groups',
headers=headers,
params=params)
response.raise_for_status()
groups = response.json()
return any([group['id'] == current_app.config['GROUP_ID'] for group in groups])
@blueprint.route('/login')
def login():
'''
Login via Globus Auth.
May be invoked in one of two scenarios:
1. Login is starting, no state in Globus Auth yet
2. Returning to application during login, already have short-lived
code from Globus Auth to exchange for tokens, encoded in a query
param
'''
# The redirect URI, as a complete URI (not relative path)
redirect_uri = url_for('routes_auth.login', _external=True)
client = load_app_client()
client.oauth2_start_flow(redirect_uri)
# If there's no "code" query string parameter, we're in this route
# starting a Globus Auth login flow; Redirect out to Globus Auth:
if 'code' not in request.args:
auth_uri = client.oauth2_get_authorize_url(
additional_params={
'scope': ' '.join([
'openid profile email',
'urn:globus:auth:scope:transfer.api.globus.org:all',
'urn:globus:auth:scope:auth.globus.org:view_identities',
'urn:globus:auth:scope:nexus.api.globus.org:groups'
])
}
)
return redirect(auth_uri)
# If we do have a "code" param, we're coming back from Globus Auth
# and can start the process of exchanging an auth code for a token.
code = request.args.get('code')
tokens = client.oauth2_exchange_code_for_tokens(code)
# The repr is deceptive: Looks like a dict, but direct access not possible.
token_object = tokens.by_resource_server['nexus.api.globus.org']
nexus_token = token_object['access_token']
expires_at_seconds = token_object['expires_at_seconds']
if not has_hubmap_group(nexus_token):
# Globus institution login worked, but user does not have HuBMAP group!
return render_template('errors/401-no-hubmap-group.html'), 401
session.update(
nexus_token=nexus_token,
is_authenticated=True)
# Would like to set an expiration on the session like I set on
# the cookie, but the lifetime of sessions is a global config.
response = make_response(
redirect(url_for('routes.index', _external=True)))
response.set_cookie(
key='nexus_token',
value=nexus_token,
expires=expires_at_seconds)
return response
@blueprint.route('/logout')
def logout():
'''
- Revoke the tokens with Globus Auth.
- Destroy the session state.
- Delete cookie, and return a redirect response.
- And when redirect returns, redirect again to the Globus Auth logout page.
'''
redirect_to_globus_param = 'redirect_to_globus'
if redirect_to_globus_param in request.args:
redirect_uri = url_for('routes.index', _external=True)
globus_logout_url = 'https://auth.globus.org/v2/web/logout?' + urlencode({
'client': current_app.config['APP_CLIENT_ID'],
'redirect_uri': redirect_uri,
'redirect_name': 'HuBMAP Portal'
})
return redirect(globus_logout_url)
client = load_app_client()
# Revoke the tokens with Globus Auth
try:
tokens = session['tokens']
except Exception:
# May have only hit this because of weird state during development,
# but if there are no tokens, there's nothing to revoke.
tokens = {}
for token in (token_info['access_token']
for token_info in tokens.values()):
client.oauth2_revoke_token(token)
# Destroy the session state
session.clear()
kwargs = {redirect_to_globus_param: True}
response = make_response(
redirect(url_for('routes_auth.logout', _external=True, **kwargs)))
response.delete_cookie(key='nexus_token')
return response
| 35.131034 | 97 | 0.667452 | 660 | 5,094 | 4.972727 | 0.345455 | 0.036563 | 0.0195 | 0.017367 | 0.121572 | 0.08897 | 0.024375 | 0 | 0 | 0 | 0 | 0.005645 | 0.234982 | 5,094 | 144 | 98 | 35.375 | 0.836541 | 0.296427 | 0 | 0.068966 | 0 | 0 | 0.207536 | 0.051955 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045977 | false | 0 | 0.045977 | 0.011494 | 0.172414 | 0.045977 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64daf61f1b459c77ae842abee1c37e78d14fc111 | 8,283 | py | Python | neutron_taas/tests/unit/taas_client/osc/test_osc_tap_service.py | openstack/tap-as-a-service | c9d046843565b3af514169c26e5893dbe86a9b98 | [
"Apache-2.0"
] | 68 | 2015-10-18T02:57:10.000Z | 2022-02-22T11:33:25.000Z | neutron_taas/tests/unit/taas_client/osc/test_osc_tap_service.py | openstack/tap-as-a-service | c9d046843565b3af514169c26e5893dbe86a9b98 | [
"Apache-2.0"
] | null | null | null | neutron_taas/tests/unit/taas_client/osc/test_osc_tap_service.py | openstack/tap-as-a-service | c9d046843565b3af514169c26e5893dbe86a9b98 | [
"Apache-2.0"
] | 27 | 2015-11-11T02:00:35.000Z | 2020-03-07T03:36:33.000Z | # All Rights Reserved 2020
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import operator
from unittest import mock
from neutronclient.tests.unit.osc.v2 import fakes as test_fakes
from osc_lib import utils as osc_utils
from osc_lib.utils import columns as column_util
from oslo_utils import uuidutils
from neutron_taas.taas_client.osc import tap_service as osc_tap_service
from neutron_taas.tests.unit.taas_client.osc import fakes
columns_long = tuple(col for col, _, listing_mode in osc_tap_service._attr_map
if listing_mode in (column_util.LIST_BOTH,
column_util.LIST_LONG_ONLY))
headers_long = tuple(head for _, head, listing_mode in
osc_tap_service._attr_map if listing_mode in
(column_util.LIST_BOTH, column_util.LIST_LONG_ONLY))
sorted_attr_map = sorted(osc_tap_service._attr_map, key=operator.itemgetter(1))
sorted_columns = tuple(col for col, _, _ in sorted_attr_map)
sorted_headers = tuple(head for _, head, _ in sorted_attr_map)
def _get_data(attrs, columns=sorted_columns):
return osc_utils.get_dict_properties(attrs, columns)
class TestCreateTapService(test_fakes.TestNeutronClientOSCV2):
columns = (
'ID',
'Name',
'Port',
'Status',
'Tenant',
)
def setUp(self):
super(TestCreateTapService, self).setUp()
self.cmd = osc_tap_service.CreateTapService(self.app, self.namespace)
def test_create_tap_service(self):
"""Test Create Tap Service."""
fake_tap_service = fakes.FakeTapService.create_tap_service(
attrs={'port_id': uuidutils.generate_uuid()}
)
self.neutronclient.post = mock.Mock(
return_value={osc_tap_service.TAP_SERVICE: fake_tap_service})
arg_list = [
'--name', fake_tap_service['name'],
'--port', fake_tap_service['port_id'],
]
verify_list = [
('name', fake_tap_service['name']),
('port_id', fake_tap_service['port_id']),
]
parsed_args = self.check_parser(self.cmd, arg_list, verify_list)
self.neutronclient.find_resource = mock.Mock(
return_value={'id': fake_tap_service['port_id']})
columns, data = self.cmd.take_action(parsed_args)
self.neutronclient.post.assert_called_once_with(
'/taas/tap_services',
body={
osc_tap_service.TAP_SERVICE:
{
'name': fake_tap_service['name'],
'port_id': fake_tap_service['port_id']
}
}
)
self.assertEqual(self.columns, columns)
self.assertItemEqual(_get_data(fake_tap_service), data)
class TestListTapService(test_fakes.TestNeutronClientOSCV2):
def setUp(self):
super(TestListTapService, self).setUp()
self.cmd = osc_tap_service.ListTapService(self.app, self.namespace)
def test_list_tap_service(self):
"""Test List Tap Service."""
fake_tap_services = fakes.FakeTapService.create_tap_services(
attrs={'port_id': uuidutils.generate_uuid()},
count=4)
self.neutronclient.list = mock.Mock(return_value=fake_tap_services)
arg_list = []
verify_list = []
parsed_args = self.check_parser(self.cmd, arg_list, verify_list)
headers, data = self.cmd.take_action(parsed_args)
self.neutronclient.list.assert_called_once()
self.assertEqual(headers, list(headers_long))
self.assertListItemEqual(
list(data),
[_get_data(fake_tap_service, columns_long) for fake_tap_service
in fake_tap_services[osc_tap_service.TAP_SERVICES]]
)
class TestDeleteTapService(test_fakes.TestNeutronClientOSCV2):
def setUp(self):
super(TestDeleteTapService, self).setUp()
self.neutronclient.find_resource = mock.Mock(
side_effect=lambda _, name_or_id: {'id': name_or_id})
self.cmd = osc_tap_service.DeleteTapService(self.app, self.namespace)
def test_delete_tap_service(self):
"""Test Delete tap service."""
fake_tap_service = fakes.FakeTapService.create_tap_service(
attrs={'port_id': uuidutils.generate_uuid()}
)
self.neutronclient.delete = mock.Mock()
arg_list = [
fake_tap_service['id'],
]
verify_list = [
(osc_tap_service.TAP_SERVICE, [fake_tap_service['id']]),
]
parsed_args = self.check_parser(self.cmd, arg_list, verify_list)
result = self.cmd.take_action(parsed_args)
self.neutronclient.delete.assert_called_once_with(
osc_tap_service.resource_path % ('tap_services',
fake_tap_service['id']))
self.assertIsNone(result)
class TestShowTapService(test_fakes.TestNeutronClientOSCV2):
def setUp(self):
super(TestShowTapService, self).setUp()
self.neutronclient.find_resource = mock.Mock(
side_effect=lambda _, name_or_id: {'id': name_or_id})
self.cmd = osc_tap_service.ShowTapService(self.app, self.namespace)
def test_show_tap_service(self):
"""Test Show tap service."""
fake_tap_service = fakes.FakeTapService.create_tap_service(
attrs={'port_id': uuidutils.generate_uuid()}
)
self.neutronclient.get = mock.Mock(
return_value={osc_tap_service.TAP_SERVICE: fake_tap_service})
arg_list = [
fake_tap_service['id'],
]
verify_list = [
(osc_tap_service.TAP_SERVICE, fake_tap_service['id']),
]
parsed_args = self.check_parser(self.cmd, arg_list, verify_list)
headers, data = self.cmd.take_action(parsed_args)
self.neutronclient.get.assert_called_once_with(
osc_tap_service.resource_path % ('tap_services',
fake_tap_service['id']))
self.assertEqual(sorted_headers, headers)
self.assertItemEqual(_get_data(fake_tap_service), data)
class TestUpdateTapService(test_fakes.TestNeutronClientOSCV2):
_new_name = 'new_name'
columns = (
'ID',
'Name',
'Port',
'Status',
'Tenant',
)
def setUp(self):
super(TestUpdateTapService, self).setUp()
self.cmd = osc_tap_service.UpdateTapService(self.app, self.namespace)
self.neutronclient.find_resource = mock.Mock(
side_effect=lambda _, name_or_id: {'id': name_or_id})
def test_update_tap_service(self):
"""Test update tap service"""
fake_tap_service = fakes.FakeTapService.create_tap_service(
attrs={'port_id': uuidutils.generate_uuid()}
)
new_tap_service = copy.deepcopy(fake_tap_service)
new_tap_service['name'] = self._new_name
self.neutronclient.put = mock.Mock(
return_value={osc_tap_service.TAP_SERVICE: new_tap_service})
arg_list = [
fake_tap_service['id'],
'--name', self._new_name,
]
verify_list = [('name', self._new_name)]
parsed_args = self.check_parser(self.cmd, arg_list, verify_list)
columns, data = self.cmd.take_action(parsed_args)
attrs = {'name': self._new_name}
self.neutronclient.put.assert_called_once_with(
osc_tap_service.resource_path % ('tap_services',
new_tap_service['id']),
{osc_tap_service.TAP_SERVICE: attrs})
self.assertEqual(self.columns, columns)
self.assertItemEqual(_get_data(new_tap_service), data)
| 36.013043 | 79 | 0.645056 | 987 | 8,283 | 5.08612 | 0.175279 | 0.143426 | 0.069721 | 0.030478 | 0.585857 | 0.559363 | 0.519721 | 0.453785 | 0.429482 | 0.373705 | 0 | 0.0026 | 0.257032 | 8,283 | 229 | 80 | 36.170306 | 0.81313 | 0.083907 | 0 | 0.375758 | 0 | 0 | 0.03404 | 0 | 0 | 0 | 0 | 0 | 0.084848 | 1 | 0.066667 | false | 0 | 0.054545 | 0.006061 | 0.175758 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64dc127b8f61573f77c1944fb95158b8208f8c2e | 600 | py | Python | auth/Auth.py | fede-da/PageDownloader | 2344e2307ea374690ba05923056fff9e59c9ad12 | [
"MIT"
] | null | null | null | auth/Auth.py | fede-da/PageDownloader | 2344e2307ea374690ba05923056fff9e59c9ad12 | [
"MIT"
] | null | null | null | auth/Auth.py | fede-da/PageDownloader | 2344e2307ea374690ba05923056fff9e59c9ad12 | [
"MIT"
] | null | null | null | from auth.pwdLine import PwdLine
from auth.userLine import UserLine
from auth.enabler import Enabler
from tkinter import Tk
class Auth:
ul: UserLine
pl: PwdLine
en: Enabler
def __init__(self, tk: Tk, row: int, col: int, w: int):
self.ul = UserLine("disabled", tk, row+1, col, w)
self.pl = PwdLine("disabled", tk, row+2, col, w)
self.en = Enabler(tk, row, col, self.ul, self.pl)
def getValues(self) -> list:
if self.en.getValue() == "disabled":
return ["", ""]
else:
return [self.ul.getData(), self.pl.getData()]
| 27.272727 | 59 | 0.598333 | 84 | 600 | 4.22619 | 0.345238 | 0.056338 | 0.073239 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004525 | 0.263333 | 600 | 21 | 60 | 28.571429 | 0.798643 | 0 | 0 | 0 | 0 | 0 | 0.04 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.235294 | 0 | 0.705882 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64dd3848949598fcdf3c2e10b7873c7391e20401 | 782 | py | Python | setup.py | soulless-viewer/w3cpull | acc5a564564a2cd256cc32f598f4551c70c17775 | [
"MIT"
] | 1 | 2020-11-19T21:09:00.000Z | 2020-11-19T21:09:00.000Z | setup.py | soulless-viewer/w3cpull | acc5a564564a2cd256cc32f598f4551c70c17775 | [
"MIT"
] | null | null | null | setup.py | soulless-viewer/w3cpull | acc5a564564a2cd256cc32f598f4551c70c17775 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
with open('README.md', encoding='utf-8') as f:
readme = f.read()
setup(
name="w3cpull",
version="1.1.1",
author="Mikalai Lisitsa",
author_email="Mikalai.Lisitsa@ibm.com",
url="https://github.com/soulless-viewer/w3cpull",
description="w3cpull is an application for pulling data from IBM w3 Connections.",
long_description=readme,
long_description_content_type="text/markdown",
keywords='w3-connections w3c ibm',
license='MIT',
packages=find_packages(),
install_requires=[
"docopt == 0.6.2",
"requests == 2.22.0",
"schema == 0.7.2",
"selenium == 3.141.0",
],
include_package_data=True,
python_requires='>=3.6',
scripts=['bin/w3cpull'],
)
| 27.928571 | 86 | 0.640665 | 101 | 782 | 4.851485 | 0.673267 | 0.04898 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.044872 | 0.202046 | 782 | 27 | 87 | 28.962963 | 0.740385 | 0 | 0 | 0 | 0 | 0 | 0.375959 | 0.029412 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.04 | 0 | 0.04 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
64dd62196969f76ce749b40ce7bdcc2fcddb001f | 2,341 | py | Python | noel/noel/main.py | jonparrott/noel | 48d54df340efbcff00ba0c2db587301199fbc572 | [
"Apache-2.0"
] | 66 | 2016-02-11T04:22:52.000Z | 2018-01-14T22:03:55.000Z | noel/noel/main.py | theacodes/noel | 48d54df340efbcff00ba0c2db587301199fbc572 | [
"Apache-2.0"
] | null | null | null | noel/noel/main.py | theacodes/noel | 48d54df340efbcff00ba0c2db587301199fbc572 | [
"Apache-2.0"
] | 8 | 2016-03-26T06:21:17.000Z | 2018-04-23T13:47:38.000Z | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The entrypoint for the noel command line tool."""
import argparse
import os
import noel.builder.commands
import noel.deployer.commands
from noel.utils import run_command
def build_and_deploy_command(args):
"""Build an application image and deploy it to the cluster. This
essentially runs build and then deploy-image."""
image = noel.builder.commands.build_command(args)
args.image = image
noel.deployer.commands.deploy_image_command(args)
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
parser.add_argument(
'--kubernetes-url',
default='http://localhost:8001',
help="The URL for the Kubernetes API.")
noel.builder.commands.register_commands(subparsers)
noel.deployer.commands.register_commands(subparsers)
build_and_deploy = subparsers.add_parser(
'build-and-deploy',
help=build_and_deploy_command.__doc__)
build_and_deploy.set_defaults(func=build_and_deploy_command)
build_and_deploy.add_argument(
'--project-id',
default=None,
help='Google Cloud Project ID, if not specified, it will use gcloud\'s '
'currently configured project.')
build_and_deploy.add_argument(
'--dir',
default=os.getcwd(),
help='Directory containing application and Dockerfile. Defaults to the '
'current directory.')
build_and_deploy.add_argument(
'--app',
default=os.path.basename(os.getcwd()),
help='The application name. Defaults to the name of the directory.')
build_and_deploy.add_argument(
'--version',
default=None,
help='The image tag version. Defaults to the current date & time.')
run_command(parser)
| 33.442857 | 80 | 0.70739 | 309 | 2,341 | 5.229773 | 0.420712 | 0.054455 | 0.086634 | 0.042079 | 0.07302 | 0.042079 | 0 | 0 | 0 | 0 | 0 | 0.006421 | 0.201623 | 2,341 | 69 | 81 | 33.927536 | 0.858213 | 0.300726 | 0 | 0.146341 | 0 | 0 | 0.253416 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04878 | false | 0 | 0.121951 | 0 | 0.170732 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b379b19bf85788ca37275f4904081e4ad7396d67 | 567 | py | Python | bkmus_api/bkmus_copyr.py | mbakija/bkmuseum_xstitch_bot | 07de75a23d48fafae34ebda60a82ba9973386be1 | [
"MIT"
] | 1 | 2020-11-24T05:47:55.000Z | 2020-11-24T05:47:55.000Z | bkmus_api/bkmus_copyr.py | mbakija/bkmuseum_xstitch_bot | 07de75a23d48fafae34ebda60a82ba9973386be1 | [
"MIT"
] | null | null | null | bkmus_api/bkmus_copyr.py | mbakija/bkmuseum_xstitch_bot | 07de75a23d48fafae34ebda60a82ba9973386be1 | [
"MIT"
] | null | null | null | # print copyright restrictions: 0 = not restricted, 1 = in copyright
# that said, several pieces (particularly in Contemporary Art collection)
# do have copyright notices on them even if copyright restrictions == 0
# sorting by "rights_type": "name" might be more clarifying, but the number of
# But I'd argue this use is tranformative and therefore fair use
# though I also might sort out and remove Contemporary Art from the final usage
import json
f = open('BKMobjects.json')
data = json.load(f)
for id in data['object']:
print(id['copyright_restricted'])
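# --- Added sketch (not part of the original script) ---
# A minimal, hypothetical summary step: tally the restriction flag across all
# objects instead of only printing each value. It relies solely on the
# 'copyright_restricted' field already read in the loop above.
from collections import Counter

restriction_tally = Counter(obj['copyright_restricted'] for obj in data['object'])
print(restriction_tally)  # e.g. Counter({0: <unrestricted count>, 1: <restricted count>})
f.close()  # close the file handle opened above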
| 37.8 | 79 | 0.75485 | 87 | 567 | 4.896552 | 0.735632 | 0.098592 | 0.103286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006369 | 0.169312 | 567 | 14 | 80 | 40.5 | 0.898089 | 0.753086 | 0 | 0 | 0 | 0 | 0.308271 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b379e885d5afb3236202408bc7d3c8933c443206 | 1,243 | py | Python | parseq/scripts_insert/util.py | lukovnikov/parseq | 65a4a2444d78779c3255e70a7897f77e73cdcdda | [
"MIT"
] | 1 | 2022-01-21T16:08:08.000Z | 2022-01-21T16:08:08.000Z | parseq/scripts_insert/util.py | lukovnikov/parseq | 65a4a2444d78779c3255e70a7897f77e73cdcdda | [
"MIT"
] | null | null | null | parseq/scripts_insert/util.py | lukovnikov/parseq | 65a4a2444d78779c3255e70a7897f77e73cdcdda | [
"MIT"
] | 1 | 2020-08-19T07:09:44.000Z | 2020-08-19T07:09:44.000Z | from nltk import Tree
from parseq.grammar import tree_to_lisp_tokens
def reorder_tree(x:Tree, orderless=None, typestr="arg:~type"):
"""
Reorders given tree 'x' such that if a parent label is in 'orderless', the order of the children is always as follows:
- arg:~type goes first
- other children are ordered alphabetically
This function applies itself recursively.
"""
if orderless is None or len(orderless) == 0 or len(x) == 0:
return x
else:
children = [reorder_tree(xe, orderless=orderless) for xe in x]
if x.label() in orderless:
# do type first
types = [xe for xe in children if xe.label() == typestr]
types = sorted(types, key=lambda _xe: str(_xe))
otherchildren = [xe for xe in children if xe.label() != typestr]
otherchildren = sorted([xe for xe in otherchildren], key=lambda _xe: str(_xe))
children = types + otherchildren
x[:] = children
return x
def flatten_tree(x: Tree):
assert(x.label() == "@START@")
assert(len(x) == 1)
xstr = tree_to_lisp_tokens(x[0])
nodes = [Tree(xe if xe not in "()" else "|"+xe, []) for xe in xstr]
y = Tree(x.label(), nodes)
return y | 36.558824 | 122 | 0.617056 | 178 | 1,243 | 4.235955 | 0.376404 | 0.033157 | 0.046419 | 0.047745 | 0.129973 | 0.087533 | 0.087533 | 0.087533 | 0.087533 | 0 | 0 | 0.004405 | 0.269509 | 1,243 | 34 | 123 | 36.558824 | 0.825991 | 0.19469 | 0 | 0.090909 | 0 | 0 | 0.019507 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 1 | 0.090909 | false | 0 | 0.090909 | 0 | 0.318182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b37bde647afa0d22b730a31da03c3321ccc70480 | 3,887 | py | Python | r_desktop.py | mtiday/open_remote_desktop_with_python | 25dd0abed91cf45a7a1bd4961afec4cdcee1b039 | [
"MIT"
] | null | null | null | r_desktop.py | mtiday/open_remote_desktop_with_python | 25dd0abed91cf45a7a1bd4961afec4cdcee1b039 | [
"MIT"
] | null | null | null | r_desktop.py | mtiday/open_remote_desktop_with_python | 25dd0abed91cf45a7a1bd4961afec4cdcee1b039 | [
"MIT"
] | null | null | null | """This program will open a connection to a server you choose from a
list, or specify a name not on the list. Build a text file named
servers.txt and save it in the same directory this program is run
from. Please make sure not to have any extra spaces before or at the
end of the name of the servers. One name per line. Example:
server1
server2
Created by Michael Tiday.
"""
import os
import time
def start():
"""Main function, will call other functions from this one."""
# Create needed variables
list_of_servers = build_list_of_servers()
r_desktop_to_connect = connect_to(list_of_servers)
print(f'Connecting to {r_desktop_to_connect}...')
# Build BAT file that will call MSTC.exe with the correct switches
build_bat_file(r_desktop_to_connect)
# Connect to device via RDP
time.sleep(1)
os.startfile(os.path.join(os.getcwd(), 'rdesktop.bat'))
# build a list of devices to choose from
def build_list_of_servers():
"""Have user specify which server to connect to"""
# From servers.txt file, create a variable list that will contain
# servers to choose from
list_of_machines = []
with open('servers.txt', 'r') as westmoreland_servers:
        # Build list, stripping the trailing \n and making all letters uppercase
for server in westmoreland_servers:
list_of_machines.append(server.replace('\n', '').upper())
# return servers in alphabetical order
return sorted(list_of_machines)
# user input of device to connect to
def connect_to(list_of_servers):
"""Have user choose which device to connect to via
Remote Desktop
:param: list list_of_servers: List of devices the user picks from
"""
choose_device = True
# Print list of devices to choose from
print_list_of_devices(list_of_servers)
while choose_device:
print('\nPlease choose from the list above')
print('Enter number of device, M for a manual entry not in the list.')
print('or "Q" to quit')
user_choice = input('Enter choice: ')
if user_choice.casefold() == 'q':
print('Have a great day. Goodbye!')
time.sleep(3)
raise SystemExit
if user_choice.casefold() == 'm':
return input('Enter name of server then <Enter>: ')
try:
print(f'You choose {list_of_servers[int(user_choice)-1]}')
return list_of_servers[int(user_choice)-1]
except IndexError:
print_list_of_devices(list_of_servers)
print('Try again, number entered didn\'t correspond to a device.')
except ValueError:
print_list_of_devices(list_of_servers)
print('You didn\'t enter an integer')
# print out list of servers
def print_list_of_devices(list_of_servers):
"""Print list of devices to choose from
:param: list list_of_servers: list of devices to choose from
"""
device_number = 0
for device in list_of_servers:
device_number += 1
# if statements used so device names align if more than 10
if device_number < 10:
print(f'{device_number} {device}')
else:
print(f'{device_number} {device}')
# Build the custom BAT file that will open the selected device
def build_bat_file(r_desktop_to_connect):
"""Build a BAT file that will RDP to the correct device
:param: string r_desktop_to_connect: User's choice to connect
"""
with open('rdesktop.bat', 'w') as rdesktop:
rdesktop.write(f'start mstsc.exe /v:{r_desktop_to_connect} exit 0')
if __name__ == '__main__':
# If not ran in a Windows OS, close the program
if os.name != 'nt':
print('Sorry, this program will only run on Windows')
time.sleep(3)
raise SystemExit
start()
| 35.66055 | 79 | 0.655518 | 563 | 3,887 | 4.358792 | 0.309059 | 0.066015 | 0.079462 | 0.041565 | 0.259576 | 0.182559 | 0.153219 | 0.072535 | 0 | 0 | 0 | 0.004893 | 0.263957 | 3,887 | 108 | 80 | 35.990741 | 0.852849 | 0.361976 | 0 | 0.163636 | 0 | 0 | 0.219512 | 0.037892 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.036364 | 0 | 0.181818 | 0.272727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b37c92749148846755c132fe23eebba1cdbf7a9a | 10,069 | py | Python | external_apps/oauth/__init__.py | spreeker/democracygame | 525139955cb739c295051f317ab670049511bcf8 | [
"BSD-3-Clause"
] | 2 | 2016-05-09T04:57:34.000Z | 2017-03-03T14:22:24.000Z | external_apps/oauth/__init__.py | spreeker/democracygame | 525139955cb739c295051f317ab670049511bcf8 | [
"BSD-3-Clause"
] | null | null | null | external_apps/oauth/__init__.py | spreeker/democracygame | 525139955cb739c295051f317ab670049511bcf8 | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
import collections
import random
import time
import urllib
import urlparse
from urlencoding import escape, parse_qs, compose_qs
OAUTH_VERSION = '1.0'
TIMESTAMP_THRESHOLD = 300
NONCE_LENGTH = 10
class OAuthError(RuntimeError):
"""
Generic OAuthError for all error cases.
"""
pass
class OAuthRequest(object):
"""
Represents outgoing or incoming requests. Provides the ability to sign
outgoing requests (`sign_request <#oauth.OAuthRequest.sign_request>`_), and
validate incoming signed requests (`validate_signature
<#oauth.OAuthRequest.validate_signature>`_).
Arguments:
`url`
The URL. Query parameters in the URL will automatically be parsed
out. **Required**.
`http_method`
The HTTP method for the request.
`params`
A dict or string body of request parameters.
`headers`
A dict which may contain the *Authorization* header.
`version`
The *oauth_version*.
`timestamp_threshold`
The number of seconds a received timestamp can be off by.
`nonce_length`
The length of the randomly generated nonce.
"""
def __init__(self, url, http_method='GET', params=None, headers={},
version=OAUTH_VERSION, timestamp_threshold=TIMESTAMP_THRESHOLD,
nonce_length=NONCE_LENGTH):
if params and not isinstance(params, collections.Mapping):
# if its not a mapping, it must be a string
params = parse_qs(params)
elif not params:
params = {}
if 'Authorization' in headers:
auth_header = headers['Authorization']
# check that the authorization header is OAuth
            if auth_header.find('OAuth') > -1:
try:
header_params = OAuthRequest._parse_auth_header(auth_header)
params.update(header_params)
except ValueError:
raise OAuthError('Unable to parse OAuth parameters from Authorization header.')
# URL parameters
parts = urlparse.urlparse(url)
url = '%s://%s%s' % (parts.scheme, parts.netloc, parts.path)
params.update(parse_qs(parts.query)) #FIXME should this be a merge?
self.http_method = http_method.upper()
self.url = url
self.params = params.copy()
self.version = version
self.timestamp_threshold = timestamp_threshold
self.nonce_length = nonce_length
def validate_signature(self, signature_method, consumer, token=None):
"""
Validates an *existing* signature in the request. It does not return a
value, and will throw an OAuthError exception when it fails.
**BE WARNED**: Nonce validation is left to the user.
http://oauth.net/core/1.0/#nonce
Arguments:
`signature_method`
The class used to handle Signature logic. This should be a concrete
implementation of `OAuthSignatureMethod
<#oauth.signature_method.base.OAuthSignatureMethod>`_.
`consumer`
A dict containing the oauth_token and oauth_token_secret
representing a OAuth Consumer.
`token`
An optional dict containing the oauth_token and oauth_token_secret
representing a OAuth Token to be used in validating the signature.
This is the basic usage flow for validating signatures:
#. Create a Request object
#. Create a dict with the OAuth Consumer information
#. *Optionally* create a dict with the OAuth Token information
#. Call validate_signature with the Signature Implementation, Consumer
and optional Token and catch OAuthError exceptions.
>>> from oauth import OAuthRequest
>>> from oauth.signature_method.plaintext import OAuthSignatureMethod_PLAINTEXT
>>> import time
>>> params = {
'oauth_nonce': '9747278682',
'oauth_timestamp': str(int(time.time())),
'oauth_consumer_key': 'my-ck',
'oauth_signature_method': 'PLAINTEXT',
'oauth_version': '1.0',
'oauth_signature': 'my-cks%26',
}
>>> consumer = {'oauth_token': 'my-ck', 'oauth_token_secret': 'my-cks'}
>>> request = OAuthRequest('https://example.org/get-request-token', 'GET', params)
>>> request.validate_signature(OAuthSignatureMethod_PLAINTEXT, consumer)
"""
try:
sig = signature_method(self, consumer, token)
timestamp = int(self.params['oauth_timestamp'])
now = int(time.time())
off_by = abs(now - timestamp)
if off_by > self.timestamp_threshold:
raise OAuthError('Expired timestamp: Given Time: %d | Server Time: %s | Threshold: %d.' % (timestamp, now, self.timestamp_threshold))
if self.params['oauth_signature_method'] != sig.name:
raise OAuthError('Unexpected oauth_signature_method. Was expecting %s.' % sig.name)
sig.validate_signature(self.params['oauth_signature'])
except KeyError:
raise OAuthError('Missing required parameter')
def sign_request(self, signature_method, consumer, token=None):
"""
Generate a *new* signature adding/replacing a number of oauth_
parameters as part of the process. Use this when you are making
outbound signed requests.
Arguments:
`signature_method`
The class used to handle Signature logic. This should be a concrete
implementation of `OAuthSignatureMethod
<#oauth.signature_method.base.OAuthSignatureMethod>`_.
`consumer`
A dict containing the oauth_token and oauth_token_secret
representing a OAuth Consumer.
`token`
An optional dict containing the oauth_token and oauth_token_secret
representing a OAuth Token to be used in signing the request.
This is the basic usage flow for generating signatures:
#. Create a Request object
#. Create a dict with the OAuth Consumer information
#. *Optionally* create a dict with the OAuth Token information
#. Call sign_request with the Signature Implementation, Consumer and
optional Token.
>>> from oauth import OAuthRequest
>>> from oauth.signature_method.hmac_sha1 import OAuthSignatureMethod_HMAC_SHA1
>>> consumer = {'oauth_token': 'my-ck', 'oauth_token_secret': 'my-cks'}
>>> request = OAuthRequest('http://example.org/get-request-token')
>>> request.sign_request(OAuthSignatureMethod_HMAC_SHA1, consumer)
>>> header = request.to_header()
*header* will now contain the string that can be used as the
*Authorization* header for this request.
"""
sig = signature_method(self, consumer, token)
self.params.update({
'oauth_consumer_key': consumer['oauth_token'],
'oauth_nonce': ''.join([str(random.randint(0, 9)) for i in range(self.nonce_length)]),
'oauth_signature_method': sig.name,
'oauth_timestamp': int(time.time()),
'oauth_version': self.version,
})
if token and 'oauth_token' in token:
self.params['oauth_token'] = token['oauth_token']
self.params['oauth_signature'] = sig.signature
def to_header(self, realm=None):
"""
Generates the Authorization header with the current OAuth parameters.
http://oauth.net/core/1.0/#auth_header
Arguments:
`realm`
            An optional string to use as the realm. If missing, realm will
            be omitted altogether.
"""
auth_header = 'OAuth '
if realm:
auth_header += 'realm="%s",' % realm
oauth_params = dict([(k, v) for k, v in self.params.iteritems() if k[:6] == 'oauth_'])
auth_header += compose_qs(oauth_params, pattern='%s="%s"', join=',')
return auth_header
def to_url(self, include_oauth=False):
"""
Generates a URL suitable for a GET request.
Arguments:
`include_oauth`
Decides if *oauth_* parameters are included. This is useful if the
OAuth parameters are being sent via the query string in the URL
instead of the Authorization header.
"""
return '%s?%s' % (self.url, self.to_postdata(include_oauth))
def to_postdata(self, include_oauth=False):
"""
Generates the POST body.
Arguments:
`include_oauth`
Decides if *oauth_* parameters are included. This is useful if the
OAuth parameters are being sent via the POST body instead of the
Authorization header.
"""
if include_oauth:
params = self.params
else:
params = dict([(k, v) for k, v in self.params.iteritems() if k[:6] != 'oauth_'])
return compose_qs(params)
@property
def normalized_request_params(self):
"""
Generates the normalized request parameters.
http://oauth.net/core/1.0/#rfc.section.9.1.1
"""
params = self.params.copy()
params.pop('oauth_signature', None)
return compose_qs(params, sort=True)
@staticmethod
def _parse_auth_header(header):
"""
Parses the OAuth Authorization header:
http://oauth.net/core/1.0/#auth_header
Note: "realm" is dropped.
"""
# drop OAuth prefix
if header[:6].lower() == 'oauth ':
header = header[6:]
params = {}
parts = header.split(',')
for param in parts:
key, value = param.strip().split('=', 1)
if key == 'realm':
continue
params[key] = urllib.unquote(value.strip('"'))
return params
| 34.961806 | 149 | 0.614063 | 1,145 | 10,069 | 5.269869 | 0.217467 | 0.033146 | 0.026516 | 0.014915 | 0.346371 | 0.308916 | 0.2824 | 0.267484 | 0.223401 | 0.223401 | 0 | 0.006203 | 0.295561 | 10,069 | 287 | 150 | 35.083624 | 0.844495 | 0.487039 | 0 | 0.064516 | 0 | 0 | 0.120442 | 0.015761 | 0 | 0 | 0 | 0.003484 | 0 | 1 | 0.086022 | false | 0.010753 | 0.064516 | 0 | 0.225806 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b37cd0f3a91feee812c43f65e8c47aeebe21eb18 | 5,244 | py | Python | Pneumonia_Detection.py | sachin7695/Pneumonia_Diagnosis_using_XRays | 5192f4d79efba1abc3684cb85e02b46a0b508ee2 | [
"MIT"
] | null | null | null | Pneumonia_Detection.py | sachin7695/Pneumonia_Diagnosis_using_XRays | 5192f4d79efba1abc3684cb85e02b46a0b508ee2 | [
"MIT"
] | null | null | null | Pneumonia_Detection.py | sachin7695/Pneumonia_Diagnosis_using_XRays | 5192f4d79efba1abc3684cb85e02b46a0b508ee2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[1]:
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense,Flatten, Dropout,BatchNormalization, GlobalAveragePooling2D, ZeroPadding2D
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from tensorflow.keras.optimizers import Adam, SGD
import pandas as pd
import cv2
import numpy as np
from matplotlib import pyplot as plt
import os
from sklearn.model_selection import train_test_split
import tensorflow as tf
# In[2]:
dataset = r"/home/sachin269/Downloads/ChestXRay/chest_xray/train"
Normal_path = r"/home/sachin269/Downloads/ChestXRay/chest_xray/train/NORMAL"
Pneumonia_path = r"/home/sachin269/Downloads/ChestXRay/chest_xray/train/PNEUMONIA/"
# In[3]:
img = cv2.imread(Normal_path+'/IM-0115-0001.jpeg')
print(img.shape)
plt.imshow(img)
# In[4]:
vals = [Normal_path, Pneumonia_path]
print(os.listdir(vals[0]).__len__())
print(os.listdir(vals[1]).__len__())
# In[6]:
pathdir = [Normal_path, Pneumonia_path]
classes = ['Normal', 'Pneumonia']
filepaths = []
labels = []
for i, j in zip(pathdir, classes):
filelist = os.listdir(i)
# print(filelist)
for vals in filelist:
x = os.path.join(i, vals)
filepaths.append(x)
labels.append(j)
# print(filepaths.__len__(), labels.__len__())
# In[7]:
print(filepaths[0:4])
print(labels[0:4])
print(filepaths[-4:])
print(labels[-4:])
# In[8]:
dataset = list(zip(filepaths, labels))
pathframe = pd.DataFrame(dataset, columns=['filepaths', 'labels'])
# In[9]:
pathframe.__len__()
pathframe.tail()
# In[10]:
print(pathframe['labels'].value_counts())
# In[11]:
for i in range(0, 20):
vals = np.random.randint(1, len(pathframe))
plt.subplot(4,5, i+1)
plt.imshow(cv2.imread(pathframe.filepaths[vals]))
plt.axis('off')
plt.show()
# In[12]:
Train, Test = train_test_split(pathframe, train_size=0.90, random_state=0)
Train_new, valid = train_test_split(Train, train_size = 0.90, random_state=0)
print(Train.shape, Test.shape, Train_new.shape, valid.shape)
# In[13]:
train_datagen = ImageDataGenerator(rescale=1.0/255, rotation_range= 40 , width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2,
zoom_range=0.2, horizontal_flip = True, vertical_flip= True)
test_datagen = ImageDataGenerator(rescale=1.0/255)
# In[14]:
train_gen = train_datagen.flow_from_dataframe(dataframe = Train_new, x_col = 'filepaths', y_col='labels', batch_size=16,
target_size=(250,250), class_mode = 'binary', shuffle=True)
valid_gen = train_datagen.flow_from_dataframe(dataframe = valid, x_col = 'filepaths', y_col='labels', batch_size=16,
target_size=(250,250), class_mode = 'binary', shuffle=True)
test_gen = train_datagen.flow_from_dataframe(dataframe = Test, x_col = 'filepaths', y_col='labels', batch_size=16,
target_size=(250,250), class_mode = 'binary', shuffle=False)
# In[15]:
print(train_gen.class_indices)
print(train_gen[0][0].shape)
for i in range(0, 12):
val = train_gen[0][0][i]
vals = val.astype('uint8')
plt.subplot(4,3,i+1)
plt.imshow(vals)
plt.axis('off')
plt.show()
# In[16]:
model = Sequential()
model.add(Conv2D(16, (3, 3), input_shape = (250, 250, 3), activation = 'relu'))
model.add(Dropout(0.2))
model.add(Conv2D(16, (3, 3), activation = 'relu'))
model.add(MaxPooling2D(pool_size = (2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(16, (3, 3), activation = 'relu'))
model.add(MaxPooling2D(pool_size = (2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(units = 128, activation = 'relu'))
model.add(Dense(units = 1, activation = 'sigmoid'))
callbacks = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience = 2, min_delta= 0.01)
optim=tf.keras.optimizers.RMSprop(learning_rate=0.01, rho=0.9, epsilon=None, decay=0.0)
model.compile(optimizer = optim, loss = 'binary_crossentropy', metrics = ['accuracy'])
history = model.fit(train_gen, validation_data= valid_gen, epochs=5)
model.summary()
# In[17]:
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='lower left')
plt.show()
# In[18]:
model.evaluate(test_gen)
# In[19]:
import seaborn as sns
import matplotlib.pyplot as plt
#Violin Plots for all the weights matrices.
w_after = model.get_weights()
h1_w = w_after[0].flatten().reshape(-1,1)
h2_w = w_after[2].flatten().reshape(-1,1)
h3_w = w_after[4].flatten().reshape(-1,1)
out_w = w_after[6].flatten().reshape(-1,1)
fig = plt.figure(figsize=(12,10))
plt.title("Weight matrices after model is trained")
plt.subplot(1, 4, 1)
plt.title("Trained model Weights")
ax = sns.violinplot(y=h1_w,color='b')
plt.xlabel('Hidden Layer 1')
plt.subplot(1, 4, 2)
plt.title("Trained model Weights")
ax = sns.violinplot(y=h2_w, color='r')
plt.xlabel('Hidden Layer 2 ')
plt.subplot(1, 4, 3)
plt.title("Trained model Weights")
ax = sns.violinplot(y=h3_w, color='g')
plt.xlabel('Hidden Layer 3 ')
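# In[20]:
# --- Added sketch (not part of the original notebook) ---
# A minimal, hypothetical example of scoring a single chest X-ray with the model
# trained above. 'sample_path' simply takes the first image of the held-out Test
# frame; preprocessing mirrors the generators (250x250, rescaled to [0, 1]).
sample_path = Test['filepaths'].iloc[0]
img = load_img(sample_path, target_size=(250, 250))
x = img_to_array(img) / 255.0
x = np.expand_dims(x, axis=0)
prob = float(model.predict(x)[0][0])
# As printed earlier, train_gen.class_indices maps 'Normal' to 0 and
# 'Pneumonia' to 1 here, so a value near 1 indicates the Pneumonia class.
print(f'Predicted probability of Pneumonia: {prob:.3f}')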
# In[ ]:
| 22.899563 | 138 | 0.687071 | 791 | 5,244 | 4.409608 | 0.286979 | 0.025229 | 0.012901 | 0.025229 | 0.298739 | 0.287557 | 0.261181 | 0.198968 | 0.198968 | 0.116686 | 0 | 0.048528 | 0.15122 | 5,244 | 228 | 139 | 23 | 0.735116 | 0.054157 | 0 | 0.151786 | 0 | 0 | 0.11395 | 0.03528 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.116071 | 0 | 0.116071 | 0.098214 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b37f2c8f6aee924b36740ecfe3b5cc25e0a3af9c | 448 | py | Python | masters/master.client.syzygy/master_source_cfg.py | bopopescu/build | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | [
"BSD-3-Clause"
] | null | null | null | masters/master.client.syzygy/master_source_cfg.py | bopopescu/build | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | [
"BSD-3-Clause"
] | null | null | null | masters/master.client.syzygy/master_source_cfg.py | bopopescu/build | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | [
"BSD-3-Clause"
] | 1 | 2020-07-23T11:05:06.000Z | 2020-07-23T11:05:06.000Z | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from master.chromium_git_poller_bb8 import ChromiumGitPoller
def Update(config, active_master, c):
syzygy_poller = ChromiumGitPoller(
repourl='https://github.com/google/syzygy.git',
branch='master',
pollInterval=60)
c['change_source'].append(syzygy_poller)
| 32 | 72 | 0.75 | 63 | 448 | 5.222222 | 0.777778 | 0.072948 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018568 | 0.158482 | 448 | 13 | 73 | 34.461538 | 0.854111 | 0.354911 | 0 | 0 | 0 | 0 | 0.192982 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.142857 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b381335b767782ee84683c89db68147a9099b797 | 861 | py | Python | udemy_pyspark/min-temperatures.py | XC-Li/Hadoop_Spark_Practice | e34b51874ebf81ae9ba7e4c3ccd864580920c525 | [
"MIT"
] | null | null | null | udemy_pyspark/min-temperatures.py | XC-Li/Hadoop_Spark_Practice | e34b51874ebf81ae9ba7e4c3ccd864580920c525 | [
"MIT"
] | null | null | null | udemy_pyspark/min-temperatures.py | XC-Li/Hadoop_Spark_Practice | e34b51874ebf81ae9ba7e4c3ccd864580920c525 | [
"MIT"
] | null | null | null | from pyspark import SparkConf, SparkContext
conf = SparkConf().setMaster("local").setAppName("MinTemperatures")
sc = SparkContext(conf = conf)
def parseLine(line):
fields = line.split(',')
stationID = fields[0]
entryType = fields[2]
temperature = float(fields[3]) * 0.1 * (9.0 / 5.0) + 32.0
return (stationID, entryType, temperature)
lines = sc.textFile("file:///D:/Github/Hadoop_Spark_Practice/udemy_pyspark/1800.csv")
parsedLines = lines.map(parseLine) # transfer from line to parsed pairs
minTemps = parsedLines.filter(lambda x: "TMAX" in x[1]) # only keep the records with TMIN
stationTemps = minTemps.map(lambda x: (x[0], x[2])) # remove x[1]
minTemps = stationTemps.reduceByKey(lambda x, y: max(x,y)) # find the min value
results = minTemps.collect()
for result in results:
print(result[0] + "\t{:.2f}F".format(result[1]))
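# --- Added note (not part of the original exercise) ---
# For comparison, a sketch of the complementary job that finds each station's
# maximum temperature; only the filter field and the reducer change:
# maxTemps = parsedLines.filter(lambda x: "TMAX" in x[1])
# stationMaxTemps = maxTemps.map(lambda x: (x[0], x[2])).reduceByKey(lambda x, y: max(x, y))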
| 39.136364 | 90 | 0.699187 | 122 | 861 | 4.909836 | 0.606557 | 0.035058 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.031378 | 0.148664 | 861 | 21 | 91 | 41 | 0.785812 | 0.11266 | 0 | 0 | 0 | 0 | 0.126482 | 0.081686 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.058824 | 0 | 0.176471 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b382df3f6b2858f533490ca2d78d4ad6529491c4 | 6,122 | py | Python | src/polyswarmd/app.py | polyswarm/polyswarmd | b732d60f0f829cc355c1f938bbe6de69f9985098 | [
"MIT"
] | 14 | 2018-04-16T18:04:23.000Z | 2019-11-26T06:39:23.000Z | src/polyswarmd/app.py | polyswarm/polyswarmd | b732d60f0f829cc355c1f938bbe6de69f9985098 | [
"MIT"
] | 227 | 2018-04-03T01:10:34.000Z | 2021-03-25T21:49:58.000Z | src/polyswarmd/app.py | polyswarm/polyswarmd | b732d60f0f829cc355c1f938bbe6de69f9985098 | [
"MIT"
] | 2 | 2018-04-23T18:37:47.000Z | 2021-04-26T10:58:39.000Z | """
isort:skip_file
"""
from concurrent.futures import ThreadPoolExecutor
from requests_futures.sessions import FuturesSession
from polyswarmd.monkey import patch_all
patch_all()
import datetime
import functools
import logging
from flask import Flask, g, request
from flask_caching import Cache
from polyswarmd.config.polyswarmd import PolySwarmd, DEFAULT_FALLBACK_SIZE
from polyswarmd.utils.logger import init_logging # noqa
from polyswarmd.utils.profiler import setup_profiler
from polyswarmd.utils.response import success, failure, install_error_handlers
logger = logging.getLogger(__name__)
cache: Cache = Cache(config={"CACHE_TYPE": "simple", "CACHE_DEFAULT_TIMEOUT": 30})
# Set up our app object
app = Flask(__name__)
app.url_map.strict_slashes = False
_config = PolySwarmd.auto()
app.config['POLYSWARMD'] = _config
# Setting this value works even when Content-Length is omitted, we must have it
app.config['MAX_CONTENT_LENGTH'] = _config.artifact.max_size * _config.artifact.limit
session = FuturesSession(executor=ThreadPoolExecutor(4), adapter_kwargs={'max_retries': 2})
session.request = functools.partial(session.request, timeout=10)
app.config['REQUESTS_SESSION'] = session
app.config['CHECK_BLOCK_LIMIT'] = True
app.config['THREADPOOL'] = ThreadPoolExecutor()
install_error_handlers(app)
from polyswarmd.views.eth import misc
from polyswarmd.views.artifacts import artifacts
from polyswarmd.views.balances import balances
from polyswarmd.views.bounties import bounties
from polyswarmd.views.relay import relay
from polyswarmd.views.offers import offers
from polyswarmd.views.staking import staking
from polyswarmd.views.event_message import init_websockets
app.register_blueprint(misc, url_prefix='/')
app.register_blueprint(artifacts, url_prefix='/artifacts')
app.register_blueprint(balances, url_prefix='/balances')
app.register_blueprint(bounties, url_prefix='/bounties')
app.register_blueprint(relay, url_prefix='/relay')
app.register_blueprint(offers, url_prefix='/offers')
app.register_blueprint(staking, url_prefix='/staking')
if app.config['POLYSWARMD'].websocket.enabled:
init_websockets(app)
setup_profiler(app)
cache.init_app(app)
AUTH_WHITELIST = {'/status', '/relay/withdrawal', '/transactions'}
@cache.memoize(30)
def get_auth(api_key, auth_uri):
future = session.get(auth_uri, headers={'Authorization': api_key})
return future.result()
@cache.memoize(30)
def get_account(api_key, auth_uri):
future = session.get(auth_uri, params={'api_key': api_key})
return future.result()
def check_auth_response(api_response):
if api_response is None or api_response.status_code // 100 != 2:
return None
try:
return api_response.json()
except ValueError:
logger.exception(
'Invalid response from API key management service, received: %s', api_response.encode()
)
return None
class User(object):
def __init__(self, authorized=False, user_id=None, max_artifact_size=DEFAULT_FALLBACK_SIZE):
self.authorized = authorized
self.max_artifact_size = max_artifact_size
self.user_id = user_id if authorized else None
@classmethod
def from_api_key(cls, api_key):
config = app.config['POLYSWARMD']
auth_uri = f'{config.auth.uri}/communities/{config.community}/auth'
r = get_auth(api_key, auth_uri)
j = check_auth_response(r)
if j is None:
return cls(
authorized=False, user_id=None, max_artifact_size=config.artifact.fallback_max_size
)
anonymous = j.get('anonymous', True)
user_id = j.get('user_id') if not anonymous else None
# Get account features
account_uri = f'{config.auth.uri}/accounts'
r = get_account(api_key, account_uri)
j = check_auth_response(r)
if j is None:
return cls(
authorized=True,
user_id=user_id,
max_artifact_size=config.artifact.fallback_max_size
)
max_artifact_size = next((
f['base_uses']
for f in j.get('account', {}).get('features', [])
if f['tag'] == 'max_artifact_size'
), config.artifact.fallback_max_size)
return cls(authorized=True, user_id=user_id, max_artifact_size=max_artifact_size)
@property
def anonymous(self):
return self.user_id is None
def __bool__(self):
config = app.config['POLYSWARMD']
return config.auth.require_api_key and self.authorized
@app.route('/status')
def status():
config = app.config['POLYSWARMD']
return success(config.status.get_status())
@app.before_request
def before_request():
g.user = User()
config = app.config['POLYSWARMD']
if not config.auth.require_api_key:
return
# Ignore prefix if present
try:
api_key = request.headers.get('Authorization').split()[-1]
except Exception:
# exception == unauthenticated
return whitelist_check(request.path)
if api_key:
g.user = User.from_api_key(api_key)
if not g.user:
return whitelist_check(request.path)
size = request.content_length
if size is not None and size > g.user.max_artifact_size * 256:
return failure('Payload too large', 413)
def whitelist_check(path):
# Want to be able to whitelist unauthenticated routes, everything requires auth by default
return None if path in AUTH_WHITELIST else failure('Unauthorized', 401)
@app.after_request
def after_request(response):
eth_address = getattr(g, 'eth_address', None)
user = getattr(g, 'user', None)
if response.status_code == 200:
logger.info(
'%s %s %s %s %s %s', datetime.datetime.now(), request.method, response.status_code,
request.path, eth_address, user.user_id
)
else:
logger.error(
'%s %s %s %s %s %s: %s', datetime.datetime.now(), request.method, response.status_code,
request.path, eth_address, user.user_id, response.get_data()
)
return response
| 30.61 | 99 | 0.703692 | 799 | 6,122 | 5.180225 | 0.244055 | 0.023194 | 0.036241 | 0.006765 | 0.231457 | 0.161633 | 0.144721 | 0.144721 | 0.103165 | 0.08577 | 0 | 0.005473 | 0.194218 | 6,122 | 199 | 100 | 30.763819 | 0.83357 | 0.046553 | 0 | 0.142857 | 0 | 0 | 0.099485 | 0.017182 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078571 | false | 0 | 0.142857 | 0.014286 | 0.35 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3838e5127349f4a28ab146728a622c8c9ad0a50 | 9,702 | py | Python | script/AWS_EC2_info_to_FMC.py | tekgourou/AWS_EC2_CONTEXT_to_FMC | 777df7e5acd96cdab0b65eda1c8b9c54f4d477c2 | [
"Apache-2.0"
] | 3 | 2020-06-29T17:23:16.000Z | 2021-08-18T05:47:53.000Z | script/AWS_EC2_info_to_FMC.py | tekgourou/AWS_EC2_CONTEXT_to_FMC | 777df7e5acd96cdab0b65eda1c8b9c54f4d477c2 | [
"Apache-2.0"
] | null | null | null | script/AWS_EC2_info_to_FMC.py | tekgourou/AWS_EC2_CONTEXT_to_FMC | 777df7e5acd96cdab0b65eda1c8b9c54f4d477c2 | [
"Apache-2.0"
] | null | null | null | #!/bin/env python
'''
PURPOSE:
THIS SCRIPT IMPORTS THE OPERATING SYSTEM INFORMATION FOR ALL EC2 INSTANCES FROM THE AWS EC2 API,
PRINTS THE OUTPUT TO A CSV AND THEN IMPORTS THE CSV INTO FIREPOWER MANAGEMENT CENTER (FMC) USING THE FMC HOST INPUT API.
DEPENDENCIES / REQUIREMENTS:
1- PYTHON 3.6
2- PERL 5
3- AN ACCOUNT ON AWS CLOUD AND AN API KEY GENERATED.
4- FIREPOWER MANAGEMENT CENTER (FMC) 6.x +
5- 'requests' MODULE, THAT CAN BE INSTALLED BY EXECUTING THE COMMAND "python -m pip install requests"
6- 'boto3' MODULE, THAT CAN BE INSTALLED BY EXECUTING THE COMMAND "python -m pip install boto3"
7- UPDATE THE 'parameters.json' FILE WITH THE DETAILS BEFORE EXECUTING THIS SCRIPT
8- TCP PORT 443 TO THE AWS EC2 API.
9- TCP PORT 8307 TO FMC
10- FMC HOST INPUT API CLIENT CERTIFICATE FILE (xxxxxx.pkcs12) GENERATED FROM FMC, DOWNLOADED IN THIS SCRIPT'S LOCAL DIRECTORY.
TO GENERATE THE CERTIFICATE, LOGIN TO FMC WEB GUI AND NAVIGATE TO SYSTEM -> INTEGRATIONS -> HOST INPUT CLIENT -> CREATE CLIENT
-> HOSTNAME IS THE IP OF THE HOST RUNNING THIS SCRIPT AND ***NO PASSWORD*** -> DOWNLOAD THE PKCS12 FILE IN THIS SCRIPT'S LOCAL DIRECTORY
This script is based on the AMP4Endpoint Host Input for FMC. Modified by Alexandre Argeris (aargeris@cisco.com)
NOTE:
All Cisco software is subject to the Supplemental End User License Agreements (SEULA) located at https://www.cisco.com/c/en/us/about/legal/cloud-and-software/software-terms.html
'''
import json
import sys
import subprocess
import logging
import os
from AWS_EC2_instance_info import get_aws_ec2_info
print('##########################################################')
print('# AWS EC2 instance context sharing to FMC #')
print('# Production use at your own risk #')
print('# aargeris@cisco.com, alexandre@argeris.net #')
print('# Run this script once to detect any error #')
print('# then put it in your crontab #')
print('##########################################################')
print()
auditlogfile = "AUDIT.log"
# Start Log File Handler
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.FileHandler(auditlogfile)
datefmt = '[%Y-%m-%d %H:%M:%S]'
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', datefmt)
handler.setFormatter(formatter)
logger.addHandler(handler)
# Import variables to get configuration
logger.info("###############################################################################")
logger.info("Starting execution of the script")
config = ''
try:
config = json.loads(open("parameters.json").read())
logger.info("Found the parameters file - 'parameters.json'. Loading in parameters now....")
except Exception as err:
logger.error(
"ERROR in reading the 'parameters.json' file or the file does not exist. So exiting! Below is the exact exception message.")
print(
"ERROR in reading the 'parameters.json' file or the file does not exist. So exiting! Below is the exact exception message.")
logger.error(str(err))
print(str(err))
logger.error("Check out the sample 'parameters.json' file for example....")
print("Check out the sample 'parameters.json' file for example....")
sys.exit()
csv = open("./hostinputcsv.txt", "w")
# Create dictionary of variables
var = {
"FMC_ipaddress": config["FMC_ipaddress"],
"FMC_host_vuln_db_overwrite_OR_update": config["FMC_host_vuln_db_overwrite_OR_update"],
"push_changes_to_fmc": config["push_changes_to_fmc"],
"FMC_user": config["FMC_user"],
"FMC_password": config["FMC_password"],
}
# Check to make sure there is data in the parameters
for key in var.keys():
value = var[key]
if value == "":
logger.error("Missing Value for the Parameter {}.... So exiting!".format(key, value))
print("Missing Value for the Parameter {}.... So exiting!".format(key, value))
sys.exit()
if 'FMC_ipaddress' not in var.keys():
logger.error(
"Missing the Parameter - 'FMC_ipaddress'. So exiting! Check out the sample 'parameters.json' file for example.... ")
print(
"Missing the Parameter - 'FMC_ipaddress'. So exiting! Check out the sample 'parameters.json' file for example.... ")
sys.exit()
if 'FMC_host_vuln_db_overwrite_OR_update' not in var.keys():
logger.error(
"Missing the Parameter - 'FMC_host_vuln_db_overwrite_OR_update'. So exiting! Check out the sample 'parameters.json' file for example.... ")
print(
"Missing the Parameter - 'FMC_host_vuln_db_overwrite_OR_update'. So exiting! Check out the sample 'parameters.json' file for example.... ")
sys.exit()
if var['FMC_host_vuln_db_overwrite_OR_update'] != "overwrite" and var[
'FMC_host_vuln_db_overwrite_OR_update'] != "update":
logger.error(
"Parameter - 'FMC_host_vuln_db_overwrite_OR_update' can be either set to \"update\" or \"overwrite\". Any other value is not allowed... So exiting! Check out the sample 'parameters.json' file for example.... ")
print(
"Parameter - 'FMC_host_vuln_db_overwrite_OR_update' can be either set to \"update\" or \"overwrite\". Any other value is not allowed... So exiting! Check out the sample 'parameters.json' file for example.... ")
sys.exit()
if 'push_changes_to_fmc' not in var.keys():
logger.error(
"Missing the Parameter - 'push_changes_to_fmc'. So exiting! Check out the sample 'parameters.json' file for example.... ")
print(
"Missing the Parameter - 'push_changes_to_fmc'. So exiting! Check out the sample 'parameters.json' file for example.... ")
sys.exit()
logger.info("Parameter check complete")
#Prepare the CSV for FMC host input
csv.write("SetSource,AWS EC2 API\n")
csv.write("AddHostAttribute,{},{}\n".format('AWS EC2 Info', 'text'))
def add_host_to_csv(ip, opersys, AWS_EC2_INFO ):
csv.write("AddHost,{}\n".format(ip))
csv.write("SetAttributeValue,{},{},{}\n".format(ip, 'AWS EC2 Info', AWS_EC2_INFO))
if "Windows" in opersys:
csv.write("SetOS,{},Microsoft,Windows,\"{}\"\n".format(ip, opersys))
elif "Amazon" in opersys:
csv.write("SetOS,{},Amazon,Linux,\"{}\"\n".format(ip, opersys))
elif "Ubuntu" in opersys:
csv.write("SetOS,{},Ubuntu,Linux,\"{}\"\n".format(ip, opersys))
elif "SUSE" in opersys:
csv.write("SetOS,{},Suse,Linux,\"{}\"\n".format(ip, opersys))
elif "Red Hat" in opersys:
csv.write("SetOS,{},Red Hat,Linux,\"{}\"\n".format(ip, opersys))
elif "CentOS" in opersys:
csv.write("SetOS,{},CentOS,Linux,\"{}\"\n".format(ip, opersys))
else:
csv.write("SetOS,{},{},{},\"{}\"\n".format(ip, opersys, "TBD", "TBD"))
# ADDING ENDPOINT CONTEXT to CSV
instance_list = get_aws_ec2_info()
for instance in instance_list:
if instance['Public IP'] == None:
AWS_EC2_INFO = ('EC2 Name: {} - EC2 Type: {} - EC2 VPC ID: {}'.format(instance['Name'], instance['Type'], instance['VPC ID']))
add_host_to_csv(instance['Private IP'], instance['Image Description'], AWS_EC2_INFO)
else:
AWS_EC2_INFO = ('EC2 Name: {} - Public IP: {} - EC2 Type: {} - EC2 VPC ID: {}'.format(instance['Name'], instance['Public IP'],instance['Type'], instance['VPC ID']))
add_host_to_csv(instance['Private IP'], instance['Image Description'], AWS_EC2_INFO)
AWS_EC2_INFO = ('EC2 Name: {} - Private IP: {} - EC2 Type: {} - EC2 VPC ID: {}'.format(instance['Name'], instance['Private IP'], instance['Type'], instance['VPC ID']))
add_host_to_csv(instance['Public IP'], instance['Image Description'], AWS_EC2_INFO)
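# --- Added illustration (not part of the original script) ---
# For a single, hypothetical instance (private IP 10.0.0.5, name web-1, type
# t3.micro, VPC vpc-0abc, Amazon Linux image), the loop above writes host input
# lines of this shape into hostinputcsv.txt:
#   AddHost,10.0.0.5
#   SetAttributeValue,10.0.0.5,AWS EC2 Info,EC2 Name: web-1 - EC2 Type: t3.micro - EC2 VPC ID: vpc-0abc
#   SetOS,10.0.0.5,Amazon,Linux,"Amazon Linux 2 AMI"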
#SENDING CSV File to FMC via HOST INPUT API
if var['FMC_host_vuln_db_overwrite_OR_update'] == "overwrite":
csv.write("ScanFlush")
else:
csv.write("ScanUpdate")
csv.close()
logger.info("Completed the Parsing of the events and wrote the information to the CSV file")
if not var["push_changes_to_fmc"]:
logger.info("Not supposed to push any changes to FMC as per the parameters in 'parameters.json'... So exiting!")
print("Not supposed to push any changes to FMC as per the parameters in 'parameters.json'... So exiting!")
sys.exit()
else:
# Call the Perl Host Input SDK client for the Host Input
logger.info("Calling the PERL client of FMC Host Input SDK to push the CSV details into FMC")
perl_log_filename = ".HostInput.log"
if os.path.exists(perl_log_filename):
try:
os.remove(perl_log_filename)
except:
pass
logger.info("COMMAND:-" + " perl" + " sf_host_input_agent.pl" + " -server={}".format(
var["FMC_ipaddress"]) + " -level=3" + " -logfile={}".format(
perl_log_filename) + " -plugininfo=hostinputcsv.txt" + " csv" + " -runondc=n")
pipe = subprocess.call(["perl", "sf_host_input_agent.pl", "-server={}".format(var["FMC_ipaddress"]), "-level=3",
"-logfile={}".format(perl_log_filename), "-plugininfo=hostinputcsv.txt", "csv",
"-runondc=n"])
logger.info("The output of the script is saved in a seperate file. Copying the content of that file here as-it-is")
try:
with open(perl_log_filename) as f:
output = f.read()
logger.info("\n" + output)
f.close()
os.remove(perl_log_filename)
except:
logger.error(
"Could not open the " + perl_log_filename + " file, so probably the PERL script execution might have failed")
print(
"Could not open the " + perl_log_filename + " file, so probably the PERL script execution might have failed")
sys.exit()
print("The output of the script is appended to '" + auditlogfile + "' file")
| 48.029703 | 219 | 0.660276 | 1,349 | 9,702 | 4.633062 | 0.227576 | 0.03808 | 0.03744 | 0.0272 | 0.50176 | 0.46752 | 0.42176 | 0.4016 | 0.39632 | 0.38736 | 0 | 0.006984 | 0.188312 | 9,702 | 201 | 220 | 48.268657 | 0.786667 | 0.176149 | 0 | 0.324138 | 0 | 0.089655 | 0.534913 | 0.112824 | 0 | 0 | 0 | 0 | 0 | 1 | 0.006897 | false | 0.013793 | 0.041379 | 0 | 0.048276 | 0.131034 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b384f1cd8ef3bf5972a03b34c5abadd537fbffab | 2,394 | py | Python | app-packages/solr/package/scripts/solr_node.py | turningme/incubator-retired-slider | 1d4f519d763210f46e327338be72efa99e65cb5d | [
"Apache-2.0"
] | 60 | 2015-01-05T10:51:11.000Z | 2018-12-15T03:48:09.000Z | app-packages/solr/package/scripts/solr_node.py | turningme/incubator-retired-slider | 1d4f519d763210f46e327338be72efa99e65cb5d | [
"Apache-2.0"
] | null | null | null | app-packages/solr/package/scripts/solr_node.py | turningme/incubator-retired-slider | 1d4f519d763210f46e327338be72efa99e65cb5d | [
"Apache-2.0"
] | 87 | 2015-01-14T05:14:15.000Z | 2018-12-25T14:14:56.000Z | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from resource_management import *
class Solr_Component(Script):
def install(self, env):
self.install_packages(env)
def configure(self, env):
import params
env.set_params(params)
def start(self, env):
import params
env.set_params(params)
self.configure(env)
start_solr_cmd = """{java64_home}/bin/java
-server
-Xss256k
-Xmx{xmx_val}
-Xms{xms_val}
{gc_tune}
{solr_opts}
-DzkClientTimeout={zk_timeout}
-DzkHost={zk_host}
-Dhost={solr_host}
-Djetty.port={port}
-DSTOP.PORT={stop_port}
-DSTOP.KEY={stop_key}
-Duser.timezone=UTC
-Dsolr.solr.home=\"{app_root}/server/solr\"
-Dsolr.install.dir=\"{app_root}\"
-Djetty.home=\"{app_root}/server\"
-Xloggc:\"{app_root}/server/logs/solr_gc.log\"
-jar start.jar {server_module}"""
process_cmd = format(start_solr_cmd.replace("\n", " "))
print("Starting Solr using command: "+process_cmd)
Execute(process_cmd,
logoutput=True,
wait_for_finish=False,
pid_file=params.pid_file,
poll_after = 10,
cwd=format("{app_root}/server")
)
def stop(self, env):
import params
env.set_params(params)
stop_cmd = format("bin/solr stop -p {port} -k {stop_key}")
Execute(stop_cmd,
logoutput=True,
wait_for_finish=True,
cwd=format("{app_root}")
)
def status(self, env):
import params
env.set_params(params)
status_cmd = "bin/solr status"
Execute(status_cmd,
logoutput=True,
wait_for_finish=True,
cwd=format("{app_root}")
)
if __name__ == "__main__":
Solr_Component().execute()
| 26.898876 | 72 | 0.70259 | 342 | 2,394 | 4.754386 | 0.447368 | 0.030135 | 0.03198 | 0.04674 | 0.169127 | 0.169127 | 0.151292 | 0.151292 | 0.060271 | 0.060271 | 0 | 0.005624 | 0.182957 | 2,394 | 88 | 73 | 27.204545 | 0.825665 | 0.324144 | 0 | 0.25 | 0 | 0 | 0.350528 | 0.156619 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.1 | 0 | 0.2 | 0.016667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b388a93bd00b03fd3f2be3f945fb5452ac8fba8a | 2,364 | py | Python | other/gen_inserts_from_hpge.py | dlooto/dauphine | ba0a0caaf513dbbfcfdd658ad5c687cbf279c021 | [
"Apache-2.0"
] | null | null | null | other/gen_inserts_from_hpge.py | dlooto/dauphine | ba0a0caaf513dbbfcfdd658ad5c687cbf279c021 | [
"Apache-2.0"
] | null | null | null | other/gen_inserts_from_hpge.py | dlooto/dauphine | ba0a0caaf513dbbfcfdd658ad5c687cbf279c021 | [
"Apache-2.0"
] | 1 | 2020-08-13T14:34:44.000Z | 2020-08-13T14:34:44.000Z | # 2017 data backfill
# Generate import SQL statements from the SPE and RPT files
import os
import time
def parse_spe(filename):
f = open(filename)
line = f.readline()
while line:
if line.startswith('$DATE_MEA:'):
begin_time_line = f.readline()
begin_time = time.strptime(begin_time_line.strip(), '%m/%d/%Y %H:%M:%S')
begin_time = int(time.mktime(begin_time))
f.readline()
end_time_line = f.readline().strip().split(' ')
end_time_diff = int(end_time_line[1])
break
line = f.readline()
end_time = begin_time + end_time_diff
return (begin_time, end_time, end_time + 30)
def parse_rpt(filename):
f = open(filename)
line = f.readline()
while line:
if 'Start time:' in line:
begin_time_line = line[line.find('20'):].strip()
begin_time = time.strptime(begin_time_line.strip('\x00'), '%Y/%m/%d %H:%M:%S')
begin_time = int(time.mktime(begin_time))
elif 'Real time:' in line:
end_time_line = line[line.find('time') + 5:].strip()
end_time_diff = int(end_time_line)
break
line = f.readline()
end_time = begin_time + end_time_diff
return (begin_time, end_time, end_time + 50)
def to_insert_sql(sid, filepath, file_type, start_time, end_time, data_time):
file_name = os.path.basename(filepath)
file_link = '/var/www/almada/api/storage/static/hpge/%s/%s' % (sid, file_name)
return '(null, %d, %d, %d, \'%s\', \'%s\', \'%s\', %d, 1, 0, 0)' % (data_time, start_time, end_time, sid, file_link, file_name, file_type)
w = open('a.sql', 'w')
def scanfile(path):
filelist = os.listdir(path)
allfile = []
sid = os.path.relpath(path, '/Users/healer/Downloads/ff')
for filename in filelist:
filepath = os.path.join(path, filename)
if os.path.isdir(filepath):
scanfile(filepath)
if filepath.lower().endswith('.spe'):
sql = to_insert_sql(sid, filepath, 1, *parse_spe(filepath))
elif filepath.lower().endswith('.rpt'):
sql = to_insert_sql(sid, filepath, 2, *parse_rpt(filepath))
else:
sql = None
if sql:
w.write("insert into dt_data_13 values %s;\n" % sql)
allfile = scanfile('/Users/healer/Downloads/ff')
w.close() | 31.52 | 142 | 0.58714 | 324 | 2,364 | 4.080247 | 0.283951 | 0.08472 | 0.066566 | 0.048412 | 0.413011 | 0.366112 | 0.32829 | 0.290469 | 0.231467 | 0.231467 | 0 | 0.01209 | 0.265228 | 2,364 | 75 | 143 | 31.52 | 0.748993 | 0.012267 | 0 | 0.254545 | 0 | 0 | 0.112302 | 0.041577 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072727 | false | 0 | 0.036364 | 0 | 0.163636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
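To make the output of the script above concrete: each .spe/.rpt file becomes one row of dt_data_13 via to_insert_sql(). A hedged sketch of a single generated statement, with invented station id, file name, and epoch timestamps:

# Illustrative only: values are made up; the row layout mirrors to_insert_sql() above.
sid = "station01"
file_name = "sample.Spe"
file_link = "/var/www/almada/api/storage/static/hpge/%s/%s" % (sid, file_name)
start_time, end_time = 1483225200, 1483228800   # would be parsed from the file
data_time = end_time + 30                        # .spe rows add 30 s, .rpt rows add 50 s
row = "(null, %d, %d, %d, '%s', '%s', '%s', %d, 1, 0, 0)" % (
    data_time, start_time, end_time, sid, file_link, file_name, 1)
print("insert into dt_data_13 values %s;" % row)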
b3898633efb7d04220516dced622d82112dbe427 | 1,200 | py | Python | photos/views.py | Gideon-Muriithi/photos_gallary | 06534c06582bfb5e38e0e0d9bde768512a493c2e | [
"MIT"
] | null | null | null | photos/views.py | Gideon-Muriithi/photos_gallary | 06534c06582bfb5e38e0e0d9bde768512a493c2e | [
"MIT"
] | 5 | 2020-02-12T03:13:56.000Z | 2021-09-08T01:20:31.000Z | photos/views.py | Gideon-Muriithi/photos_gallary | 06534c06582bfb5e38e0e0d9bde768512a493c2e | [
"MIT"
] | null | null | null | from django.shortcuts import render
from . models import Location, Image, categories
def get_images(request):
images = Image.get_all_images()
locations = Location.objects.all()
context = { "images":images,
"locations":locations
}
return render(request, 'images.html', context)
def get_location (request, location):
locations = Location.objects.all()
chosen_location = Location.objects.get(id=location)
images = Image.objects.filter(image_location=chosen_location.id)
context = {
'location':chosen_location, 'locations':locations, 'images':images
}
return render(request, 'location.html',context)
def seach_results(request):
if 'category' in request.GET and request.GET['category']:
search_term = request.GET.get('category')
searched_images = Image.search_by_category((search_term))
message = f"{search_term}"
context = {"message":message,"images":searched_images,
"category":search_term
}
return render(request, 'search.html', context)
else:
message = "You haven't searched for any category!"
return render(request, 'search.html',{"message":message})
| 34.285714 | 74 | 0.68 | 136 | 1,200 | 5.875 | 0.294118 | 0.060075 | 0.095119 | 0.067584 | 0.072591 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.199167 | 1,200 | 34 | 75 | 35.294118 | 0.831426 | 0 | 0 | 0.068966 | 0 | 0 | 0.155833 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0 | 0.068966 | 0 | 0.310345 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
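A hedged sketch of how the three views above might be wired into a urls.py (route paths and names are assumptions, Django 2+ style; only the view signatures are taken from the file itself):

from django.urls import path
from . import views

urlpatterns = [
    path('', views.get_images, name='images'),
    path('location/<int:location>/', views.get_location, name='location'),
    path('search/', views.seach_results, name='search'),   # note: original view name is 'seach_results'
]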
b38b561a2bf71d935045c435d5ddb3b856bc3af4 | 9,103 | py | Python | tests/processing_components/test_atmospheric_screen.py | SKA-ScienceDataProcessor/rascil | bd3b47f779e18e184781e2928ad1539d1fdc1c9b | [
"Apache-2.0"
] | 7 | 2019-12-14T13:42:33.000Z | 2022-01-28T03:31:45.000Z | tests/processing_components/test_atmospheric_screen.py | SKA-ScienceDataProcessor/rascil | bd3b47f779e18e184781e2928ad1539d1fdc1c9b | [
"Apache-2.0"
] | 6 | 2020-01-08T09:40:08.000Z | 2020-06-11T14:56:13.000Z | tests/processing_components/test_atmospheric_screen.py | SKA-ScienceDataProcessor/rascil | bd3b47f779e18e184781e2928ad1539d1fdc1c9b | [
"Apache-2.0"
] | 3 | 2020-01-14T11:14:16.000Z | 2020-09-15T05:21:06.000Z | """ Unit tests for mpc
"""
import logging
import os
import unittest
import astropy.units as u
import numpy
from astropy.coordinates import SkyCoord
from rascil.data_models.parameters import rascil_path, rascil_data_path
from rascil.data_models.polarisation import PolarisationFrame
from rascil.processing_components import create_image, create_empty_image_like
from rascil.processing_components.image.operations import import_image_from_fits, export_image_to_fits
from rascil.processing_components.imaging.primary_beams import create_low_test_beam
from rascil.processing_components.simulation import create_low_test_skycomponents_from_gleam, \
create_test_skycomponents_from_s3
from rascil.processing_components.simulation import create_named_configuration
from rascil.processing_components.simulation import create_test_image
from rascil.processing_components.simulation.atmospheric_screen import create_gaintable_from_screen, \
grid_gaintable_to_screen, plot_gaintable_on_screen
from rascil.processing_components.skycomponent.operations import apply_beam_to_skycomponent
from rascil.processing_components.skycomponent.operations import filter_skycomponents_by_flux
from rascil.processing_components.visibility.base import create_blockvisibility
log = logging.getLogger('logger')
log.setLevel(logging.WARNING)
class TestAtmosphericScreen(unittest.TestCase):
def setUp(self):
self.persist = os.getenv("RASCIL_PERSIST", False)
self.dir = rascil_path('test_results')
def actualSetup(self, atmosphere="ionosphere"):
dec = -40.0 * u.deg
self.times = numpy.linspace(-10.0, 10.0, 3) * numpy.pi / (3600.0 * 12.0)
self.phasecentre = SkyCoord(ra=+0.0 * u.deg, dec=dec, frame='icrs', equinox='J2000')
if atmosphere == "ionosphere":
self.core = create_named_configuration('LOWBD2', rmax=300.0)
self.frequency = numpy.array([1.0e8])
self.channel_bandwidth = numpy.array([5e7])
self.cellsize = 0.000015
else:
self.core = create_named_configuration('MID', rmax=300.0)
self.frequency = numpy.array([1.36e9])
self.channel_bandwidth = numpy.array([1e8])
self.cellsize = 0.00015
self.vis = create_blockvisibility(self.core, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre, weight=1.0,
polarisation_frame=PolarisationFrame('stokesI'))
self.vis.data['vis'] *= 0.0
# Create model
self.model = create_image(npixel=512, cellsize=0.000015, polarisation_frame=PolarisationFrame("stokesI"),
frequency=self.frequency, channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre)
def test_read_screen(self):
screen = import_image_from_fits(rascil_data_path('models/test_mpc_screen.fits'))
assert screen.data.shape == (1, 3, 2000, 2000), screen.data.shape
def test_create_gaintable_from_screen_ionosphere(self):
self.actualSetup("ionosphere")
screen = import_image_from_fits(rascil_data_path('models/test_mpc_screen.fits'))
beam = create_test_image(cellsize=0.0015, phasecentre=self.vis.phasecentre,
frequency=self.frequency)
beam = create_low_test_beam(beam, use_local=False)
gleam_components = \
create_low_test_skycomponents_from_gleam(flux_limit=1.0,
phasecentre=self.phasecentre,
frequency=self.frequency,
polarisation_frame=PolarisationFrame('stokesI'),
radius=0.2)
pb_gleam_components = apply_beam_to_skycomponent(gleam_components, beam)
actual_components = filter_skycomponents_by_flux(pb_gleam_components, flux_min=1.0)
gaintables = create_gaintable_from_screen(self.vis, actual_components, screen)
assert len(gaintables) == len(actual_components), len(gaintables)
assert gaintables[0].gain.shape == (3, 94, 1, 1, 1), gaintables[0].gain.shape
def test_create_gaintable_from_screen_troposphere(self):
self.actualSetup("troposphere")
screen = import_image_from_fits(rascil_data_path('models/test_mpc_screen.fits'))
beam = create_test_image(cellsize=0.00015, phasecentre=self.vis.phasecentre,
frequency=self.frequency)
beam = create_low_test_beam(beam, use_local=False)
s3_components = create_test_skycomponents_from_s3(flux_limit=0.3,
phasecentre=self.phasecentre,
frequency=self.frequency,
polarisation_frame=PolarisationFrame('stokesI'),
radius=1.5 * numpy.pi / 180.0)
assert len(s3_components) > 0, "No S3 components selected"
pb_s3_components = apply_beam_to_skycomponent(s3_components, beam)
actual_components = filter_skycomponents_by_flux(pb_s3_components, flux_max=10.0)
assert len(actual_components) > 0, "No components after applying primary beam"
gaintables = create_gaintable_from_screen(self.vis, actual_components, screen, height=3e3,
type_atmosphere="troposphere")
assert len(gaintables) == len(actual_components), len(gaintables)
assert gaintables[0].gain.shape == (3, 63, 1, 1, 1), gaintables[0].gain.shape
def test_grid_gaintable_to_screen(self):
self.actualSetup()
screen = import_image_from_fits(rascil_data_path('models/test_mpc_screen.fits'))
beam = create_test_image(cellsize=0.0015, phasecentre=self.vis.phasecentre,
frequency=self.frequency)
beam = create_low_test_beam(beam, use_local=False)
gleam_components = create_low_test_skycomponents_from_gleam(flux_limit=1.0,
phasecentre=self.phasecentre,
frequency=self.frequency,
polarisation_frame=PolarisationFrame(
'stokesI'),
radius=0.2)
pb_gleam_components = apply_beam_to_skycomponent(gleam_components, beam)
actual_components = filter_skycomponents_by_flux(pb_gleam_components, flux_min=1.0)
gaintables = create_gaintable_from_screen(self.vis, actual_components, screen)
assert len(gaintables) == len(actual_components), len(gaintables)
assert gaintables[0].gain.shape == (3, 94, 1, 1, 1), gaintables[0].gain.shape
newscreen = create_empty_image_like(screen)
newscreen, weights = grid_gaintable_to_screen(self.vis, gaintables, newscreen)
assert numpy.max(numpy.abs(screen.data)) > 0.0
if self.persist: export_image_to_fits(newscreen, rascil_path('test_results/test_mpc_screen_gridded.fits'))
if self.persist: export_image_to_fits(weights, rascil_path('test_results/test_mpc_screen_gridded_weights.fits'))
def test_plot_gaintable_to_screen(self):
self.actualSetup()
screen = import_image_from_fits(rascil_data_path('models/test_mpc_screen.fits'))
beam = create_test_image(cellsize=0.0015, phasecentre=self.vis.phasecentre,
frequency=self.frequency)
beam = create_low_test_beam(beam, use_local=False)
gleam_components = create_low_test_skycomponents_from_gleam(flux_limit=1.0,
phasecentre=self.phasecentre,
frequency=self.frequency,
polarisation_frame=PolarisationFrame(
'stokesI'),
radius=0.2)
pb_gleam_components = apply_beam_to_skycomponent(gleam_components, beam)
actual_components = filter_skycomponents_by_flux(pb_gleam_components, flux_min=1.0)
gaintables = create_gaintable_from_screen(self.vis, actual_components, screen)
assert len(gaintables) == len(actual_components), len(gaintables)
assert gaintables[0].gain.shape == (3, 94, 1, 1, 1), gaintables[0].gain.shape
plot_gaintable_on_screen(self.vis, gaintables, plotfile=rascil_path(
'test_results/test_plot_gaintable_to_screen.png'))
| 52.618497 | 120 | 0.631989 | 981 | 9,103 | 5.565749 | 0.158002 | 0.038095 | 0.03663 | 0.054945 | 0.65989 | 0.597985 | 0.592674 | 0.520513 | 0.493773 | 0.476557 | 0 | 0.027336 | 0.288696 | 9,103 | 172 | 121 | 52.924419 | 0.815907 | 0.003515 | 0 | 0.414063 | 0 | 0 | 0.05341 | 0.029905 | 0 | 0 | 0 | 0 | 0.09375 | 1 | 0.054688 | false | 0 | 0.179688 | 0 | 0.242188 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
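A hedged example of running this test module on its own while keeping the FITS files it can write; only RASCIL_PERSIST is taken from the file above (read in setUp), the rest is standard unittest usage:

import os
import unittest

os.environ["RASCIL_PERSIST"] = "True"   # any non-empty string makes self.persist truthy
unittest.main(module="tests.processing_components.test_atmospheric_screen", exit=False)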
b38fdb90a8567c51ecce68bb72dba00c506fbc8a | 6,822 | bzl | Python | tools/build_rules/proto.bzl | bowlofstew/kythe | 23a3524de3924901ffaba4b8bcab8abe96de6f3a | [
"Apache-2.0"
] | 1 | 2021-04-24T08:18:15.000Z | 2021-04-24T08:18:15.000Z | tools/build_rules/proto.bzl | bowlofstew/kythe | 23a3524de3924901ffaba4b8bcab8abe96de6f3a | [
"Apache-2.0"
] | 3 | 2020-12-31T09:08:34.000Z | 2021-09-28T05:42:02.000Z | tools/build_rules/proto.bzl | moul/kythe | 2e198cc818981fc6cffa14d8263fda3a33da6429 | [
"Apache-2.0"
] | null | null | null | load("@//tools:build_rules/go.bzl", "go_library")
standard_proto_path = "third_party/proto/src/"
def go_package_name(go_prefix, label):
return "%s%s/%s" % (go_prefix.go_prefix, label.package, label.name)
def _genproto_impl(ctx):
proto_src_deps = [src.proto_src for src in ctx.attr.deps]
inputs, outputs, arguments = [ctx.file.src] + proto_src_deps, [], ["--proto_path=."]
for src in proto_src_deps:
if src.path.startswith(standard_proto_path):
arguments += ["--proto_path=" + standard_proto_path]
break
if ctx.attr.gen_cc:
outputs += [ctx.outputs.cc_hdr, ctx.outputs.cc_src]
arguments += ["--cpp_out=" + ctx.configuration.genfiles_dir.path]
if ctx.attr.gen_java:
if ctx.outputs.java_src.path.endswith(".srcjar"):
srcjar = ctx.new_file(ctx.outputs.java_src.basename[:-6] + "jar")
else:
srcjar = ctx.outputs.java_src
outputs += [srcjar]
arguments += ["--java_out=" + srcjar.path]
if ctx.attr.has_services:
java_grpc_plugin = ctx.executable._protoc_grpc_plugin_java
inputs += [java_grpc_plugin]
arguments += [
"--plugin=protoc-gen-java_rpc=" + java_grpc_plugin.path,
"--java_rpc_out=" + srcjar.path
]
go_package = go_package_name(ctx.attr._go_package_prefix, ctx.label)
if ctx.attr.gen_go:
outputs += [ctx.outputs.go_src]
go_cfg = ["import_path=" + go_package, _go_import_path(ctx.attr.deps)]
if ctx.attr.has_services:
go_cfg += ["plugins=grpc"]
genfiles_path = ctx.configuration.genfiles_dir.path
if ctx.attr.gofast:
inputs += [ctx.executable._protoc_gen_gofast]
arguments += [
"--plugin=" + ctx.executable._protoc_gen_gofast.path,
"--gofast_out=%s:%s" % (",".join(go_cfg), genfiles_path)
]
else:
inputs += [ctx.executable._protoc_gen_go]
arguments += [
"--plugin=" + ctx.executable._protoc_gen_go.path,
"--golang_out=%s:%s" % (",".join(go_cfg), genfiles_path)
]
ctx.action(
mnemonic = "GenProto",
inputs = inputs,
outputs = outputs,
arguments = arguments + [ctx.file.src.path],
executable = ctx.executable._protoc)
# This is required because protoc only understands .jar extensions, but Bazel
# requires source JAR files end in .srcjar.
if ctx.attr.gen_java and srcjar != ctx.outputs.java_src:
ctx.action(
mnemonic = "FixProtoSrcJar",
inputs = [srcjar],
outputs = [ctx.outputs.java_src],
arguments = [srcjar.path, ctx.outputs.java_src.path],
command = "cp $1 $2")
# Fixup the resulting outputs to keep the source-only .jar out of the result.
outputs += [ctx.outputs.java_src]
outputs = [e for e in outputs if e != srcjar]
return struct(files=set(outputs),
go_package=go_package,
proto_src=ctx.file.src)
_genproto_attrs = {
"src": attr.label(
allow_files = FileType([".proto"]),
single_file = True,
),
"deps": attr.label_list(
allow_files = False,
providers = ["proto_src"],
),
"has_services": attr.bool(),
"gofast": attr.bool(),
"_protoc": attr.label(
default = Label("//third_party/proto:protoc"),
executable = True,
),
"_go_package_prefix": attr.label(
default = Label("//external:go_package_prefix"),
providers = ["go_prefix"],
allow_files = False,
),
"_protoc_gen_go": attr.label(
default = Label("@go_protobuf//:protoc-gen-golang"),
executable = True,
),
"_protoc_gen_gofast": attr.label(
default = Label("@go_gogo_protobuf//:protoc-gen-gofast"),
executable = True,
),
"_protoc_grpc_plugin_java": attr.label(
default = Label("//third_party/grpc-java:plugin"),
executable = True,
),
"gen_cc": attr.bool(),
"gen_java": attr.bool(),
"gen_go": attr.bool(),
}
def _genproto_outputs(attrs):
outputs = {}
if attrs.gen_cc:
outputs += {
"cc_hdr": "%{src}.pb.h",
"cc_src": "%{src}.pb.cc"
}
if attrs.gen_go:
outputs += {
"go_src": "%{src}.pb.go",
}
if attrs.gen_java:
outputs += {
"java_src": "%{src}.srcjar",
}
return outputs
genproto = rule(
_genproto_impl,
attrs = _genproto_attrs,
output_to_genfiles = True,
outputs = _genproto_outputs,
)
def proto_library(name, src=None, deps=[], visibility=None,
has_services=False,
gen_java=False, gen_go=False, gen_cc=False,
gofast=True):
if not src:
if name.endswith("_proto"):
src = name[:-6] + ".proto"
else:
src = name + ".proto"
proto_pkg = genproto(name=name,
src=src,
deps=deps,
has_services=has_services,
gen_java=gen_java,
gen_go=gen_go,
gen_cc=gen_cc,
gofast=gofast)
# TODO(shahms): These should probably not be separate libraries, but
# allowing upstream *_library and *_binary targets to depend on the
# proto_library() directly is a challenge. We'd also need a different
# workaround for the non-generated any.pb.{h,cc} from the upstream protocol
# buffer library.
if gen_java:
java_deps = ["//third_party/proto:protobuf_java"]
if has_services:
java_deps += [
"//external:guava",
"//third_party/grpc-java",
"//third_party/jsr305_annotations:jsr305",
]
for dep in deps:
java_deps += [dep + "_java"]
native.java_library(
name = name + "_java",
srcs = [proto_pkg.label()],
deps = java_deps,
visibility = visibility,
)
if gen_go:
go_deps = ["@go_protobuf//:proto"]
if has_services:
go_deps += [
"@go_x_net//:context",
"@go_grpc//:grpc",
]
for dep in deps:
go_deps += [dep + "_go"]
go_library(
name = name + "_go",
srcs = [proto_pkg.label()],
deps = go_deps,
multi_package = 1,
visibility = visibility,
)
if gen_cc:
cc_deps = ["//third_party/proto:protobuf"]
for dep in deps:
cc_deps += [dep + "_cc"]
native.cc_library(
name = name + "_cc",
visibility = visibility,
hdrs = [proto_pkg.label()],
srcs = [proto_pkg.label()],
defines = ["GOOGLE_PROTOBUF_NO_RTTI"],
deps = cc_deps,
)
def _go_import_path(deps):
import_map = {}
for dep in deps:
if dep.proto_src.path.startswith(standard_proto_path):
import_map += {dep.proto_src.path[len(standard_proto_path):]: dep.go_package}
else:
import_map += {dep.proto_src.path: dep.go_package}
return ",".join(["M%s=%s" % i for i in import_map.items()])
| 31.730233 | 86 | 0.598358 | 856 | 6,822 | 4.508178 | 0.191589 | 0.022804 | 0.029023 | 0.030837 | 0.210158 | 0.099508 | 0.034206 | 0.034206 | 0 | 0 | 0 | 0.002184 | 0.261653 | 6,822 | 214 | 87 | 31.878505 | 0.763947 | 0.071094 | 0 | 0.203125 | 0 | 0 | 0.14507 | 0.063369 | 0 | 0 | 0 | 0.004673 | 0 | 1 | 0.026042 | false | 0 | 0.03125 | 0.005208 | 0.078125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
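A hypothetical BUILD file using the proto_library macro defined above; the load label, target name, and dependency are invented for illustration. When name ends in "_proto", src defaults to name[:-6] + ".proto":

load("//tools:build_rules/proto.bzl", "proto_library")   # assumed load label

proto_library(
    name = "example_proto",               # src defaults to "example.proto"
    deps = ["//some/package:other_proto"],  # must be another proto_library target
    has_services = True,
    gen_cc = True,
    gen_go = True,
    gen_java = True,
)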
b391c9147986afbcf63eec9ecaef07d00a11bbee | 1,128 | py | Python | examples/nsw-qld.py | ejwilson3/NchooseK | 1e985fb47808283fbb579609b2467be3f5a4a29e | [
"BSD-3-Clause"
] | null | null | null | examples/nsw-qld.py | ejwilson3/NchooseK | 1e985fb47808283fbb579609b2467be3f5a4a29e | [
"BSD-3-Clause"
] | 1 | 2021-07-19T20:41:06.000Z | 2021-07-19T20:41:06.000Z | examples/nsw-qld.py | ejwilson3/NchooseK | 1e985fb47808283fbb579609b2467be3f5a4a29e | [
"BSD-3-Clause"
] | 1 | 2021-07-14T17:21:17.000Z | 2021-07-14T17:21:17.000Z | #! /usr/bin/env python
###################################
# Test NchooseK on a two-region #
# map-coloring problem #
# #
# By Scott Pakin <pakin@lanl.gov> #
###################################
import nchoosek
# Define a type for "exactly one color".
env = nchoosek.Environment()
OneColor = env.new_type('one_color', 'RGBY',
nchoosek.Constraint('RGBY', {1}))
NotBothTrue = env.new_type('not_both_true', 'AB',
nchoosek.Constraint('AB', {0, 1}))
# Define all colors in all regions.
qld = [env.register_port('qld.' + c) for c in 'RGBY']
nsw = [env.register_port('nsw.' + c) for c in 'RGBY']
# Establish constraints.
qld_color = OneColor(qld)
nsw_color = OneColor(nsw)
for i in range(len(qld)):
NotBothTrue([qld[i], nsw[i]])
# Output the environment.
print('Ports:')
print(' ', env.ports())
print('')
print('Constraints:')
for c in set(env.constraints()):
print(' ', c)
print('')
# Solve for all variables in the environment.
result = env.solve()
for k, v in sorted(result.items()):
print('%-16s %s' % (k, v))
| 26.857143 | 61 | 0.560284 | 144 | 1,128 | 4.326389 | 0.451389 | 0.019262 | 0.028892 | 0.022472 | 0.035313 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005701 | 0.222518 | 1,128 | 41 | 62 | 27.512195 | 0.704675 | 0.281028 | 0 | 0.090909 | 0 | 0 | 0.116919 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.045455 | 0 | 0.045455 | 0.318182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
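A hedged post-processing sketch for the result printed above, assuming env.solve() maps each port name (e.g. 'qld.R') to a boolean; the sample values are made up:

result = {'qld.R': True, 'qld.G': False, 'qld.B': False, 'qld.Y': False,
          'nsw.R': False, 'nsw.G': True, 'nsw.B': False, 'nsw.Y': False}

colors = {}
for port, value in result.items():
    if value:                        # OneColor makes exactly one color True per region
        region, color = port.split('.')
        colors[region] = color
print(colors)                        # e.g. {'qld': 'R', 'nsw': 'G'}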
b3934e37cd454e67d99a3d0f67cc435385b6aa5b | 7,902 | py | Python | googledrive_cloner/tests/mock_service.py | martinmCGG/googledrivecloner | b7d9592ef0bed34b567dfdf5f306b057edd2a0f3 | [
"MIT"
] | 5 | 2022-02-01T01:07:17.000Z | 2022-03-29T19:23:52.000Z | googledrive_cloner/tests/mock_service.py | martinmCGG/googledrivecloner | b7d9592ef0bed34b567dfdf5f306b057edd2a0f3 | [
"MIT"
] | 1 | 2022-03-29T19:23:25.000Z | 2022-03-29T19:23:25.000Z | googledrive_cloner/tests/mock_service.py | martinmCGG/googledrivecloner | b7d9592ef0bed34b567dfdf5f306b057edd2a0f3 | [
"MIT"
] | 1 | 2022-03-29T14:07:36.000Z | 2022-03-29T14:07:36.000Z | import copy
from typing import Dict, List, Optional, Set, Union
from unittest.mock import Mock
from uuid import uuid4
class File:
"""
Mock File class representing a GoogleDrive File with basic information
file_id, name, parents (list of parent ids), mimeType
"""
def __init__(
self,
file_id: str,
name: str = "",
parents: List[str] = None,
mimeType: str = "mime",
):
self.id = file_id
self.name = name
self.parents = parents or list()
self.mimeType = mimeType
def __eq__(self, other: "File") -> bool:
return (
self.name == other.name
and self.parents == other.parents
and self.mimeType == other.mimeType
)
def __repr__(self) -> str:
return f"mimeType: {self.mimeType}, name:{self.name}, parents: {self.parents}"
def copy(self, new_id: str) -> "File":
"""
Make a copy of this file, under a different id
:param new_id: (str) of new file id
:return: (File)
"""
return File(
file_id=new_id,
name=self.name,
parents=copy.copy(self.parents),
mimeType=self.mimeType,
)
class Node:
"""Node in the file directory tree"""
def __init__(self, file_id: str, parent_node: "Node" = None):
self.file_id = file_id
self.parent_node = parent_node
self.children: Set["Node"] = set()
class Tree:
"""Mocked version of a file directory tree"""
def __init__(self):
self.root = Node(file_id="root")
self.nodes = {"root": self.root}
def link_parent(self, child_node: Node, parent_id: str) -> None:
"""
Link child node to parent id, creating a parent if it doesn't exist
:param child_node: (Node)
:param parent_id: (str)
:return: (None) updates tree
"""
parent_id = parent_id or "root"
# Create parent if it doesn't exist
if parent_id not in self.nodes:
self.nodes[parent_id] = Node(file_id=parent_id)
# Add parent to child
self.nodes[child_node.file_id].parent_node = self.nodes[parent_id]
# Add child to parent's children
self.nodes[parent_id].children.add(self.nodes[child_node.file_id])
def add(self, file_id: str, parent_id: str) -> None:
"""
Add a Node with a given file_id (if it doesn't exist) and link to parent_id
:param file_id: (str)
:param parent_id: (str)
:return: (None)
"""
if file_id not in self.nodes:
self.nodes[file_id] = Node(file_id=file_id)
child_node = self.nodes[file_id]
self.link_parent(child_node, parent_id)
def print_node(self, node: Node) -> list:
"""
Get a list representation of the node,
[node.file_id, [children nodes]]
:param node: (Node)
:return: (list)
"""
children = list()
for child_node in node.children:
if not child_node.children:
children.append(child_node.file_id)
else:
children.append(self.print_node(child_node))
return [node.file_id, list(sorted(children, key=lambda child: str(child)))]
def print(self) -> list:
"""
Get a list representation of the whole file structure,
starting with the root node
:return: (list)
"""
return self.print_node(self.nodes["root"])
def return_execute(func: callable):
"""
Transform function to return a Mock so that the function only runs when `execute`
is called
i.e.
@return_execute
def foo():
return 1
foo().execute() -> 1
:param func: (callable) to decorate
:return: (callable) which returns a Mock with an `execute` method
"""
def inner(*a, **k):
mock = Mock()
mock.execute.side_effect = lambda *args, **kwargs: func(*a, **k)
return mock
return inner
class MockService:
def __init__(
self,
):
"""
Mock Google Drive Files Service
"""
self.list_mock = Mock()
self.get_mock = Mock()
self.next_tokens = list()
# file to parent
self.files: Dict[str, File] = dict()
@return_execute
def create(self, body, *a, **k) -> dict:
file_id = str(uuid4())
new_file = File(
file_id=file_id,
parents=body["parents"],
mimeType=body["mimeType"],
name=body["name"],
)
self.files[file_id] = new_file
return {"id": file_id}
@return_execute
def copy(self, fileId: str) -> dict:
old_file = self.files[fileId]
new_id = str(uuid4())
new_file = old_file.copy(new_id)
new_file.id = new_id
self.files[new_id] = new_file
return {"id": new_id}
@return_execute
def delete(self, fileId: str) -> dict:
del self.files[fileId]
return {"id": fileId}
@return_execute
def update(self, **kwargs):
file_id = kwargs["fileId"]
file = self.files[file_id]
file.parents.remove(kwargs["removeParents"])
file.parents.append(kwargs["addParents"])
file.name = kwargs["body"]["name"]
self.files[file_id] = file
return {"id": file_id}
def _get(
self,
fileId: str,
fields: str = "mimeType,name,parents",
single_parent: bool = False,
) -> Dict[str, Optional[Union[str, list]]]:
"""
Get a representation of the file as a dict,
returning any extra fields with dummy values
Also cleans parent field if requested to only return an id, not list
:param fileId: (str)
:param fields: (str) to get, comma separated
:param single_parent: (bool) only return the single parent id
:return: (dict)
"""
file = self.files[fileId]
resp: Dict[str, Optional[Union[str, list]]] = {
**{field: f"{field}_value" for field in fields.split(",")},
**{
"id": fileId,
"parents": file.parents,
"mimeType": file.mimeType,
"name": file.name,
},
}
if single_parent:
if file.parents:
resp["parent"] = file.parents[0]
else:
resp["parent"] = None
del resp["parents"]
return resp
@return_execute
def get(self, fileId: str, fields: str = "mimeType,name,parents", *args, **kwargs):
"""Also logs to the Mock -> self.get_mock for analysis of passed kwargs"""
self.get_mock(fileId=fileId, fields=fields, *args, **kwargs)
return self._get(fileId, fields)
@return_execute
def list(self, *args, **kwargs):
"""Also logs to the Mock -> self.list_mock for analysis of passed kwargs"""
self.list_mock(*args, **kwargs)
resp = {
"files": [
self._get(file_id, fields="name,parents,mimeType")
for file_id in self.files
]
}
if self.next_tokens:
resp["nextPageToken"] = self.next_tokens.pop()
return resp
def _add_file(self, file: File) -> File:
"""
Add a file to the file store
:param file: (File) to add
:return: (File)
"""
self.files[file.id] = file
return file
@property
def file_structure(self) -> Tree:
"""
Get a tree representation of the current file structure
:return: (Tree)
"""
tree = Tree()
for file_id in sorted(self.files):
file = self.files[file_id]
parent_id = file.parents[0] if file.parents else None
tree.add(file_id, parent_id)
return tree
| 29.266667 | 87 | 0.559226 | 986 | 7,902 | 4.342799 | 0.157201 | 0.054647 | 0.018683 | 0.017515 | 0.201308 | 0.168846 | 0.078001 | 0.036432 | 0.021952 | 0.021952 | 0 | 0.001314 | 0.325867 | 7,902 | 269 | 88 | 29.375465 | 0.802515 | 0.223235 | 0 | 0.133758 | 0 | 0 | 0.053486 | 0.011121 | 0 | 0 | 0 | 0 | 0 | 1 | 0.140127 | false | 0 | 0.025478 | 0.012739 | 0.292994 | 0.025478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
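A hedged usage sketch for MockService above: create a folder and a file through the mocked Drive API, then inspect the resulting tree (the ids are whatever uuid4() returns):

# from googledrive_cloner.tests.mock_service import MockService   # import path is an assumption
service = MockService()
folder_id = service.create(body={"name": "docs", "parents": ["root"],
                                 "mimeType": "application/vnd.google-apps.folder"}).execute()["id"]
service.create(body={"name": "notes.txt", "parents": [folder_id],
                     "mimeType": "text/plain"}).execute()
print(service.file_structure.print())   # -> ['root', [[<folder_id>, [<file_id>]]]]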
b393a832ea806e708dd3e9d64cfaf7e065346907 | 43,854 | py | Python | astrocats/catalog/entry.py | Astrocats-Cataclysmic-Variable-Catalog/Cataclysmic-Varible-Astrocats | 7835595a4df5bf463bf11412f65b4f8671ae8dfc | [
"MIT"
] | null | null | null | astrocats/catalog/entry.py | Astrocats-Cataclysmic-Variable-Catalog/Cataclysmic-Varible-Astrocats | 7835595a4df5bf463bf11412f65b4f8671ae8dfc | [
"MIT"
] | null | null | null | astrocats/catalog/entry.py | Astrocats-Cataclysmic-Variable-Catalog/Cataclysmic-Varible-Astrocats | 7835595a4df5bf463bf11412f65b4f8671ae8dfc | [
"MIT"
] | null | null | null | """Definitions related to the `Entry` class for catalog entries."""
import codecs
import gzip as gz
import hashlib
import json
import logging
import os
import sys
from collections import OrderedDict
from copy import deepcopy
from decimal import Decimal
from astrocats.catalog.catdict import CatDict, CatDictError
from astrocats.catalog.error import ERROR, Error
from astrocats.catalog.key import KEY_TYPES, Key, KeyCollection
from astrocats.catalog.model import MODEL, Model
from astrocats.catalog.photometry import PHOTOMETRY, Photometry
from astrocats.catalog.quantity import QUANTITY, Quantity
from astrocats.catalog.source import SOURCE, Source
from astrocats.catalog.spectrum import SPECTRUM, Spectrum
from astrocats.catalog.utils import (alias_priority, dict_to_pretty_string,
is_integer, is_number, listify)
from past.builtins import basestring
from six import string_types
class ENTRY(KeyCollection):
"""General `CatDict` keys which should be relevant for all catalogs."""
# Constants for use in key definitions
_DIST_PREF_KINDS = [
'heliocentric', 'cmb', 'spectroscopic', 'photometric', 'host',
'cluster'
]
_HOST_DIST_PREF_KINDS = [
'heliocentric', 'cmb', 'spectroscopic', 'photometric', 'host',
'cluster'
]
# List of keys
ALIAS = Key('alias', KEY_TYPES.STRING)
COMOVING_DIST = Key('comovingdist',
KEY_TYPES.NUMERIC,
kind_preference=_DIST_PREF_KINDS,
replace_better=True)
DEC = Key('dec', KEY_TYPES.STRING)
DISCOVER_DATE = Key('discoverdate', KEY_TYPES.STRING, replace_better=True)
DISCOVERER = Key('discoverer', KEY_TYPES.STRING)
DISTINCT_FROM = Key('distinctfrom', KEY_TYPES.STRING)
EBV = Key('ebv', KEY_TYPES.NUMERIC, replace_better=True)
AV_CIRCUM = Key('avcircum', KEY_TYPES.NUMERIC, replace_better=True)
ERRORS = Key('errors', no_source=True)
HOST = Key('host', KEY_TYPES.STRING)
HOST_DEC = Key('hostdec', KEY_TYPES.STRING)
HOST_OFFSET_ANG = Key('hostoffsetang', KEY_TYPES.NUMERIC)
HOST_OFFSET_DIST = Key('hostoffsetdist', KEY_TYPES.NUMERIC)
HOST_RA = Key('hostra', KEY_TYPES.STRING)
HOST_REDSHIFT = Key('hostredshift',
KEY_TYPES.NUMERIC,
kind_preference=_HOST_DIST_PREF_KINDS,
replace_better=True)
HOST_VELOCITY = Key('hostvelocity',
KEY_TYPES.NUMERIC,
kind_preference=_HOST_DIST_PREF_KINDS,
replace_better=True)
HOST_LUM_DIST = Key('hostlumdist',
KEY_TYPES.NUMERIC,
kind_preference=_HOST_DIST_PREF_KINDS,
replace_better=True)
HOST_COMOVING_DIST = Key('hostcomovingdist',
KEY_TYPES.NUMERIC,
kind_preference=_HOST_DIST_PREF_KINDS,
replace_better=True)
LUM_DIST = Key('lumdist',
KEY_TYPES.NUMERIC,
kind_preference=_DIST_PREF_KINDS,
replace_better=True)
MAX_ABS_MAG = Key('maxabsmag', KEY_TYPES.NUMERIC)
MAX_APP_MAG = Key('maxappmag', KEY_TYPES.NUMERIC)
MAX_BAND = Key('maxband', KEY_TYPES.STRING)
MAX_DATE = Key('maxdate', KEY_TYPES.STRING, replace_better=True)
MODELS = Key('models')
NAME = Key('name', KEY_TYPES.STRING, no_source=True)
PHOTOMETRY = Key('photometry')
RA = Key('ra', KEY_TYPES.STRING)
REDSHIFT = Key('redshift',
KEY_TYPES.NUMERIC,
kind_preference=_DIST_PREF_KINDS,
replace_better=True)
SCHEMA = Key('schema', no_source=True)
SOURCES = Key('sources', no_source=True)
SPECTRA = Key('spectra')
VELOCITY = Key('velocity',
KEY_TYPES.NUMERIC,
kind_preference=_DIST_PREF_KINDS,
replace_better=True)
class Entry(OrderedDict):
"""Class representing an individual element of each Catalog.
For example, a single supernova in the supernova catalog, this object
handles and manages the addition of data for this `Entry`, using different
`CatDict` instances (e.g. `Photometry`).
Notes
-----
- Stubs: a stub is the most minimal entry, containing an entry's 'name'
and possible aliases. These instances are used to represent entries
which are known to exist (e.g. have already been saved) for cross
referencing and duplicate removal.
+ The `Entry.get_stub` method returns the 'stub' corresponding to the
Entry instance. i.e. it returns a *new object* with only the name
and aliases copied over.
Attributes
----------
catalog : `astrocats.catalog.catalog.Catalog` object
Pointer to the parent catalog object of which this entry is a member.
filename : str or 'None'
If this entry is loaded from a file, its (full path and) filename.
_log : `logging.Logger` object
Pointer to the logger from the parent catalog.
_stub : bool
Whether this instance represents a 'stub' (see above).
_KEYS : `astrocats.catalog.key.KeyCollection` object
The associated object which contains the different dictionary keys
used in this type (e.g. `Supernova`) entry.
"""
_KEYS = ENTRY
def __init__(self, catalog=None, name=None, stub=False):
"""Create a new `Entry` object with the given `name`.
Arguments
---------
catalog : `astrocats.catalog.catalog.Catalog` instance
The parent catalog object of which this entry belongs.
name : str
The name of this entry, e.g. `SN1987A` for a `Supernova` entry.
stub : bool
Whether or not this instance represents a 'stub' (see above).
"""
super(Entry, self).__init__()
self.catalog = catalog
self.filename = None
self.dupe_of = []
self._stub = stub
if catalog:
self._log = catalog.log
else:
from astrocats.catalog.catalog import Catalog
self._log = logging.getLogger()
self.catalog = Catalog(None, self._log)
self[self._KEYS.NAME] = name
return
def __repr__(self):
"""Return JSON representation of self."""
jsonstring = dict_to_pretty_string({ENTRY.NAME: self})
return jsonstring
def _append_additional_tags(self, quantity, source, cat_dict):
"""Append additional bits of data to an existing quantity.
Called when a newly added quantity is found to be a duplicate.
"""
pass
def _get_save_path(self, bury=False):
"""Return the path that this Entry should be saved to."""
filename = self.get_filename(self[self._KEYS.NAME])
# Put objects that shouldn't belong in this catalog in the boneyard
if bury:
outdir = self.catalog.get_repo_boneyard()
# Get normal repository save directory
else:
repo_folders = self.catalog.PATHS.get_repo_output_folders()
# If no repo folders exist, raise an error -- cannot save
if not len(repo_folders):
err_str = (
"No output data repositories found. Cannot save.\n"
"Make sure that repo names are correctly configured "
"in the `input/repos.json` file, and either manually or "
"automatically (using `astrocats CATALOG git-clone`) "
"clone the appropriate data repositories.")
self.catalog.log.error(err_str)
raise RuntimeError(err_str)
outdir = repo_folders[0]
return outdir, filename
def _ordered(self, odict):
"""Convert the object into a plain OrderedDict."""
ndict = OrderedDict()
if isinstance(odict, CatDict) or isinstance(odict, Entry):
key = odict.sort_func
else:
key = None
nkeys = list(sorted(odict.keys(), key=key))
for key in nkeys:
if isinstance(odict[key], OrderedDict):
odict[key] = self._ordered(odict[key])
if isinstance(odict[key], list):
if (not (odict[key] and
not isinstance(odict[key][0], OrderedDict))):
nlist = []
for item in odict[key]:
if isinstance(item, OrderedDict):
nlist.append(self._ordered(item))
else:
nlist.append(item)
odict[key] = nlist
ndict[key] = odict[key]
return ndict
def get_hash(self, keys=[]):
"""Return a unique hash associated with the listed keys."""
if not len(keys):
keys = list(self.keys())
string_rep = ''
oself = self._ordered(deepcopy(self))
for key in keys:
string_rep += json.dumps(oself.get(key, ''), sort_keys=True)
return hashlib.sha512(string_rep.encode()).hexdigest()[:16]
def _clean_quantity(self, quantity):
"""Clean quantity value before it is added to entry."""
value = quantity.get(QUANTITY.VALUE, '').strip()
error = quantity.get(QUANTITY.E_VALUE, '').strip()
unit = quantity.get(QUANTITY.U_VALUE, '').strip()
kind = quantity.get(QUANTITY.KIND, '')
if isinstance(kind, list) and not isinstance(kind, string_types):
kind = [x.strip() for x in kind]
else:
kind = kind.strip()
if not value:
return False
if is_number(value):
value = '%g' % Decimal(value)
if error:
error = '%g' % Decimal(error)
if value:
quantity[QUANTITY.VALUE] = value
if error:
quantity[QUANTITY.E_VALUE] = error
if unit:
quantity[QUANTITY.U_VALUE] = unit
if kind:
quantity[QUANTITY.KIND] = kind
return True
def __deepcopy__(self, memo):
"""Define how an `Entry` should be deep copied."""
new_entry = self.__class__(self.catalog)
for key in self:
if not key.startswith('__') and key != 'catalog':
new_entry[key] = deepcopy(self[key])
return new_entry
def _load_data_from_json(self,
fhand,
clean=False,
merge=True,
pop_schema=True,
ignore_keys=[],
compare_to_existing=True,
gzip=False,
filter_on={}):
        # FIX: check for overwrite??
self._log.debug("_load_data_from_json(): {}\n\t{}".format(self.name(),
fhand))
# Store the filename this was loaded from
self.filename = fhand
if gzip:
jfil = gz.open(fhand, 'rb')
else:
jfil = open(fhand, 'r')
data = json.load(jfil, object_pairs_hook=OrderedDict)
name = list(data.keys())
if len(name) != 1:
err = "json file '{}' has multiple keys: {}".format(fhand,
list(name))
self._log.error(err)
raise ValueError(err)
name = name[0]
# Remove the outmost dict level
data = data[name]
self._log.debug("Name: {}".format(name))
# Delete ignored keys
for key in ignore_keys:
if key in data:
del data[key]
# Convert the OrderedDict data from json into class structure i.e.
# `Sources` will be extracted and created from the dict Everything
# that remains afterwards should be okay to just store to this
# `Entry`
self._convert_odict_to_classes(
data,
clean=clean,
merge=merge,
pop_schema=pop_schema,
compare_to_existing=compare_to_existing,
filter_on=filter_on)
if len(data):
err_str = ("Remaining entries in `data` after "
"`_convert_odict_to_classes`.")
err_str += "\n{}".format(dict_to_pretty_string(data))
self._log.error(err_str)
raise RuntimeError(err_str)
jfil.close()
# If object doesnt have a name yet, but json does, store it
self_name = self[ENTRY.NAME]
if len(self_name) == 0:
self[ENTRY.NAME] = name
# Warn if there is a name mismatch
elif self_name.lower().strip() != name.lower().strip():
self._log.warning("Object name '{}' does not match name in json:"
"'{}'".format(self_name, name))
self.check()
return
def _convert_odict_to_classes(self,
data,
clean=False,
merge=True,
pop_schema=True,
compare_to_existing=True,
filter_on={}):
"""Convert `OrderedDict` into `Entry` or its derivative classes."""
self._log.debug("_convert_odict_to_classes(): {}".format(self.name()))
self._log.debug("This should be a temporary fix. Dont be lazy.")
# Setup filters. Currently only used for photometry.
fkeys = list(filter_on.keys())
# Handle 'name'
name_key = self._KEYS.NAME
if name_key in data:
self[name_key] = data.pop(name_key)
# Handle 'schema'
schema_key = self._KEYS.SCHEMA
if schema_key in data:
# Schema should be re-added every execution (done elsewhere) so
# just delete the old entry
if pop_schema:
data.pop(schema_key)
else:
self[schema_key] = data.pop(schema_key)
# Cleanup 'internal' repository stuff
if clean:
# Add data to `self` in ways accomodating 'internal' formats and
# leeway. Removes each added entry from `data` so the remaining
# stuff can be handled normally
data = self.clean_internal(data)
# Handle 'sources'
# ----------------
src_key = self._KEYS.SOURCES
if src_key in data:
# Remove from `data`
sources = data.pop(src_key)
self._log.debug("Found {} '{}' entries".format(
len(sources), src_key))
self._log.debug("{}: {}".format(src_key, sources))
for src in sources:
self.add_source(allow_alias=True, **src)
# Handle `photometry`
# -------------------
photo_key = self._KEYS.PHOTOMETRY
if photo_key in data:
photoms = data.pop(photo_key)
self._log.debug("Found {} '{}' entries".format(
len(photoms), photo_key))
phcount = 0
for photo in photoms:
skip = False
for fkey in fkeys:
if fkey in photo and photo[fkey] not in filter_on[fkey]:
skip = True
if skip:
continue
self._add_cat_dict(
Photometry,
self._KEYS.PHOTOMETRY,
compare_to_existing=compare_to_existing,
**photo)
phcount += 1
self._log.debug("Added {} '{}' entries".format(
phcount, photo_key))
# Handle `spectra`
# ---------------
spec_key = self._KEYS.SPECTRA
if spec_key in data:
# When we are cleaning internal data, we don't always want to
# require all of the normal spectrum data elements.
spectra = data.pop(spec_key)
self._log.debug("Found {} '{}' entries".format(
len(spectra), spec_key))
for spec in spectra:
self._add_cat_dict(
Spectrum,
self._KEYS.SPECTRA,
compare_to_existing=compare_to_existing,
**spec)
# Handle `error`
# --------------
err_key = self._KEYS.ERRORS
if err_key in data:
errors = data.pop(err_key)
self._log.debug("Found {} '{}' entries".format(
len(errors), err_key))
for err in errors:
self._add_cat_dict(Error, self._KEYS.ERRORS, **err)
# Handle `models`
# ---------------
model_key = self._KEYS.MODELS
if model_key in data:
# When we are cleaning internal data, we don't always want to
# require all of the normal spectrum data elements.
model = data.pop(model_key)
self._log.debug("Found {} '{}' entries".format(
len(model), model_key))
for mod in model:
self._add_cat_dict(
Model,
self._KEYS.MODELS,
compare_to_existing=compare_to_existing,
**mod)
# Handle everything else --- should be `Quantity`s
# ------------------------------------------------
if len(data):
self._log.debug("{} remaining entries, assuming `Quantity`".format(
len(data)))
# Iterate over remaining keys
for key in list(data.keys()):
vals = data.pop(key)
# All quantities should be in lists of that quantity
# E.g. `aliases` is a list of alias quantities
if not isinstance(vals, list):
vals = [vals]
self._log.debug("{}: {}".format(key, vals))
for vv in vals:
self._add_cat_dict(
Quantity,
key,
check_for_dupes=merge,
compare_to_existing=compare_to_existing,
**vv)
if merge and self.dupe_of:
self.merge_dupes()
return
def _check_cat_dict_source(self, cat_dict_class, key_in_self, **kwargs):
"""Check that a source exists and that a quantity isn't erroneous."""
# Make sure that a source is given
source = kwargs.get(cat_dict_class._KEYS.SOURCE, None)
if source is None:
raise CatDictError(
"{}: `source` must be provided!".format(self[self._KEYS.NAME]),
warn=True)
# Check that source is a list of integers
for x in source.split(','):
if not is_integer(x):
raise CatDictError(
"{}: `source` is comma-delimited list of "
" integers!".format(self[self._KEYS.NAME]),
warn=True)
# If this source/data is erroneous, skip it
if self.is_erroneous(key_in_self, source):
self._log.info("This source is erroneous, skipping")
return None
# If this source/data is private, skip it
if (self.catalog.args is not None and not self.catalog.args.private and
self.is_private(key_in_self, source)):
self._log.info("This source is private, skipping")
return None
return source
def _init_cat_dict(self, cat_dict_class, key_in_self, **kwargs):
"""Initialize a CatDict object, checking for errors."""
# Catch errors associated with crappy, but not unexpected data
try:
new_entry = cat_dict_class(self, key=key_in_self, **kwargs)
except CatDictError as err:
if err.warn:
self._log.info("'{}' Not adding '{}': '{}'".format(self[
self._KEYS.NAME], key_in_self, str(err)))
return None
return new_entry
def _add_cat_dict(self,
cat_dict_class,
key_in_self,
check_for_dupes=True,
compare_to_existing=True,
**kwargs):
"""Add a `CatDict` to this `Entry`.
CatDict only added if initialization succeeds and it
doesn't already exist within the Entry.
"""
# Make sure that a source is given, and is valid (nor erroneous)
if cat_dict_class != Error:
try:
source = self._check_cat_dict_source(cat_dict_class,
key_in_self, **kwargs)
except CatDictError as err:
if err.warn:
self._log.info("'{}' Not adding '{}': '{}'".format(self[
self._KEYS.NAME], key_in_self, str(err)))
return False
if source is None:
return False
# Try to create a new instance of this subclass of `CatDict`
new_entry = self._init_cat_dict(cat_dict_class, key_in_self, **kwargs)
if new_entry is None:
return False
# Compare this new entry with all previous entries to make sure is new
if compare_to_existing and cat_dict_class != Error:
for item in self.get(key_in_self, []):
if new_entry.is_duplicate_of(item):
item.append_sources_from(new_entry)
# Return the entry in case we want to use any additional
# tags to augment the old entry
return new_entry
# If this is an alias, add it to the parent catalog's reverse
# dictionary linking aliases to names for fast lookup.
if key_in_self == self._KEYS.ALIAS:
# Check if this adding this alias makes us a dupe, if so mark
# ourselves as a dupe.
if (check_for_dupes and 'aliases' in dir(self.catalog) and
new_entry[QUANTITY.VALUE] in self.catalog.aliases):
possible_dupe = self.catalog.aliases[new_entry[QUANTITY.VALUE]]
# print(possible_dupe)
if (possible_dupe != self[self._KEYS.NAME] and
possible_dupe in self.catalog.entries):
self.dupe_of.append(possible_dupe)
if 'aliases' in dir(self.catalog):
self.catalog.aliases[new_entry[QUANTITY.VALUE]] = self[
self._KEYS.NAME]
self.setdefault(key_in_self, []).append(new_entry)
if (key_in_self == self._KEYS.ALIAS and check_for_dupes and
self.dupe_of):
self.merge_dupes()
return True
@classmethod
def get_filename(cls, name):
"""Convert from an `Entry` name into an appropriate filename."""
fname = name.replace('/', '_')
return fname
@classmethod
def init_from_file(cls,
catalog,
name=None,
path=None,
clean=False,
merge=True,
pop_schema=True,
ignore_keys=[],
compare_to_existing=True,
try_gzip=False,
filter_on={}):
"""Construct a new `Entry` instance from an input file.
The input file can be given explicitly by `path`, or a path will
be constructed appropriately if possible.
Arguments
---------
catalog : `astrocats.catalog.catalog.Catalog` instance
The parent catalog object of which this entry belongs.
name : str or 'None'
The name of this entry, e.g. `SN1987A` for a `Supernova` entry.
If no `path` is given, a path is constructed by trying to find
a file in one of the 'output' repositories with this `name`.
note: either `name` or `path` must be provided.
path : str or 'None'
The absolutely path of the input file.
note: either `name` or `path` must be provided.
clean : bool
Whether special sanitization processing should be done on the input
data. This is mostly for input files from the 'internal'
repositories.
"""
if not catalog:
from astrocats.catalog.catalog import Catalog
log = logging.getLogger()
catalog = Catalog(None, log)
catalog.log.debug("init_from_file()")
if name is None and path is None:
err = ("Either entry `name` or `path` must be specified to load "
"entry.")
log.error(err)
raise ValueError(err)
# If the path is given, use that to load from
load_path = ''
if path is not None:
load_path = path
name = ''
# If the name is given, try to find a path for it
else:
repo_paths = catalog.PATHS.get_repo_output_folders()
for rep in repo_paths:
filename = cls.get_filename(name)
newpath = os.path.join(rep, filename + '.json')
if os.path.isfile(newpath):
load_path = newpath
break
if load_path is None or not os.path.isfile(load_path):
# FIX: is this warning worthy?
return None
# Create a new `Entry` instance
new_entry = cls(catalog, name)
# Check if .gz file
if try_gzip and not load_path.endswith('.gz'):
try_gzip = False
# Fill it with data from json file
new_entry._load_data_from_json(
load_path,
clean=clean,
merge=merge,
pop_schema=pop_schema,
ignore_keys=ignore_keys,
compare_to_existing=compare_to_existing,
gzip=try_gzip,
filter_on=filter_on)
return new_entry
def add_alias(self, alias, source, clean=True):
"""Add an alias, optionally 'cleaning' the alias string.
Calls the parent `catalog` method `clean_entry_name` - to apply the
same name-cleaning as is applied to entry names themselves.
Returns
-------
alias : str
The stored version of the alias (cleaned or not).
"""
if clean:
alias = self.catalog.clean_entry_name(alias)
self.add_quantity(self._KEYS.ALIAS, alias, source)
return alias
def add_error(self, value, **kwargs):
"""Add an `Error` instance to this entry."""
kwargs.update({ERROR.VALUE: value})
self._add_cat_dict(Error, self._KEYS.ERRORS, **kwargs)
return
def add_photometry(self, compare_to_existing=True, **kwargs):
"""Add a `Photometry` instance to this entry."""
self._add_cat_dict(
Photometry,
self._KEYS.PHOTOMETRY,
compare_to_existing=compare_to_existing,
**kwargs)
return
def merge_dupes(self):
"""Merge two entries that correspond to the same entry."""
for dupe in self.dupe_of:
if dupe in self.catalog.entries:
if self.catalog.entries[dupe]._stub:
# merge = False to avoid infinite recursion
self.catalog.load_entry_from_name(
dupe, delete=True, merge=False)
self.catalog.copy_entry_to_entry(self.catalog.entries[dupe],
self)
del self.catalog.entries[dupe]
self.dupe_of = []
def add_quantity(self,
quantities,
value,
source,
check_for_dupes=True,
compare_to_existing=True,
**kwargs):
"""Add an `Quantity` instance to this entry."""
success = True
for quantity in listify(quantities):
kwargs.update({QUANTITY.VALUE: value, QUANTITY.SOURCE: source})
cat_dict = self._add_cat_dict(
Quantity,
quantity,
compare_to_existing=compare_to_existing,
check_for_dupes=check_for_dupes,
**kwargs)
if isinstance(cat_dict, CatDict):
self._append_additional_tags(quantity, source, cat_dict)
success = False
return success
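    # Hedged usage sketch (not part of the original class): a typical data-entry
    # flow combines add_source() and add_quantity(), e.g.
    #     entry = Entry(catalog, name='SN1987A')
    #     source = entry.add_source(bibcode='1989ARA&A..27..629A')   # made-up bibcode
    #     entry.add_quantity(ENTRY.REDSHIFT, '0.000928', source, kind='heliocentric')
    # add_source() returns the source's alias string, which every Quantity then
    # references through its 'source' field.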
def add_self_source(self):
"""Add a source that refers to the catalog itself.
For now this points to the Open Supernova Catalog by default.
"""
return self.add_source(
bibcode=self.catalog.OSC_BIBCODE,
name=self.catalog.OSC_NAME,
url=self.catalog.OSC_URL,
secondary=True)
def add_source(self, allow_alias=False, **kwargs):
"""Add a `Source` instance to this entry."""
if not allow_alias and SOURCE.ALIAS in kwargs:
err_str = "`{}` passed in kwargs, this shouldn't happen!".format(
SOURCE.ALIAS)
self._log.error(err_str)
raise RuntimeError(err_str)
# Set alias number to be +1 of current number of sources
if SOURCE.ALIAS not in kwargs:
kwargs[SOURCE.ALIAS] = str(self.num_sources() + 1)
source_obj = self._init_cat_dict(Source, self._KEYS.SOURCES, **kwargs)
if source_obj is None:
return None
for item in self.get(self._KEYS.SOURCES, ''):
if source_obj.is_duplicate_of(item):
return item[item._KEYS.ALIAS]
self.setdefault(self._KEYS.SOURCES, []).append(source_obj)
return source_obj[source_obj._KEYS.ALIAS]
def add_model(self, allow_alias=False, **kwargs):
"""Add a `Model` instance to this entry."""
if not allow_alias and MODEL.ALIAS in kwargs:
err_str = "`{}` passed in kwargs, this shouldn't happen!".format(
                MODEL.ALIAS)
self._log.error(err_str)
raise RuntimeError(err_str)
# Set alias number to be +1 of current number of models
if MODEL.ALIAS not in kwargs:
kwargs[MODEL.ALIAS] = str(self.num_models() + 1)
model_obj = self._init_cat_dict(Model, self._KEYS.MODELS, **kwargs)
if model_obj is None:
return None
for item in self.get(self._KEYS.MODELS, ''):
if model_obj.is_duplicate_of(item):
return item[item._KEYS.ALIAS]
self.setdefault(self._KEYS.MODELS, []).append(model_obj)
return model_obj[model_obj._KEYS.ALIAS]
def add_spectrum(self, compare_to_existing=True, **kwargs):
"""Add a `Spectrum` instance to this entry."""
spec_key = self._KEYS.SPECTRA
# Make sure that a source is given, and is valid (nor erroneous)
source = self._check_cat_dict_source(Spectrum, spec_key, **kwargs)
if source is None:
return None
# Try to create a new instance of `Spectrum`
new_spectrum = self._init_cat_dict(Spectrum, spec_key, **kwargs)
if new_spectrum is None:
return None
is_dupe = False
for item in self.get(spec_key, []):
# Only the `filename` should be compared for duplicates. If a
# duplicate is found, that means the previous `exclude` array
# should be saved to the new object, and the old deleted
if new_spectrum.is_duplicate_of(item):
if SPECTRUM.EXCLUDE in new_spectrum:
item[SPECTRUM.EXCLUDE] = new_spectrum[SPECTRUM.EXCLUDE]
elif SPECTRUM.EXCLUDE in item:
item.update(new_spectrum)
is_dupe = True
break
if not is_dupe:
self.setdefault(spec_key, []).append(new_spectrum)
return
def check(self):
"""Check that the entry has the required fields."""
# Make sure there is a schema key in dict
if self._KEYS.SCHEMA not in self:
self[self._KEYS.SCHEMA] = self.catalog.SCHEMA.URL
# Make sure there is a name key in dict
if (self._KEYS.NAME not in self or len(self[self._KEYS.NAME]) == 0):
raise ValueError("Entry name is empty:\n\t{}".format(
json.dumps(
self, indent=2)))
return
def clean_internal(self, data=None):
"""Clean input from 'internal', human added data.
This is used in the 'Entry.init_from_file' method.
"""
return data
def extra_aliases(self):
"""Return aliases considered when merging duplicates."""
return []
def get_aliases(self, includename=True):
"""Retrieve the aliases of this object as a list of strings.
Arguments
---------
includename : bool
Include the 'name' parameter in the list of aliases.
"""
# empty list if doesnt exist
alias_quanta = self.get(self._KEYS.ALIAS, [])
aliases = [aq[QUANTITY.VALUE] for aq in alias_quanta]
if includename and self[self._KEYS.NAME] not in aliases:
aliases = [self[self._KEYS.NAME]] + aliases
return aliases
def get_entry_text(self, fname):
"""Retrieve the raw text from a file."""
if fname.split('.')[-1] == 'gz':
with gz.open(fname, 'rt') as f:
filetext = f.read()
else:
with open(fname, 'r') as f:
filetext = f.read()
return filetext
def get_source_by_alias(self, alias):
"""Given an alias, find the corresponding source in this entry.
If the given alias doesn't exist (e.g. there are no sources), then a
`ValueError` is raised.
Arguments
---------
alias : str
The str-integer (e.g. '8') of the target source.
Returns
-------
source : `astrocats.catalog.source.Source` object
The source object corresponding to the passed alias.
"""
for source in self.get(self._KEYS.SOURCES, []):
if source[self._KEYS.ALIAS] == alias:
return source
raise ValueError("Source '{}': alias '{}' not found!".format(self[
self._KEYS.NAME], alias))
def get_stub(self):
"""Get a new `Entry` which contains the 'stub' of this one.
The 'stub' is only the name and aliases.
Usage:
-----
To convert a normal entry into a stub (for example), overwrite the
entry in place, i.e.
>>> entries[name] = entries[name].get_stub()
Returns
-------
stub : `astrocats.catalog.entry.Entry` subclass object
The type of the returned object is this instance's type.
"""
stub = type(self)(self.catalog, self[self._KEYS.NAME], stub=True)
if self._KEYS.ALIAS in self:
stub[self._KEYS.ALIAS] = self[self._KEYS.ALIAS]
if self._KEYS.DISTINCT_FROM in self:
stub[self._KEYS.DISTINCT_FROM] = self[self._KEYS.DISTINCT_FROM]
if self._KEYS.RA in self:
stub[self._KEYS.RA] = self[self._KEYS.RA]
if self._KEYS.DEC in self:
stub[self._KEYS.DEC] = self[self._KEYS.DEC]
if self._KEYS.DISCOVER_DATE in self:
stub[self._KEYS.DISCOVER_DATE] = self[self._KEYS.DISCOVER_DATE]
if self._KEYS.SOURCES in self:
stub[self._KEYS.SOURCES] = self[self._KEYS.SOURCES]
return stub
def is_erroneous(self, field, sources):
"""Check if attribute has been marked as being erroneous."""
if self._KEYS.ERRORS in self:
my_errors = self[self._KEYS.ERRORS]
for alias in sources.split(','):
source = self.get_source_by_alias(alias)
bib_err_values = [
err[ERROR.VALUE] for err in my_errors
if err[ERROR.KIND] == SOURCE.BIBCODE and
err[ERROR.EXTRA] == field
]
if (SOURCE.BIBCODE in source and
source[SOURCE.BIBCODE] in bib_err_values):
return True
name_err_values = [
err[ERROR.VALUE] for err in my_errors
if err[ERROR.KIND] == SOURCE.NAME and err[ERROR.EXTRA] ==
field
]
if (SOURCE.NAME in source and
source[SOURCE.NAME] in name_err_values):
return True
return False
def is_private(self, key, sources):
"""Check if attribute is private."""
# aliases are always public.
if key == ENTRY.ALIAS:
return False
return all([
SOURCE.PRIVATE in self.get_source_by_alias(x)
for x in sources.split(',')
])
def name(self):
"""Return own name."""
try:
return self[self._KEYS.NAME]
except KeyError:
return None
def num_sources(self):
"""Return the current number of sources stored in this instance.
Returns
-------
len : int
The *integer* number of existing sources.
"""
return len(self.get(self._KEYS.SOURCES, []))
def num_models(self):
"""Return the current number of models stored in this instance.
Returns
-------
len : int
The *integer* number of existing models.
"""
return len(self.get(self._KEYS.MODELS, []))
def priority_prefixes(self):
"""Return prefixes to given priority when merging duplicate entries."""
return ()
def sanitize(self):
"""Sanitize the data (sort it, etc.) before writing it to disk.
Template method that can be overridden in each catalog's subclassed
`Entry` object.
"""
name = self[self._KEYS.NAME]
aliases = self.get_aliases(includename=False)
if name not in aliases:
            # Add the name as an alias citing the first source; if that is not possible, cite a self-source.
if self._KEYS.SOURCES in self:
self.add_quantity(self._KEYS.ALIAS, name, '1')
if self._KEYS.ALIAS not in self:
source = self.add_self_source()
self.add_quantity(self._KEYS.ALIAS, name, source)
else:
source = self.add_self_source()
self.add_quantity(self._KEYS.ALIAS, name, source)
if self._KEYS.ALIAS in self:
self[self._KEYS.ALIAS].sort(
key=lambda key: alias_priority(name, key[QUANTITY.VALUE]))
else:
self._log.error(
'There should be at least one alias for `{}`.'.format(name))
if self._KEYS.PHOTOMETRY in self:
self[self._KEYS.PHOTOMETRY].sort(
key=lambda x: ((float(x[PHOTOMETRY.TIME]) if
isinstance(x[PHOTOMETRY.TIME],
(basestring, float, int))
else min([float(y) for y in
x[PHOTOMETRY.TIME]])) if
PHOTOMETRY.TIME in x else 0.0,
x[PHOTOMETRY.BAND] if PHOTOMETRY.BAND in
x else '',
float(x[PHOTOMETRY.MAGNITUDE]) if
PHOTOMETRY.MAGNITUDE in x else ''))
if (self._KEYS.SPECTRA in self and list(
filter(None, [
SPECTRUM.TIME in x for x in self[self._KEYS.SPECTRA]
]))):
self[self._KEYS.SPECTRA].sort(
key=lambda x: (float(x[SPECTRUM.TIME]) if
SPECTRUM.TIME in x else 0.0,
x[SPECTRUM.FILENAME] if
SPECTRUM.FILENAME in x else '')
)
if self._KEYS.SOURCES in self:
# Remove orphan sources
source_aliases = [
x[SOURCE.ALIAS] for x in self[self._KEYS.SOURCES]
]
# Sources with the `PRIVATE` attribute are always retained
source_list = [
x[SOURCE.ALIAS] for x in self[self._KEYS.SOURCES]
if SOURCE.PRIVATE in x
]
for key in self:
# if self._KEYS.get_key_by_name(key).no_source:
if (key in [
self._KEYS.NAME, self._KEYS.SCHEMA, self._KEYS.SOURCES,
self._KEYS.ERRORS
]):
continue
for item in self[key]:
source_list += item[item._KEYS.SOURCE].split(',')
new_src_list = sorted(
list(set(source_aliases).intersection(source_list)))
new_sources = []
for source in self[self._KEYS.SOURCES]:
if source[SOURCE.ALIAS] in new_src_list:
new_sources.append(source)
else:
self._log.info('Removing orphaned source from `{}`.'
.format(name))
if not new_sources:
del self[self._KEYS.SOURCES]
self[self._KEYS.SOURCES] = new_sources
def save(self, bury=False, final=False):
"""Write entry to JSON file in the proper location.
Arguments
---------
bury : bool
final : bool
If this is the 'final' save, perform additional sanitization and
cleaning operations.
"""
outdir, filename = self._get_save_path(bury=bury)
if final:
self.sanitize()
# FIX: use 'dump' not 'dumps'
jsonstring = json.dumps(
{
self[self._KEYS.NAME]: self._ordered(self)
},
indent='\t' if sys.version_info[0] >= 3 else 4,
separators=(',', ':'),
ensure_ascii=False)
if not os.path.isdir(outdir):
raise RuntimeError("Output directory '{}' for event '{}' does "
"not exist.".format(outdir, self[
self._KEYS.NAME]))
save_name = os.path.join(outdir, filename + '.json')
        # Replace asterisks in the save name with underscores so the path is filesystem-safe
save_name = save_name.replace("*","_")
with codecs.open(save_name, 'w', encoding='utf8') as sf:
sf.write(jsonstring)
if not os.path.exists(save_name):
raise RuntimeError("File '{}' was not saved!".format(save_name))
return save_name
def set_preferred_name(self):
"""Set a preferred name for the entry."""
return self[self._KEYS.NAME]
def sort_func(self, key):
"""Used to sort keys when writing Entry to JSON format.
Should be supplemented/overridden by inheriting classes.
"""
if key == self._KEYS.SCHEMA:
return 'aaa'
if key == self._KEYS.NAME:
return 'aab'
if key == self._KEYS.SOURCES:
return 'aac'
if key == self._KEYS.ALIAS:
return 'aad'
if key == self._KEYS.MODELS:
return 'aae'
if key == self._KEYS.PHOTOMETRY:
return 'zzy'
if key == self._KEYS.SPECTRA:
return 'zzz'
return key
| 38.067708 | 92 | 0.548319 | 5,082 | 43,854 | 4.574577 | 0.121409 | 0.034756 | 0.019098 | 0.012388 | 0.284541 | 0.23116 | 0.188016 | 0.158551 | 0.129086 | 0.115967 | 0 | 0.001314 | 0.358097 | 43,854 | 1,151 | 93 | 38.100782 | 0.824547 | 0.215693 | 0 | 0.269231 | 0 | 0 | 0.05195 | 0.002378 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054377 | false | 0.003979 | 0.030504 | 0 | 0.218833 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b39793836966e613d8f5a0a9e61da63d48b5e600 | 3,096 | py | Python | util/get-google-calendars.py | Kartoffel/infodisplay | 01c87c1d06b4bae45397243d0a99c86015cee708 | [
"MIT"
] | 2 | 2021-12-29T14:24:19.000Z | 2022-01-04T21:24:56.000Z | util/get-google-calendars.py | Kartoffel/infodisplay | 01c87c1d06b4bae45397243d0a99c86015cee708 | [
"MIT"
] | 1 | 2022-01-03T19:40:47.000Z | 2022-01-05T17:45:39.000Z | util/get-google-calendars.py | Kartoffel/infodisplay | 01c87c1d06b4bae45397243d0a99c86015cee708 | [
"MIT"
] | null | null | null | '''
get-google-calendars.py
This script will obtain the `token.json` required for getting your google calendar appointments on the info display.
It will also give you the IDs of your calendars, which you can choose to include in your config.ini
Run this on your local desktop! Install the following packages through pip first:
- google-api-python-client
- google-auth-httplib2
- google-auth-oauthlib
Create a project and enable the Google Cloud Platform API following
https://developers.google.com/workspace/guides/create-project
(enable the "Google Calendar API")
Create a desktop application and obtain the `credentials.json`
Place `credentials.json` in the folder you are running this script from.
More info and documentation can be found through
https://developers.google.com/calendar/api/quickstart/python
'''
import sys
import socket
import os.path
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
SCOPES = ['https://www.googleapis.com/auth/calendar.readonly']
timeout_in_sec = 30
socket.setdefaulttimeout(timeout_in_sec)
def refresh_credentials():
creds = None
# The file token.json stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.json'):
creds = Credentials.from_authorized_user_file('token.json', SCOPES)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
print("Credentials expired, refreshing..")
creds.refresh(Request())
else:
print("Let's get a new token")
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.json', 'w') as token:
token.write(creds.to_json())
return creds
def get_calendars():
'''
Use this to get your calendar IDs (run by hand),
then put those in your config file
'''
creds = refresh_credentials()
if not creds:
print("No credentials!")
return
print("Getting calendars..")
with build('calendar', 'v3', credentials=creds, cache_discovery=False) as service:
calList = service.calendarList().list(
maxResults = 50,
minAccessRole = 'reader'
).execute()
calendars = calList.get('items', [])
print('Calendars:\n')
if not calendars:
print('No calendars found.')
for calendar in calendars:
cal_id = calendar['id']
cal_name = calendar['summary']
print("- Calendar name: {}, ID: {}".format(cal_name, cal_id))
if __name__ == '__main__':
get_calendars()
| 32.25 | 120 | 0.671189 | 394 | 3,096 | 5.190355 | 0.42132 | 0.022005 | 0.011736 | 0.023472 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0034 | 0.239987 | 3,096 | 95 | 121 | 32.589474 | 0.865703 | 0.377261 | 0 | 0 | 0 | 0 | 0.153384 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042553 | false | 0 | 0.148936 | 0 | 0.234043 | 0.148936 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3a032ed592c9395acc528eda36249d66c3bcd76 | 619 | py | Python | view_npy.py | ryx2/tools | cc223ae5ed41e0a35832282775bd49650f71a24e | [
"MIT"
] | null | null | null | view_npy.py | ryx2/tools | cc223ae5ed41e0a35832282775bd49650f71a24e | [
"MIT"
] | null | null | null | view_npy.py | ryx2/tools | cc223ae5ed41e0a35832282775bd49650f71a24e | [
"MIT"
] | null | null | null | import numpy as np
import sys
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser(
description="terminal view a numpy file or image as np array")
parser.add_argument("data", help=".npy file to be viewed or im.")
parser.add_argument("--img", action='store_true', help="if an image.")
args = parser.parse_args()
if args.img:
import imageio
data = imageio.imread(args.data)
else:
try:
data = np.load(args.data)
except Exception as e:
print(e)
print('trying genfromtxt instead')
data = np.genfromtxt(args.data)
import ipdb
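# Drop into an interactive debugger so the loaded array can be inspected by hand
# (e.g. data.shape, data.dtype, or plt.imshow(data) for image-like arrays).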
ipdb.set_trace()
| 25.791667 | 70 | 0.694669 | 91 | 619 | 4.67033 | 0.56044 | 0.056471 | 0.08 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.195477 | 619 | 23 | 71 | 26.913043 | 0.853414 | 0 | 0 | 0 | 0 | 0 | 0.213247 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 0.285714 | 0.095238 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3a207ed26d2f91c204a97c15e81205d87670102 | 5,667 | py | Python | mlaut/strategies/neural_networks.py | vishalbelsare/mlaut | a3bd4b2591c3144d100f413f6c4c2231392103e5 | [
"BSD-3-Clause"
] | 23 | 2019-01-14T15:12:32.000Z | 2022-03-31T12:23:34.000Z | mlaut/strategies/neural_networks.py | vishalbelsare/mlaut | a3bd4b2591c3144d100f413f6c4c2231392103e5 | [
"BSD-3-Clause"
] | 11 | 2019-01-23T13:39:20.000Z | 2020-04-17T13:25:27.000Z | mlaut/strategies/neural_networks.py | vishalbelsare/mlaut | a3bd4b2591c3144d100f413f6c4c2231392103e5 | [
"BSD-3-Clause"
] | 4 | 2019-01-07T20:46:40.000Z | 2022-03-25T00:00:00.000Z | from mlaut.shared.static_variables import GRIDSEARCH_NUM_CV_FOLDS, GRIDSEARCH_CV_NUM_PARALLEL_JOBS
from mlaut.shared.static_variables import VERBOSE
from tensorflow.python.keras.models import Sequential, load_model, model_from_json
from tensorflow.python.keras.layers import Dense, Activation, Dropout
from tensorflow.python.keras.wrappers.scikit_learn import KerasRegressor, KerasClassifier
from tensorflow.python.keras import optimizers
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import GridSearchCV
import numpy as np
import wrapt
import tensorflow as tf
from mlaut.highlevel.strategies import TabClassifKerasStrategy, TabRegrKerasStrategy
# class OverwrittenSequentialClassifier(Sequential):
# """
# Keras sequential model that overrides the default :func:`tensorflow.python.keras.models.fit` and :func:`tensorflow.python.keras.models.predict` methods.
# """
# def fit(self, X_train, y_train, **kwargs):
# """
# Overrides the default :func:`tensorflow.python.keras.models.fit` and reshapes the `y_train` in one hot array.
# Args:
# X_train: training data
# y_train: Labels that will be converted to onehot array.
# Returns:
# :func:`tensorflow.python.keras.models.fit` object
# """
# onehot_encoder = OneHotEncoder(sparse=False)
# len_y = len(y_train)
# reshaped_y = y_train.reshape(len_y, 1)
# y_train_onehot_encoded = onehot_encoder.fit_transform(reshaped_y)
# # if 'epochs' not in self._hyperparameters:
# # epochs = 1
# # else:
# # epochs = self._hyperparameters
# return super().fit(X_train,
# y_train_onehot_encoded,
# batch_size=kwargs['batch_size'],
# epochs=kwargs['epochs'])
# def predict(self, X_test, batch_size=None, verbose=VERBOSE):
# """
# Overrides the default :func:`tensorflow.python.keras.models.predict` by replacing it with a :func:`tensorflow.python.keras.models.predict_classes`
# Returns:
# :func:`tensorflow.python.keras.models.predict_classes`
# """
# predictions = Sequential.predict(self, X_test, batch_size=batch_size, verbose=verbose)
# return predictions.argmax(axis=1)
# # return super().predict_classes(X_test)
# class KerasClassificationStrategy(CSCKerasStrategy):
# def keras_model_classification(num_classes, input_dim):
# nn_deep_model = OverwrittenSequentialClassifier()
# nn_deep_model.add(Dense(288, input_dim=input_dim, activation='relu'))
# nn_deep_model.add(Dense(144, activation='relu'))
# nn_deep_model.add(Dropout(0.5))
# nn_deep_model.add(Dense(12, activation='relu'))
# nn_deep_model.add(Dense(num_classes, activation='softmax'))
# model_optimizer = optimizers.Adam(lr=0.001)
# nn_deep_model.compile(loss='mean_squared_error', optimizer=model_optimizer, metrics=['accuracy'])
# return nn_deep_model
# def __init__(self,
# estimator=KerasClassifier,
# build_fn=keras_model_classification,
# param_grid={'epochs': 1,
# 'batch_size': None},
# name='Keras4Layers',
# check_input=False):
# print('****************** I like to init')
# print(f'*****Param grid: {param_grid}, {name}')
# super().__init__(estimator=estimator, build_fn=build_fn, param_grid=param_grid, name=name, check_input=check_input)
def keras_model_classification(num_classes, input_dim):
# nn_deep_model = OverwrittenSequentialClassifier()
nn_deep_model = Sequential()
nn_deep_model.add(Dense(288, input_dim=input_dim, activation='relu'))
nn_deep_model.add(Dense(144, activation='relu'))
nn_deep_model.add(Dropout(0.5))
nn_deep_model.add(Dense(12, activation='relu'))
nn_deep_model.add(Dense(num_classes, activation='softmax'))
model_optimizer = optimizers.Adam(lr=0.001)
nn_deep_model.compile(loss='mean_squared_error', optimizer=model_optimizer, metrics=['accuracy'])
return nn_deep_model
param_grid={'epochs': 1,
'batch_size': None}
KerasClassificationStrategy = TabClassifKerasStrategy(estimator=KerasClassifier,
build_fn=keras_model_classification,
param_grid=param_grid,
name='KerasClassifier4Layers',
check_input=False)
def keras_model_regression(input_dim):
nn_deep_model = Sequential()
nn_deep_model.add(Dense(288, input_dim=input_dim, activation='relu'))
nn_deep_model.add(Dense(144, activation='relu'))
nn_deep_model.add(Dropout(0.5))
nn_deep_model.add(Dense(12, activation='relu'))
nn_deep_model.add(Dense(1, activation='sigmoid'))
model_optimizer = optimizers.Adam(lr=0.001)
nn_deep_model.compile(loss='mean_squared_error', optimizer=model_optimizer, metrics=['mae'])
return nn_deep_model
KerasRegressionStrategy = TabRegrKerasStrategy(estimator=KerasRegressor,
build_fn=keras_model_regression,
param_grid=param_grid,
name='KerasRegressor4Layers',
check_input=False)
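# Both strategy objects above share the module-level `param_grid` and wrap the Keras
# model builders through mlaut's tabular strategy classes imported at the top of the file.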
| 41.364964 | 158 | 0.636668 | 618 | 5,667 | 5.572816 | 0.245955 | 0.043554 | 0.079849 | 0.060976 | 0.537747 | 0.512485 | 0.450058 | 0.409698 | 0.39518 | 0.358014 | 0 | 0.012172 | 0.260632 | 5,667 | 136 | 159 | 41.669118 | 0.809785 | 0.510676 | 0 | 0.409091 | 0 | 0 | 0.053137 | 0.015867 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.272727 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3a5004165d6ada562d6358f71622d9f82b157f8 | 3,130 | py | Python | src/rtfparse/parser.py | hanose/rtfparse | 437bf2ef560a275427ae2e48416ec8c12331b370 | [
"MIT"
] | 2 | 2021-08-31T08:26:14.000Z | 2022-03-28T11:28:56.000Z | src/rtfparse/parser.py | hanose/rtfparse | 437bf2ef560a275427ae2e48416ec8c12331b370 | [
"MIT"
] | 1 | 2021-06-10T11:31:16.000Z | 2021-11-21T16:12:57.000Z | src/rtfparse/parser.py | hanose/rtfparse | 437bf2ef560a275427ae2e48416ec8c12331b370 | [
"MIT"
] | 2 | 2021-12-05T17:26:36.000Z | 2022-03-31T13:58:33.000Z | #!/usr/bin/env python
import io
import re
import logging
import pathlib
# Own modules
from rtfparse import re_patterns
from rtfparse import entities
from rtfparse import utils
# Typing
from typing import Optional
from typing import Union
from rtfparse import config_loader
# Setup logging
logger = logging.getLogger(__name__)
class Rtf_Parser:
def __init__(self,
rtf_path: Optional[pathlib.Path]=None,
rtf_file: Optional[Union[io.BufferedReader, io.BytesIO]]=None,
) -> None:
self.rtf_path = rtf_path
self.rtf_file = rtf_file
if not (self.rtf_path or self.rtf_file):
raise ValueError("Need `rtf_path` or `rtf_file` argument")
self.ENCODING_PROBE = 48 # look for encoding information in the first 48 bytes of the file
def read_encoding(self, file: Union[io.BufferedReader, io.BytesIO]) -> str:
probed = file.read(self.ENCODING_PROBE)
group = entities.Group("cp1252", io.BytesIO(probed))
recognized_encodings = (
"ansi",
"ansicpg",
"mac",
"pc",
"pca",
)
# Gather all control words, which could define an encoding:
names = tuple(filter(lambda item: isinstance(item, entities.Control_Word) and item.control_name in recognized_encodings, group.structure))
# Check if the ANSI code page is set as a parameter of any of the control words:
        param = None
for item in names:
# if any item is a Control_Word which has a parameter, we assume that this is the parameter of \ansicpg, and that corresponds to the codepage we are looking for
if item.parameter:
param = item.parameter
if param:
encoding = f"cp{param}"
else:
if names[0].control_name == "ansi":
encoding = "ansi"
elif names[0].control_name == "mac":
encoding = "mac_roman"
elif names[0].control_name == "pc":
encoding = "cp437"
elif names[0].control_name == "pca":
encoding = "cp850"
file.seek(0)
logger.info(f"recognized encoding {encoding}")
return encoding
def parse_file(self) -> entities.Group:
if self.rtf_path is not None:
file = open(self.rtf_path, mode="rb")
elif self.rtf_file is not None:
file = self.rtf_file
else:
file = io.BytesIO(b"")
parsed_object = utils.what_is_being_parsed(file)
logger.info(f"Parsing the structure of {parsed_object}")
try:
encoding = self.read_encoding(file)
self.parsed = entities.Group(encoding, file)
except Exception as err:
logger.exception(err)
finally:
if self.rtf_path is not None:
logger.debug(f"Closing {parsed_object}")
file.close()
logger.info(f"Structure of {parsed_object} parsed")
return self.parsed
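# Minimal usage sketch (illustrative only; assumes an RTF file exists at the given path):
#     rp = Rtf_Parser(rtf_path=pathlib.Path("document.rtf"))
#     root_group = rp.parse_file()  # returns the top-level entities.Group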
if __name__ == "__main__":
pass
| 35.568182 | 172 | 0.594888 | 385 | 3,130 | 4.690909 | 0.348052 | 0.03876 | 0.036545 | 0.037652 | 0.09247 | 0.024363 | 0.024363 | 0 | 0 | 0 | 0 | 0.00892 | 0.319489 | 3,130 | 87 | 173 | 35.977011 | 0.838967 | 0.131949 | 0 | 0.055556 | 0 | 0 | 0.090439 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0.013889 | 0.138889 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3a78bb678c70aacc4fa4fb1b41422f20b1003ff | 330 | py | Python | cmssw/examples/MyAnalyzer/python/MyAnalyzer_cfg.py | guitargeek/PKGBUILDs | a71e887c838827bb876f3ad4badb66c2eda5f61c | [
"MIT"
] | null | null | null | cmssw/examples/MyAnalyzer/python/MyAnalyzer_cfg.py | guitargeek/PKGBUILDs | a71e887c838827bb876f3ad4badb66c2eda5f61c | [
"MIT"
] | null | null | null | cmssw/examples/MyAnalyzer/python/MyAnalyzer_cfg.py | guitargeek/PKGBUILDs | a71e887c838827bb876f3ad4badb66c2eda5f61c | [
"MIT"
] | null | null | null | import FWCore.ParameterSet.Config as cms
process = cms.Process("MyAnalyzer")
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
"file:/home/jonas/PhD/022E2036-A2D9-E711-9A8C-0CC47A13D2A4.root"
)
)
process.analyzer = cms.EDAnalyzer('MyAnalyzer')
process.p = cms.Path(process.analyzer)
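# This fragment is a standard CMSSW configuration; it is normally run with
# `cmsRun MyAnalyzer_cfg.py`, and the input ROOT file path above is specific to the
# original author's machine.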
| 23.571429 | 72 | 0.733333 | 40 | 330 | 6.05 | 0.675 | 0.082645 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.072917 | 0.127273 | 330 | 13 | 73 | 25.384615 | 0.767361 | 0 | 0 | 0 | 0 | 0 | 0.278788 | 0.187879 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3a840e28d30bd9ab4482c1cf21eed740cbb25a5 | 4,513 | py | Python | Math/matrix.py | TimHeiszwolf/Heis_Python_Tools | ffa35b80838673e272dc46a6fff5cafc76ecb239 | [
"MIT"
] | null | null | null | Math/matrix.py | TimHeiszwolf/Heis_Python_Tools | ffa35b80838673e272dc46a6fff5cafc76ecb239 | [
"MIT"
] | null | null | null | Math/matrix.py | TimHeiszwolf/Heis_Python_Tools | ffa35b80838673e272dc46a6fff5cafc76ecb239 | [
"MIT"
] | null | null | null | # Methods for dealing with Matrices. Don't use these. Just use the numpy functions.
import random
import numpy as np
def get_random_matrix(size = [5, 5], value_range = [-10, 10], type_of_value = int):
"""
    A function which can generate many different types of random matrices of whatever dimension is desired.
"""
if len(size)>1:
current_size = size[0]
new_size = [size[i] for i in range(1, len(size))]
return [get_random_matrix(new_size, value_range, type_of_value) for i in range(0, current_size)]
else:
if type_of_value == int:
return [random.randint(value_range[0], value_range[1]) for i in range(0, size[0])]
if type_of_value == float:
return [random.uniform(value_range[0], value_range[1]) for i in range(0, size[0])]
else:
print('Type:', type_of_value, 'is not supported by this function.')
def calculate_determinant(matrix):
"""
    A function which calculates the determinant of matrices with recursion.
TODO: add validation.
"""
#print(np.array(matrix))
size_matrix = len(matrix)
if size_matrix==1:
return matrix[0][0]
else:
total = 0
for i in range(0, size_matrix):
new_matrix = [[matrix[y][x] for x in range(0, size_matrix) if x != i] for y in range(1, size_matrix)]
total = total + (-1)**i * matrix[0][i] * calculate_determinant(new_matrix)
return total
def Guassian_elimination(matrix, vector):
"""
    Perform Gaussian elimination with partial pivoting on `matrix` and `vector` in place.
    Reference: https://www.youtube.com/watch?v=3aO2eG9lGk4
"""
#print('ORG\n', matrix, vector, '\n')
for i in range(0, min(len(matrix[0]), len(matrix))):
max_factor = abs(matrix[i][i])
max_factor_index = 0
for j in range(i, len(matrix[i])):
# See if two rows need to be changed
if abs(matrix[j][i]) > max_factor:
max_factor = abs(matrix[j][i])
max_factor_index = j
if max_factor_index != 0:
# Swapping two rows
storage = matrix[i].copy()
#print('SW1\n',matrix, vector, '\n')
matrix[i] = matrix[max_factor_index]
#print('SW2\n',matrix, vector, '\n')
matrix[max_factor_index] = storage
#print('SW3\n',matrix, vector, '\n')
storage = vector[i]
vector[i] = vector[max_factor_index]
vector[max_factor_index] = storage
#print('SWP\n',matrix, vector, '\n')
""" # This part can be uncommented if you want to have the pivots be equal to one.
scaling_factor = (1 / matrix[i][i])
matrix[i] = scaling_factor * matrix[i]
vector[i] = scaling_factor * vector[i]
print('SCL\n', matrix, vector, scaling_factor, '\n')"""
for k in range(i + 1, len(matrix[i])):
swap_factor = matrix[k][i] / matrix[i][i]
matrix[k] = matrix[k] - swap_factor * matrix[i]
vector[k] = vector[k] - swap_factor * vector[i]
#print('SUB\n',matrix, vector, '\n')
#print('RES\n', matrix, vector, '\n')
return matrix, vector# Not needed but nice to do.
def solve_system_of_equations(matrix, vector):
matrix, vector = Guassian_elimination(matrix, vector)
x = 0 * vector
for i in [len(vector) - 1 - i for i in range(0, len(vector))]:
x[i] = (vector[i] - sum([x[j] * matrix[i][j] for j in range(0, len(matrix[i]))])) / matrix[i][i]
return x
def get_inverse_of_matrix(matrix):
"""
    A function which calculates the inverse of a matrix using the determinant. Not as quick as using Gauss-Jordan elimination but much easier to implement. https://youtu.be/xZBbfLLfVV4 and https://youtu.be/ArcrdMkEmKo
"""
if len(matrix) != len(matrix[0]):
raise ValueError('Can only handle square matrices')
size_matrix = len(matrix)
determinant = calculate_determinant(matrix)
cofactor_matrix = 0 * matrix.copy()
for i in range(0, size_matrix):
for j in range(0, size_matrix):
sub_matrix = np.array([[matrix[y][x] for x in range(0, size_matrix) if x != j] for y in range(0, size_matrix) if y != i])
#print(sub_matrix, '\n', i, j, '\n\n')
cofactor_matrix[i][j] = (-1)**(i + j) * calculate_determinant(sub_matrix)
#print(cofactor_matrix, '\n')
inverse = np.transpose(cofactor_matrix) * ( 1 / determinant)
return inverse
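# Minimal self-test sketch (not part of the original module): exercises the helpers
# above on a small 2x2 system A x = b; the concrete numbers are illustrative only.
if __name__ == "__main__":
    A = np.array([[2.0, 1.0], [1.0, 3.0]])
    b = np.array([5.0, 10.0])
    print(calculate_determinant(A))  # expected 5.0
    print(solve_system_of_equations(A.copy(), b.copy()))  # expected [1.0, 3.0]
    print(get_inverse_of_matrix(np.array([[2.0, 1.0], [1.0, 3.0]])))  # expected [[0.6, -0.2], [-0.2, 0.4]]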
| 36.395161 | 211 | 0.581653 | 635 | 4,513 | 4.009449 | 0.222047 | 0.043991 | 0.037706 | 0.034564 | 0.192066 | 0.123331 | 0.073841 | 0.056559 | 0.056559 | 0.056559 | 0 | 0.016444 | 0.285841 | 4,513 | 123 | 212 | 36.691057 | 0.773503 | 0.21006 | 0 | 0.116667 | 0 | 0 | 0.021964 | 0 | 0 | 0 | 0 | 0.00813 | 0 | 1 | 0.083333 | false | 0 | 0 | 0 | 0.216667 | 0.016667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3acb021cd22a119b718d7aceb7d3ebd579b063b | 233 | py | Python | python/tb6600/set-power.py | Heimkino-Praxis/screen-masking | 024f0f84c524f897fc00d0c177b618b610a560b5 | [
"MIT"
] | 3 | 2020-09-09T12:59:24.000Z | 2021-10-14T13:45:11.000Z | python/tb6600/set-power.py | Heimkino-Praxis/screen-masking | 024f0f84c524f897fc00d0c177b618b610a560b5 | [
"MIT"
] | null | null | null | python/tb6600/set-power.py | Heimkino-Praxis/screen-masking | 024f0f84c524f897fc00d0c177b618b610a560b5 | [
"MIT"
] | null | null | null | import stepper
import sys
if (len(sys.argv) != 2):
print("missing parameter: 0|1");
sys.exit();
value = int(sys.argv[1])
stepper.setLock(0)
stepper.setPower(value)
if (value > 0):
print("power on")
else:
print("power off")
| 12.263158 | 33 | 0.656652 | 37 | 233 | 4.135135 | 0.567568 | 0.091503 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030457 | 0.154506 | 233 | 18 | 34 | 12.944444 | 0.746193 | 0 | 0 | 0 | 0 | 0 | 0.167382 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3afea34374cd7ab693f16bdea1195e824d2afa9 | 10,880 | py | Python | huawei/services/device_config.py | tuxuser/huawei-lpv2 | 38bf2fd7e5a21978ffacc4bc58923c7c2389c7ab | [
"MIT"
] | 42 | 2019-09-01T08:59:35.000Z | 2022-03-06T17:48:08.000Z | huawei/services/device_config.py | tuxuser/huawei-lpv2 | 38bf2fd7e5a21978ffacc4bc58923c7c2389c7ab | [
"MIT"
] | 7 | 2019-09-01T08:33:16.000Z | 2021-05-26T13:44:36.000Z | huawei/services/device_config.py | tuxuser/huawei-lpv2 | 38bf2fd7e5a21978ffacc4bc58923c7c2389c7ab | [
"MIT"
] | 11 | 2019-09-01T03:43:50.000Z | 2021-07-01T14:42:12.000Z | import enum
from dataclasses import dataclass
from datetime import datetime
from logging import getLogger
from typing import Tuple
from ..protocol import (
AUTH_VERSION,
Command,
MismatchError,
NONCE_LENGTH,
PROTOCOL_VERSION,
Packet,
TLV,
check_result,
create_bonding_key,
decode_int,
digest_challenge,
digest_response,
encode_int,
encrypt_packet,
hexlify,
set_status,
)
logger = getLogger(__name__)
class DeviceConfig:
id = 1
class LinkParams:
id = 1
class Tags:
ProtocolVersion = 1
MaxFrameSize = 2
MaxLinkSize = 3
ConnectionInterval = 4
ServerNonce = 5
PathExtendNumber = 6 # apparently used for BTVersion == 0
class SetDateFormat:
id = 4
class Tags:
DateFormat = 2
TimeFormat = 3
SetDateFormat = 129
class SetTime:
id = 5
class Tags:
Timestamp = 1
ZoneOffset = 2
class ProductInfo:
id = 7
class Tags:
BTVersion = 1
ProductType = 2 # int
HardwareVersion = 3
PhoneNumber = 4
MacAddress = 5
IMEI = 6
SoftwareVersion = 7
OpenSourceVersion = 8
SerialNumber = 9
ProductModel = 10
eMMCId = 11
HealthAppSupport = 13 # int
class Bond:
id = 14
class Tags:
BondRequest = 1
Status = 2
RequestCode = 3
ClientSerial = 5
BondingKey = 6
InitVector = 7
class BondParams:
id = 15
class Tags:
Status = 1
StatusInfo = 2
ClientSerial = 3
BTVersion = 4
MaxFrameSize = 5
ClientMacAddress = 7
EncryptionCounter = 9
class Auth:
id = 19
class Tags:
Challenge = 1
Nonce = 2
class BatteryLevel:
id = 8
class Tags:
GetStatus = 1
class ActivateOnRotate:
id = 9
class Tags:
SetStatus = 1
class FactoryReset:
id = 13
class Tags:
SetStatus = 1
class NavigateOnRotate:
id = 27
class Tags:
SetStatus = 1
class LeftRightWrist:
id = 26
class Tags:
SetStatus = 1
def request_link_params() -> Packet:
return Packet(
service_id=DeviceConfig.id,
command_id=DeviceConfig.LinkParams.id,
command=Command(
tlvs=[
TLV(DeviceConfig.LinkParams.Tags.ProtocolVersion),
TLV(DeviceConfig.LinkParams.Tags.MaxFrameSize),
TLV(DeviceConfig.LinkParams.Tags.MaxLinkSize),
TLV(DeviceConfig.LinkParams.Tags.ConnectionInterval),
],
),
)
@dataclass
class LinkParams:
max_frame_size: int
max_link_size: int
connection_interval: int # milliseconds
@check_result
def process_link_params(command: Command) -> Tuple[LinkParams, bytes]:
link_params = LinkParams(
max_frame_size=decode_int(command[DeviceConfig.LinkParams.Tags.MaxFrameSize].value),
max_link_size=decode_int(command[DeviceConfig.LinkParams.Tags.MaxLinkSize].value),
connection_interval=decode_int(command[DeviceConfig.LinkParams.Tags.ConnectionInterval].value),
)
protocol_version = decode_int(command[DeviceConfig.LinkParams.Tags.ProtocolVersion].value)
auth_version = decode_int(command[DeviceConfig.LinkParams.Tags.ServerNonce].value[:2])
server_nonce = bytes(command[DeviceConfig.LinkParams.Tags.ServerNonce].value[2:18])
# TODO: optional path extend number parsing
if protocol_version != PROTOCOL_VERSION:
raise MismatchError("protocol version", protocol_version, PROTOCOL_VERSION)
if auth_version != AUTH_VERSION:
raise MismatchError("authentication scheme version", auth_version, AUTH_VERSION)
if len(server_nonce) != NONCE_LENGTH:
raise MismatchError("server nonce length", len(server_nonce), NONCE_LENGTH)
logger.info(
f"Negotiated link parameters: "
f"{link_params.max_frame_size}, "
f"{link_params.max_link_size}, "
f"{link_params.connection_interval}, "
f"{hexlify(server_nonce)}",
)
return link_params, server_nonce
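# Illustrative handshake flow for the helpers above (transport, client nonce generation
# and key handling are assumed to live elsewhere in the package):
#     packet = request_link_params()                                  # serialized and sent to the device
#     link_params, server_nonce = process_link_params(reply_command)  # reply parsed into a Command
#     auth_packet = request_authentication(client_nonce, server_nonce)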
def request_authentication(client_nonce: bytes, server_nonce: bytes) -> Packet:
return Packet(
service_id=DeviceConfig.id,
command_id=DeviceConfig.Auth.id,
command=Command(
tlvs=[
TLV(tag=DeviceConfig.Auth.Tags.Challenge, value=digest_challenge(client_nonce, server_nonce)),
TLV(tag=DeviceConfig.Auth.Tags.Nonce, value=(encode_int(AUTH_VERSION) + client_nonce)),
],
),
)
@check_result
def process_authentication(command: Command, client_nonce: bytes, server_nonce: bytes):
expected_answer = digest_response(client_nonce, server_nonce)
actual_answer = command[DeviceConfig.Auth.Tags.Challenge].value
if expected_answer != actual_answer:
raise MismatchError("challenge answer", actual_answer, expected_answer)
def request_bond_params(client_serial: str, client_mac: str) -> Packet:
return Packet(
service_id=DeviceConfig.id,
command_id=DeviceConfig.BondParams.id,
command=Command(
tlvs=[
TLV(tag=DeviceConfig.BondParams.Tags.Status),
TLV(tag=DeviceConfig.BondParams.Tags.ClientSerial, value=client_serial.encode()),
TLV(tag=DeviceConfig.BondParams.Tags.BTVersion, value=b"\x02"),
TLV(tag=DeviceConfig.BondParams.Tags.MaxFrameSize),
TLV(tag=DeviceConfig.BondParams.Tags.ClientMacAddress, value=client_mac.encode()),
TLV(tag=DeviceConfig.BondParams.Tags.EncryptionCounter),
],
),
)
@check_result
def process_bond_params(command: Command) -> Tuple[int, int]:
bond_status = decode_int(command[DeviceConfig.BondParams.Tags.Status].value)
bond_status_info = decode_int(command[DeviceConfig.BondParams.Tags.StatusInfo].value)
bt_version = decode_int(command[DeviceConfig.BondParams.Tags.BTVersion].value)
max_frame_size = decode_int(command[DeviceConfig.BondParams.Tags.MaxFrameSize].value)
encryption_counter = decode_int(command[DeviceConfig.BondParams.Tags.EncryptionCounter].value)
# TODO: check bond status
logger.info(
f"Negotiated bond params: "
f"{bond_status}, "
f"{bond_status_info}, "
f"{bt_version}, "
f"{max_frame_size}, "
f"{encryption_counter}",
)
return max_frame_size, encryption_counter
def request_bond(client_serial: str, device_mac: str, key: bytes, iv: bytes) -> Packet:
return Packet(
service_id=DeviceConfig.id,
command_id=DeviceConfig.Bond.id,
command=Command(
tlvs=[
TLV(tag=DeviceConfig.Bond.Tags.BondRequest),
TLV(tag=DeviceConfig.Bond.Tags.RequestCode, value=b"\x00"),
TLV(tag=DeviceConfig.Bond.Tags.ClientSerial, value=client_serial.encode()),
TLV(tag=DeviceConfig.Bond.Tags.BondingKey, value=create_bonding_key(device_mac, key, iv)),
TLV(tag=DeviceConfig.Bond.Tags.InitVector, value=iv),
],
),
)
class DateFormat(enum.Enum):
YearFirst = 1
MonthFirst = 2
DayFirst = 3
class TimeFormat(enum.Enum):
Hours12 = 1
Hours24 = 2
@encrypt_packet
def set_date_format(date_format: DateFormat, time_format: TimeFormat) -> Packet:
date_format_tlvs = [
TLV(tag=DeviceConfig.SetDateFormat.Tags.DateFormat, value=encode_int(date_format.value, length=1)),
TLV(tag=DeviceConfig.SetDateFormat.Tags.TimeFormat, value=encode_int(time_format.value, length=1)),
]
return Packet(
service_id=DeviceConfig.id,
command_id=DeviceConfig.SetDateFormat.id,
command=Command(
tlvs=[
TLV(tag=DeviceConfig.SetDateFormat.Tags.SetDateFormat, value=bytes(Command(tlvs=date_format_tlvs))),
],
),
)
@encrypt_packet
def set_time(moment: datetime) -> Packet:
def request_set_time(timestamp: float, zone_hours: int, zone_minutes: int) -> Packet:
zone_offset = encode_int(zone_hours, length=1) + encode_int(zone_minutes, length=1)
return Packet(
service_id=DeviceConfig.id,
command_id=DeviceConfig.SetTime.id,
command=Command(
tlvs=[
TLV(tag=DeviceConfig.SetTime.Tags.Timestamp, value=encode_int(int(timestamp), length=4)),
TLV(tag=DeviceConfig.SetTime.Tags.ZoneOffset, value=zone_offset),
],
),
)
offset = (moment - datetime.utcfromtimestamp(moment.timestamp())).total_seconds() / 3600
float_hours, float_minutes = divmod(offset, 1)
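    # Negative zone offsets are encoded below by adding 128 to the absolute hour value,
    # while the minute part is always sent as an absolute value.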
offset_hours = int(abs(float_hours) + 128) if float_hours < 0 else int(float_hours)
offset_minutes = int(abs(float_minutes * 60))
return request_set_time(moment.timestamp(), offset_hours, offset_minutes)
@encrypt_packet
def set_activate_on_rotate(state: bool) -> Packet:
return set_status(
DeviceConfig.id,
DeviceConfig.ActivateOnRotate.id,
DeviceConfig.ActivateOnRotate.Tags.SetStatus,
state,
)
@encrypt_packet
def set_navigate_on_rotate(state: bool) -> Packet:
return set_status(
DeviceConfig.id,
DeviceConfig.NavigateOnRotate.id,
DeviceConfig.NavigateOnRotate.Tags.SetStatus,
state,
)
@encrypt_packet
def request_battery_level() -> Packet:
return Packet(
service_id=DeviceConfig.id,
command_id=DeviceConfig.BatteryLevel.id,
command=Command(
tlvs=[
TLV(tag=DeviceConfig.BatteryLevel.Tags.GetStatus),
],
),
)
@check_result
def process_battery_level(command: Command):
return decode_int(command[DeviceConfig.BatteryLevel.Tags.GetStatus].value)
@encrypt_packet
def set_right_wrist(state: bool) -> Packet:
return set_status(
DeviceConfig.id,
DeviceConfig.LeftRightWrist.id,
DeviceConfig.LeftRightWrist.Tags.SetStatus,
state,
)
@encrypt_packet
def factory_reset() -> Packet:
return set_status(
DeviceConfig.id,
DeviceConfig.FactoryReset.id,
DeviceConfig.FactoryReset.Tags.SetStatus,
True,
)
@encrypt_packet
def request_product_info() -> Packet:
return Packet(
service_id=DeviceConfig.id,
command_id=DeviceConfig.ProductInfo.id,
command=Command(tlvs=[TLV(tag=i) for i in range(14)]),
)
| 28.113695 | 116 | 0.636489 | 1,137 | 10,880 | 5.91029 | 0.170624 | 0.05 | 0.050893 | 0.045833 | 0.362798 | 0.283482 | 0.20625 | 0.11994 | 0.11994 | 0.094643 | 0 | 0.013041 | 0.274081 | 10,880 | 386 | 117 | 28.186529 | 0.837807 | 0.011121 | 0 | 0.293729 | 0 | 0 | 0.031994 | 0.010603 | 0 | 0 | 0 | 0.002591 | 0 | 1 | 0.056106 | false | 0 | 0.019802 | 0.036304 | 0.250825 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3b29d9a1f0bfb0365245f5647a6528266304cb8 | 1,023 | py | Python | 2021/06-python-decorator/decorator_sample.py | tswast/code-snippets | f859a9f4c747326bff9456a4f1f6578453cad2db | [
"Apache-2.0"
] | 14 | 2017-03-09T23:12:42.000Z | 2022-01-13T11:15:11.000Z | 2021/06-python-decorator/decorator_sample.py | tswast/code-snippets | f859a9f4c747326bff9456a4f1f6578453cad2db | [
"Apache-2.0"
] | 1 | 2020-12-31T04:12:08.000Z | 2021-05-08T05:20:56.000Z | 2021/06-python-decorator/decorator_sample.py | tswast/code-snippets | f859a9f4c747326bff9456a4f1f6578453cad2db | [
"Apache-2.0"
] | 8 | 2017-05-31T16:55:46.000Z | 2020-12-29T22:00:32.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import functools
def memoize(func):
answers = {}
@functools.wraps(func)
def memoized(n):
if n not in answers:
answers[n] = func(n)
return answers[n]
return memoized
@memoize
def fibonacci(n):
if n <= 2:
return 1
return fibonacci(n - 1) + fibonacci(n - 2)
start = time.perf_counter()
fib = fibonacci(40)
end = time.perf_counter()
print(fib)
print(end - start)
| 23.25 | 74 | 0.68915 | 150 | 1,023 | 4.686667 | 0.56 | 0.085349 | 0.036984 | 0.045519 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017654 | 0.224829 | 1,023 | 43 | 75 | 23.790698 | 0.868852 | 0.538612 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15 | false | 0 | 0.1 | 0 | 0.45 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3b39c2289d1b79f44c6ce12edc0939c142807ba | 8,742 | py | Python | pysymoro/baseparams.py | songhongxiang/symoro | 3db657778337e89c63d383789df5e4d2994bd542 | [
"MIT"
] | 109 | 2015-01-19T11:17:11.000Z | 2022-03-19T13:24:02.000Z | pysymoro/baseparams.py | songhongxiang/symoro | 3db657778337e89c63d383789df5e4d2994bd542 | [
"MIT"
] | 21 | 2015-04-17T11:19:57.000Z | 2021-11-30T03:21:49.000Z | pysymoro/baseparams.py | songhongxiang/symoro | 3db657778337e89c63d383789df5e4d2994bd542 | [
"MIT"
] | 50 | 2015-08-27T06:32:54.000Z | 2022-03-16T03:35:13.000Z | # -*- coding: utf-8 -*-
# This file is part of the OpenSYMORO project. Please see
# https://github.com/symoro/symoro/blob/master/LICENCE for the licence.
"""
This module of the SYMORO package contains functions to compute the base
inertial parameters.
"""
import sympy
from sympy import Matrix
from pysymoro.geometry import compute_rot_trans
from pysymoro.geometry import Transform
from symoroutils import symbolmgr
from symoroutils import tools
inert_names = ('XXR', 'XYR', 'XZR', 'YYR', 'YZR',
'ZZR', 'MXR', 'MYR', 'MZR', 'MR')
# TODO:Finish base parameters computation
def base_inertial_parameters(robo, symo):
"""Computes grouped inertia parameters. New parametrization
    contains fewer parameters but generates the same dynamic model
Parameters
==========
robo : Robot
Instance of robot description container
Returns
=======
symo.sydi : dictionary
        Dictionary with the information of all the substitutions
"""
lam = [0 for i in xrange(robo.NL)]
# init transformation
antRj, antPj = compute_rot_trans(robo, symo)
for j in reversed(xrange(1, robo.NL)):
if robo.sigma[j] == 0:
# general grouping
compute_lambda(robo, symo, j, antRj, antPj, lam)
group_param_rot(robo, symo, j, lam)
# special grouping
group_param_rot_spec(robo, symo, j, lam, antRj, antPj)
pass
elif robo.sigma[j] == 1:
# general grouping
group_param_prism(robo, symo, j, antRj)
# special grouping
group_param_prism_spec(robo, symo, j, antRj, antPj)
pass
elif robo.sigma[j] == 2:
            # fixed joint, group everything
compute_lambda(robo, symo, j, antRj, antPj, lam)
group_param_fix(robo, symo, j, lam)
symo.write_line('*=*')
def vec_mut_J(v, u):
"""Internal function. Needed for inertia parameters transformation
Parameters
==========
v, u : Matrix 3x1
two axis vectors
Returns : Matrix 6x1
"""
return Matrix([v[0]*u[0], v[0]*u[1], v[0]*u[2],
v[1]*u[1], v[1]*u[2], v[2]*u[2]])
def vec_mut_MS(v, P):
"""Internal function. Needed for inertia parameters transformation
Parameters
==========
v : Matrix 3x1
axis vector
P : Matrix 3x1
position vector
Returns : Matrix 6x1
"""
U = - tools.skew(v)*tools.skew(P)
return Matrix([2*U[0, 0], U[0, 1] + U[1, 0], U[0, 2] + U[2, 0],
2*U[1, 1], U[1, 2] + U[2, 1], 2*U[2, 2]])
def vec_mut_M(P):
"""Internal function. Needed for inertia parameters transformation
Parameters
==========
P : Matrix 3x1
position vector
Returns : Matrix 6x1
"""
U = -tools.skew(P)*tools.skew(P)
return Matrix([U[0, 0], U[0, 1], U[0, 2], U[1, 1], U[1, 2], U[2, 2]])
def compute_lambda(robo, symo, j, antRj, antPj, lam):
"""Internal function. Computes the inertia parameters
transformation matrix
Notes
=====
    lam is the output parameter
"""
lamJJ_list = []
lamJMS_list = []
for e1 in xrange(3):
for e2 in xrange(e1, 3):
u = vec_mut_J(antRj[j][:, e1], antRj[j][:, e2])
if e1 != e2:
u += vec_mut_J(antRj[j][:, e2], antRj[j][:, e1])
lamJJ_list.append(u.T)
for e1 in xrange(3):
v = vec_mut_MS(antRj[j][:, e1], antPj[j])
lamJMS_list.append(v.T)
lamJJ = Matrix(lamJJ_list).T # , 'LamJ', j)
lamJMS = symo.mat_replace(Matrix(lamJMS_list).T, 'LamMS', j)
lamJM = symo.mat_replace(vec_mut_M(antPj[j]), 'LamM', j)
lamJ = lamJJ.row_join(lamJMS).row_join(lamJM)
lamMS = sympy.zeros(3, 6).row_join(antRj[j]).row_join(antPj[j])
lamM = sympy.zeros(1, 10)
lamM[9] = 1
lam[j] = Matrix([lamJ, lamMS, lamM])
def group_param_rot(robo, symo, j, lam):
"""Internal function. Groups inertia parameters according to the
general rule for a rotational joint.
Notes
=====
    robo is the output parameter
"""
Kj = robo.get_inert_param(j)
lam03 = lam[j][:, 0] + lam[j][:, 3]
lam03 = lam03.applyfunc(symo.C2S2_simp)
for i in (3, 8, 9):
Kj[i] = symo.replace(Kj[i], inert_names[i], j)
if robo.ant[j] != -1:
Kant = robo.get_inert_param(robo.ant[j])
Kant += lam03*Kj[3] + lam[j][:, 8]*Kj[8] + lam[j][:, 9]*Kj[9]
robo.put_inert_param(Kant, robo.ant[j])
Kj[0] -= Kj[3] # XX
Kj[3] = 0 # YY
Kj[8] = 0 # MZ
Kj[9] = 0 # M
robo.put_inert_param(Kj, j)
def group_param_rot_spec(robo, symo, j, lam, antRj, antPj):
"""Internal function. Groups inertia parameters according to the
special rule for a rotational joint.
Notes
=====
    robo is the output parameter
"""
chainj = robo.chain(j)
r1, r2, orthog = Transform.find_r12(robo, chainj, antRj, j)
kRj, all_paral = Transform.kRj(robo, antRj, r1, chainj)
r1_Px_j, r1_Py_j, r1_Pz_j = Transform.kPj(
robo, antPj, antRj, r1, chainj
)
Kj = robo.get_inert_param(j)
to_replace = {0, 1, 2, 4, 5, 6, 7}
if Transform.z_paral(kRj):
Kj[0] = 0 # XX
Kj[1] = 0 # XY
Kj[2] = 0 # XZ
Kj[4] = 0 # YZ
to_replace -= {0, 1, 2, 4}
joint_axis = antRj[chainj[-1]].col(2)
if all_paral and \
(robo.G.norm() == sympy.Abs(joint_axis.dot(robo.G))) and \
(r1_Px_j == 0) and (r1_Py_j == 0):
Kj[6] = 0 # MX
Kj[7] = 0 # MY
to_replace -= {6, 7}
if j == r1 or(j == r2 and orthog):
Kj[5] += robo.IA[j] # ZZ
robo.IA[j] = 0
for i in to_replace:
Kj[i] = symo.replace(Kj[i], inert_names[i], j)
robo.put_inert_param(Kj, j)
def group_param_fix(robo, symo, j, lam):
"""Internal function. Groups inertia parameters according to the
    general rule for a fixed joint.
Notes
=====
    robo is the output parameter
"""
Kj = robo.get_inert_param(j)
for i in xrange(10):
Kj[i] = symo.replace(Kj[i], inert_names[i], j)
if robo.ant[j] != -1:
Kant = robo.get_inert_param(robo.ant[j])
Kant += lam[j]*Kj
robo.put_inert_param(Kant, robo.ant[j])
robo.put_inert_param(sympy.zeros(10, 1), j)
def group_param_prism(robo, symo, j, antRj):
"""Internal function. Groups inertia parameters according to the
general rule for a prismatic joint.
Notes
=====
    robo is the output parameter
"""
Kj = robo.get_inert_param(j)
for i in xrange(6):
Kj[i] = symo.replace(Kj[i], inert_names[i], j)
robo.put_inert_param(Kj, j)
if robo.ant[j] != -1:
antJj = antRj[j]*robo.J[j]*antRj[j].T
robo.J[robo.ant[j]] += antJj
robo.J[j] = sympy.zeros(3, 3)
def group_param_prism_spec(robo, symo, j, antRj, antPj):
"""Internal function. Groups inertia parameters according to the
special rule for a prismatic joint.
Notes
=====
    robo is the output parameter
"""
chainj = robo.chain(j)
r1, r2, orthog = Transform.find_r12(robo, chainj, antRj, j)
Kj = robo.get_inert_param(j)
kRj, all_paral = Transform.kRj(robo, antRj, r1, chainj)
to_replace = {6, 7, 8, 9}
if r1 < j and j < r2:
if Transform.z_paral(kRj):
Kj[8] = 0 # MZ
for i in (6, 7):
Kj[i] = symo.replace(Kj[i], inert_names[i], j)
robo.MS[robo.ant[j]] += antRj[j]*Matrix([Kj[6], Kj[7], 0])
robo.JJ[2, 2] -= Kj[6]*antPj[j][0] + Kj[7]*antPj[j][1]
Kj[6] = 0 # MX
Kj[7] = 0 # MY
to_replace -= {6, 7, 8}
else:
jar1 = kRj.row(2)
if jar1[2] != 0:
Kj[6] -= jar1[0]/jar1[2]*Kj[8]
Kj[7] -= jar1[1]/jar1[2]*Kj[8]
Kj[8] = 0 # MZ
to_replace -= {8}
elif jar1[0]*jar1[1] != 0:
Kj[6] -= jar1[0]/jar1[1]*Kj[7]
Kj[7] = 0 # MY
to_replace -= {7}
elif jar1[0] != 0:
Kj[7] = 0 # MY
to_replace -= {7}
else:
Kj[6] = 0 # MX
to_replace -= {6}
elif j < r1:
Kj[6] = 0 # MX
Kj[7] = 0 # MY
Kj[8] = 0 # MZ
to_replace -= {6, 7, 8}
    # TODO: rewrite
dotGa = Transform.sna(antRj[j])[2].dot(robo.G)
if dotGa == tools.ZERO:
revol_align = robo.ant[robo.ant[j]] == 0 and robo.ant[j] == tools.ZERO
if robo.ant[j] == 0 or revol_align:
Kj[9] += robo.IA[j]
robo.IA[j] = 0
for i in to_replace:
Kj[i] = symo.replace(Kj[i], inert_names[i], j)
robo.put_inert_param(Kj, j)
| 29.734694 | 78 | 0.549188 | 1,321 | 8,742 | 3.536715 | 0.162755 | 0.025685 | 0.025043 | 0.020976 | 0.533818 | 0.508348 | 0.462115 | 0.422303 | 0.399829 | 0.33476 | 0 | 0.042191 | 0.300503 | 8,742 | 293 | 79 | 29.836177 | 0.721832 | 0.23553 | 0 | 0.361963 | 0 | 0 | 0.006466 | 0 | 0 | 0 | 0 | 0.003413 | 0 | 1 | 0.06135 | false | 0.01227 | 0.03681 | 0 | 0.116564 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3b4d14d3f330ee0c96bf4049e9687a82a51d0f3 | 7,543 | py | Python | app/risks.py | allezalex/pyro-platform | 0205ca4121aff1185580fb0e97a211336c22792a | [
"Apache-2.0"
] | 6 | 2020-12-13T19:08:51.000Z | 2022-01-09T02:44:38.000Z | app/risks.py | allezalex/pyro-platform | 0205ca4121aff1185580fb0e97a211336c22792a | [
"Apache-2.0"
] | 41 | 2020-11-11T15:08:13.000Z | 2022-02-03T10:26:14.000Z | app/risks.py | allezalex/pyro-platform | 0205ca4121aff1185580fb0e97a211336c22792a | [
"Apache-2.0"
] | 3 | 2021-03-16T19:07:46.000Z | 2022-01-18T19:12:44.000Z | # Copyright (C) 2021, Pyronear contributors.
# This program is licensed under the Apache License version 2.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
"""
The following file is dedicated to the "Risk Score" view of the dashboard.
Following a first section dedicated to imports, the content section is made of 3 blocks:
- the departments and risk score acquisition block, used to fetch the scores of each department;
- the choropleth map attributes block, which constructs the dl.GeoJSON object, as well as the color scale;
- a final block mobilising previously defined functions to instantiate the "Risk Score" map.
Most functions defined below are called in the main.py file, in the risks callbacks.
"""
# ----------------------------------------------------------------------------------------------------------------------
# IMPORTS
# NumPy to generate the score classes in the color scale
import numpy as np
# Useful imports to open and read the GeoJSON file and get risk data from the API
import requests
import config as cfg
# Various modules provided by Dash to build app components
import dash_core_components as dcc
import dash_html_components as html
import dash_leaflet as dl
import dash_leaflet.express as dlx
# Various imports from utils.py, useful for both Alerts and Risks dashboards
from utils import map_style, build_filters_object, build_legend_box
# ----------------------------------------------------------------------------------------------------------------------
# CONTENT
# ----------------------------------------------------------------------------------------------------------------------
# Departments and risk score acquisition
# The following block fetches risk scores from the data science team and adds them up to the departments geojson.
# NB: for now, scores are acquired from a static json file on GitHub; the API call is still to be implemented.
# We read the GeoJSON file from the Pyro-Risk release (URL in config.py) and store it in the departments variable
departments = requests.get(cfg.GEOJSON_FILE).json()
# We fetch the department risk score json and store it in the risk_json variable
# When everything is validated, we'll request the data directly from the API
risk_json = requests.get(cfg.PYRORISK_FALLBACK).json()
# We add to each department in the geojson a new property called "score" that corresponds to the risk level
for department in departments['features']:
dpt_name = department['properties']['nom']
geocode_list = [dpt['geocode'] for dpt in risk_json]
if dpt_name in geocode_list:
risk_json_index = geocode_list.index(dpt_name)
department['properties']['score'] = risk_json[risk_json_index]['score']
else:
department['properties']['score'] = 0
# ----------------------------------------------------------------------------------------------------------------------
# Choropleth map attributes
# The following block is used to instantiate the various Dash Leaflet objects needed to build the choropleth map.
def build_risks_geojson_and_colorbar(opacity_level=0.75):
"""
This function creates the main attributes specific to the choropleth map.
It simply takes as input an opacity level, which defaults to 0.75, for coloring the departments.
It returns:
- a dl.GeoJSON object that allows to displays the departments' boundaries and respective risk score categories;
- a colorbar object that distinguishes, as shades of yellow and red, 8 categories of risk score from 0 to 1.
"""
# First step is to prepare the choropleth map by building the color scale corresponding to score risks
# To define 8 risk levels between 0 and 1, we need to choose 9 floats that will serve as borders
classes = np.linspace(0, 1, 9)
# We choose 8 shades of yellow and red to define our color scale
colorscale = ['#FFEDA0', '#FED976', '#FEB24C', '#FD8D3C', '#FC4E2A', '#E31A1C', '#BD0026', '#800026']
# We create a 'categories' object of the right format, then plug it into the Dash Leaflet
# function instantiating the colorbar
ctg = ["{}+".format(round(cls, 2)) for i, cls in enumerate(classes[:-1])]
colorbar = dlx.categorical_colorbar(categories=ctg, colorscale=colorscale, width=500, height=30,
position="bottomleft")
# We define the style of department delimitations on the map
# (opacity and color of borders, opacity of color backgrounds...)
scale_style = dict(weight=2, opacity=0.9, color='white', dashArray='3', fillOpacity=opacity_level)
# We finally instantiate the dl.GeoJSON object that will be attributed to the "Niveaux de Risque" map
geojson = dl.GeoJSON(data=departments,
id='geojson_risks',
zoomToBoundsOnClick=True,
hoverStyle=dict(weight=3,
color='#666',
dashArray=''),
hideout=dict(colorscale=colorscale,
classes=classes,
style=scale_style,
color_prop='score'),
options=dict(style=dlx.choropleth.style))
return geojson, colorbar
def build_opacity_slider():
"""
This function instantiates the slider located in the blank space on the left of the map,
that allows the user to choose the most appropriate color opacity level when displaying
the risk score associated with the various departments.
"""
slider_title = dcc.Markdown("Choisissez le niveau d'opacité des aplats de couleurs :")
slider = dcc.Slider(id='opacity_slider_risks',
min=0, max=1,
step=0.01,
marks={0: '0%', 0.25: '25%', 0.5: '50%', 0.75: '75%', 1: '100%'},
value=0.75)
slider_div = html.Div(style=dict(width=330),
children=[slider_title, slider])
return html.Center(slider_div)
# ----------------------------------------------------------------------------------------------------------------------
# Map instantiation
# The last block gathers previously defined functions to output the "Risk Score" map.
def build_risks_map():
"""
This function mobilises functions defined hereabove or in the utils module to
instantiate and return a dl.Map object, corresponding to the "Risk Score" view.
"""
geojson, colorbar = build_risks_geojson_and_colorbar()
map_object = dl.Map(center=[46.5, 2], # Determines the point around which the map is initially centered
zoom=6, # Determines the initial level of zoom around the center point
children=[dl.TileLayer(id='tile_layer'),
geojson,
colorbar,
build_filters_object(map_type='risks'),
build_legend_box(map_type='risks'),
html.Div(id='fire_markers_risks'), # Will contain past fire markers of the risks map
html.Div(id='live_alerts_marker')
],
style=map_style, # Reminder: map_style is imported from utils.py
id='map')
return map_object
| 46.276074 | 120 | 0.603208 | 927 | 7,543 | 4.83603 | 0.344121 | 0.020076 | 0.013384 | 0.006246 | 0.051305 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017715 | 0.251624 | 7,543 | 162 | 121 | 46.561728 | 0.776439 | 0.5466 | 0 | 0 | 0 | 0 | 0.093551 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048387 | false | 0 | 0.129032 | 0 | 0.225806 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3b57743fc67237e5e0dfd141e3c95a3161e3a39 | 318 | py | Python | lws_backend/api/routes/auth.py | AlexKLWS/lws_backend | a886073b1b0f6ab80d848fb4b6e8465de9d88317 | [
"Unlicense"
] | null | null | null | lws_backend/api/routes/auth.py | AlexKLWS/lws_backend | a886073b1b0f6ab80d848fb4b6e8465de9d88317 | [
"Unlicense"
] | null | null | null | lws_backend/api/routes/auth.py | AlexKLWS/lws_backend | a886073b1b0f6ab80d848fb4b6e8465de9d88317 | [
"Unlicense"
] | null | null | null | from fastapi import APIRouter, Depends
from lws_backend.api.dependencies.authorization import check_user_auth
router = APIRouter()
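# check_user_auth is expected to yield a (user, exception) tuple; the route below
# returns the user payload or re-raises the stored authorization error.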
@router.get("/user-access")
async def user_access(user_auth=Depends(check_user_auth)):
exception = user_auth[1]
if exception:
raise exception
return user_auth[0]
| 21.2 | 70 | 0.757862 | 43 | 318 | 5.395349 | 0.55814 | 0.172414 | 0.112069 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007463 | 0.157233 | 318 | 14 | 71 | 22.714286 | 0.858209 | 0 | 0 | 0 | 0 | 0 | 0.037736 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3b5d9872af7cd4981466decde26abff958ca0d2 | 3,219 | py | Python | website/bhamon_orchestra_website/job_controller.py | BenjaminHamon/BuildService | 2ca12f9ae74e9cbf732229849f6cd6d13f40151a | [
"MIT"
] | 2 | 2021-01-28T15:56:50.000Z | 2021-03-02T06:27:09.000Z | website/bhamon_orchestra_website/job_controller.py | BenjaminHamon/BuildService | 2ca12f9ae74e9cbf732229849f6cd6d13f40151a | [
"MIT"
] | null | null | null | website/bhamon_orchestra_website/job_controller.py | BenjaminHamon/BuildService | 2ca12f9ae74e9cbf732229849f6cd6d13f40151a | [
"MIT"
] | null | null | null | import logging
import flask
import bhamon_orchestra_website.helpers as helpers
import bhamon_orchestra_website.service_client as service_client
logger = logging.getLogger("JobController")
def show_collection(project_identifier):
item_total = service_client.get("/project/{project_identifier}/job_count".format(**locals()))
pagination = helpers.get_pagination(item_total, { "project_identifier": project_identifier })
query_parameters = {
"skip": (pagination["page_number"] - 1) * pagination["item_count"],
"limit": pagination["item_count"],
"order_by": [ "identifier ascending" ],
}
view_data = {
"project": service_client.get("/project/{project_identifier}".format(**locals())),
"job_collection": service_client.get("/project/{project_identifier}/job_collection".format(**locals()), query_parameters),
"pagination": pagination,
}
helpers.add_display_names([ view_data["project"] ], view_data["job_collection"], [], [], [])
return flask.render_template("job/collection.html", title = "Jobs", **view_data)
def show(project_identifier, job_identifier): # pylint: disable = unused-argument
view_data = {
"project": service_client.get("/project/{project_identifier}".format(**locals())),
"job": service_client.get("/project/{project_identifier}/job/{job_identifier}".format(**locals())),
"run_collection": service_client.get("/project/{project_identifier}/job/{job_identifier}/runs".format(**locals()), { "limit": 10, "order_by": [ "update_date descending" ] }),
"worker_collection": service_client.get("/worker_collection", { "limit": 1000, "order_by": [ "identifier ascending" ] }),
}
view_data["job"]["project_display_name"] = view_data["project"]["display_name"]
helpers.add_display_names([ view_data["project"] ], [ view_data["job"] ], view_data["run_collection"], [], view_data["worker_collection"])
return flask.render_template("job/index.html", title = "Job " + view_data["job"]["display_name"], **view_data)
def trigger(project_identifier, job_identifier): # pylint: disable = unused-argument
trigger_data = { "parameters": {}, "source": { "type": "user", "identifier": flask.session["user"]["identifier"] } }
for key, value in flask.request.form.items():
if key.startswith("parameter-"):
trigger_data["parameters"][key[len("parameter-"):]] = value
service_client.post("/project/{project_identifier}/job/{job_identifier}/trigger".format(**locals()), trigger_data)
return flask.redirect(flask.request.referrer or flask.url_for("job_controller.show_collection", project_identifier = project_identifier))
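# Enables a job, then redirects back to the page the request came from (disable below mirrors this).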
def enable(project_identifier, job_identifier): # pylint: disable = unused-argument
service_client.post("/project/{project_identifier}/job/{job_identifier}/enable".format(**locals()))
return flask.redirect(flask.request.referrer or flask.url_for("job_controller.show_collection", project_identifier = project_identifier))
def disable(project_identifier, job_identifier): # pylint: disable = unused-argument
service_client.post("/project/{project_identifier}/job/{job_identifier}/disable".format(**locals()))
return flask.redirect(flask.request.referrer or flask.url_for("job_controller.show_collection", project_identifier = project_identifier))
| 50.296875 | 176 | 0.751476 | 382 | 3,219 | 6.060209 | 0.219895 | 0.161555 | 0.095032 | 0.081641 | 0.586609 | 0.586609 | 0.527862 | 0.509287 | 0.432829 | 0.359827 | 0 | 0.00237 | 0.082324 | 3,219 | 63 | 177 | 51.095238 | 0.781313 | 0.041938 | 0 | 0.162791 | 0 | 0 | 0.32608 | 0.165313 | 0 | 0 | 0 | 0 | 0 | 1 | 0.116279 | false | 0 | 0.093023 | 0 | 0.325581 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3b83373cf5e4f901428414e382481adff631cd0 | 10,751 | py | Python | resource_counter.py | ferragi/aws_resource_counter | dd0c8c61fd9e3c4cb0237310fa1a7a2c70b99a07 | [
"Apache-2.0"
] | 3 | 2021-03-01T12:50:39.000Z | 2021-09-06T13:53:00.000Z | resource_counter.py | ferragi/aws_resource_counter | dd0c8c61fd9e3c4cb0237310fa1a7a2c70b99a07 | [
"Apache-2.0"
] | null | null | null | resource_counter.py | ferragi/aws_resource_counter | dd0c8c61fd9e3c4cb0237310fa1a7a2c70b99a07 | [
"Apache-2.0"
] | 1 | 2021-03-01T12:51:21.000Z | 2021-03-01T12:51:21.000Z | # -*- coding: utf-8 -*-
import sys
import boto3
from botocore.exceptions import ClientError
import json
from datetime import datetime
CUSTOMER_FILE_NAME = "customer_assessment.yandeh.config.json"
SERVICES_FILE_NAME = "services.config.json"
def get_customer_config():
customer_file = open(CUSTOMER_FILE_NAME)
customer_config = json.loads(customer_file.read())
customer_file.close()
return customer_config
customer_config = get_customer_config()
def generate_account_list():
if 'CUSTOMER_ORGANIZATION_ACCT' not in customer_config:
sys.exit('\n[Fatal Err] Parameter CUSTOMER_ORGANIZATION_ACCT is mandatory in customer configuration.')
account_list = []
account_list.append(customer_config['CUSTOMER_ORGANIZATION_ACCT'])
if 'OTHER_CUSTOMER_ACCT_LIST' in customer_config:
for i in range(0, len(customer_config['OTHER_CUSTOMER_ACCT_LIST'])):
account_list.append(customer_config['OTHER_CUSTOMER_ACCT_LIST'][i]['acct_id'])
return account_list
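# Assumes the configured IAM role in the target account via STS and returns temporary credentials,
# or AccessOK=False when the role cannot be assumed.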
def switch_role(acct_id):
client = boto3.client('sts')
try:
response = client.assume_role(
RoleArn='arn:aws:iam::' + str(acct_id) + ':role/' + str(customer_config['ROLE_NAME']),
RoleSessionName=str(customer_config['ROLE_NAME']) + '-Resource_Counter'
)
except ClientError as err:
        print('\n[Err] Could not switch role on acct ' + str(acct_id) + ' for role name ' + str(customer_config['ROLE_NAME']))
print('\n[Dbg] '+str(err))
return { 'AccessOK':False }
return {
'AccessOK': True,
'access_key_id': response['Credentials']['AccessKeyId'],
'secret_access_key': response['Credentials']['SecretAccessKey'],
'session_token': response['Credentials']['SessionToken']
}
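# Counts the resources described by service_data, optionally scoped to a region and using
# temporary credentials, following NextToken pagination recursively.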
def count_resources(service_data, **extra_params):
    if 'region' in extra_params:
        # bind the caller-supplied region instead of relying on the module-level loop variable
        region = extra_params['region']
if 'access_key_id' in extra_params['access_data']:
try:
client = boto3.client(service_data["BOTO3_CLIENT"], region,
aws_access_key_id=extra_params['access_data']['access_key_id'],
aws_secret_access_key=extra_params['access_data']['secret_access_key'],
aws_session_token=extra_params['access_data']['session_token']
)
except:
print('\n[Err] Could not connect to client '+str(service_data["BOTO3_CLIENT"])+" in region "+str(region))
return 0
else:
try:
client = boto3.client(service_data["BOTO3_CLIENT"], region)
except:
print('\n[Err] Could not connect to client '+str(service_data["BOTO3_CLIENT"])+" in region "+str(region))
return 0
else:
if 'access_key_id' in extra_params['access_data']:
try:
client = boto3.client(service_data["BOTO3_CLIENT"],
aws_access_key_id=extra_params['access_data']['access_key_id'],
aws_secret_access_key=extra_params['access_data']['secret_access_key'],
aws_session_token=extra_params['access_data']['session_token']
)
except:
print('\n[Err] Could not connect to client '+str(service_data["BOTO3_CLIENT"]))
return 0
else:
try:
client = boto3.client(service_data["BOTO3_CLIENT"])
except:
print('\n[Err] Could not connect to client '+str(service_data["BOTO3_CLIENT"]))
return 0
filtered_params = ''
if 'CLIENT_PREFILTERS' in service_data:
        ###########################
        ## TODO
        ## Loop over two or more filter params
        ## (Is that really needed? Or should the list be removed from the JSON?)
        ###########################
if service_data["CLIENT_PREFILTERS"][0]["filter_type"] == 'String':
filtered_params = str(service_data["CLIENT_PREFILTERS"][0]["filter_name"])+" = '"+str(service_data["CLIENT_PREFILTERS"][0]["filter_value"])+"'"
elif service_data["CLIENT_PREFILTERS"][0]["filter_type"] == 'List':
filtered_params = str(service_data["CLIENT_PREFILTERS"][0]["filter_name"])+" = ['"+str(service_data["CLIENT_PREFILTERS"][0]["filter_value"])+"']"
elif service_data["CLIENT_PREFILTERS"][0]["filter_type"] in ['Integer','Bool']:
filtered_params = str(service_data["CLIENT_PREFILTERS"][0]["filter_name"])+" = "+str(service_data["CLIENT_PREFILTERS"][0]["filter_value"])
if 'nexttoken' in extra_params:
if filtered_params == '':
filtered_params = 'NextToken=' + extra_params['nexttoken']
else:
filtered_params += ', NextToken=' + extra_params['nexttoken']
try:
response = eval("client."+service_data["CLIENT_FUNCTION"]+"("+filtered_params+")")
except:
if 'region' in extra_params:
print('\n[Err] Could not run function '+str("client."+service_data["CLIENT_FUNCTION"]+"("+filtered_params+")")+' for client '+str(service_data["BOTO3_CLIENT"])+" in region "+str(region))
else:
print('\n[Err] Could not run function '+str("client."+service_data["CLIENT_FUNCTION"]+"("+filtered_params+")")+' for client '+str(service_data["BOTO3_CLIENT"]))
return 0
try:
if response[service_data["COUNTED_RESOURCE_KEY"]]:
if 'NextToken' in response:
                if 'region' in extra_params:
                    # pass credentials and region through so paginated (NextToken) recursion does not fail
                    return len(response[service_data["COUNTED_RESOURCE_KEY"]]) + count_resources(service_data, access_data=extra_params['access_data'], region=extra_params['region'], nexttoken=response['NextToken'])
                else:
                    return len(response[service_data["COUNTED_RESOURCE_KEY"]]) + count_resources(service_data, access_data=extra_params['access_data'], nexttoken=response['NextToken'])
else:
return len(response[service_data["COUNTED_RESOURCE_KEY"]])
else:
return 0
except KeyError:
if 'region' in extra_params:
print('\n[Err] Could not find key '+service_data["COUNTED_RESOURCE_KEY"]+' for client '+str(service_data["BOTO3_CLIENT"])+" in region "+str(region))
else:
print('\n[Err] Could not find key '+service_data["COUNTED_RESOURCE_KEY"]+' for client '+str(service_data["BOTO3_CLIENT"]))
return 0
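# Writes the service configuration, enriched with per-account counts, to a timestamped JSON file.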
def save_json_file(json_content):
dt_string = datetime.now().strftime("%d%m%Y%H%M%S")
filename = "assessment."+str(customer_config['CUSTOMER_ORGANIZATION_ACCT'])+"."+dt_string+".json"
try:
with open(filename, 'w+') as json_output_file:
json.dump(json_content, json_output_file, indent=4, sort_keys=True)
print("Output JSON file "+filename+" saved. [ok]")
except:
print('\n[Err] Could not write JSON file ' +filename)
def save_csv_file(json_content):
dt_string = datetime.now().strftime("%d%m%Y%H%M%S")
filename = "assessment." + str(customer_config['CUSTOMER_ORGANIZATION_ACCT']) + "." + dt_string + ".csv"
try:
with open(filename, 'w+') as csv_output_file:
csv_output_file.write("'Service Name';'Counted Resource';'AWS_Acct_Id';'Region';'#Counted'\n")
for service in json_content["SERVICES"]:
for counted_account in service['Count']:
if counted_account != 'Subtotal':
if service['CLIENT_ENDPOINT_SCOPE'] == 'global':
csv_output_file.write(
"'" + str(service['NAME']) + "';'" + str(service['COUNTED_RESOURCE_KEY']) + "';'" + str(
counted_account) + "';'global';" + str(service['Count'][counted_account]['global'])+"\n")
else:
for region in service['Count'][counted_account]:
csv_output_file.write(
"'" + str(service['NAME']) + "';'" + str(service['COUNTED_RESOURCE_KEY']) + "';'" + str(
counted_account) + "';'" + str(region) + "';" + str(
service['Count'][counted_account][region]) + "\n")
print("Output CSV file "+filename+" saved. [ok]")
except:
print('\n[Err] Could not write CSV file ' +filename)
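# Main flow: load the service catalogue, then for each account (assuming the role where needed)
# count every service per region and write the JSON and CSV reports.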
service_config_file = open(SERVICES_FILE_NAME)
service_config = json.loads(service_config_file.read())
service_config_file.close()
accts_to_run = generate_account_list()
total_counted = 0
for acct_run_id in accts_to_run:
if acct_run_id != boto3.client('sts').get_caller_identity().get('Account'):
temporary_access_data = switch_role(acct_run_id)
else:
temporary_access_data = { 'AccessOK': True }
print('Checking resources on account ['+acct_run_id+'].')
if not temporary_access_data['AccessOK']:
print("[skipped]")
continue
i = 0
for service in service_config["SERVICES"]:
if 'Count' not in service_config["SERVICES"][i]: service_config["SERVICES"][i]["Count"] = {}
if acct_run_id not in service_config["SERVICES"][i]["Count"]: service_config["SERVICES"][i]["Count"][acct_run_id] = {}
if 'Subtotal' not in service_config["SERVICES"][i]["Count"]: service_config["SERVICES"][i]["Count"]['Subtotal'] = 0
if service['CLIENT_ENDPOINT_SCOPE'] == 'global':
service_config["SERVICES"][i]["Count"][acct_run_id]['global'] = count_resources(service, access_data=temporary_access_data)
service_config["SERVICES"][i]["Count"]['Subtotal'] += service_config["SERVICES"][i]["Count"][acct_run_id]['global']
else:
for region in customer_config['ASSESSMENT_REGION_COVERAGE_LIST']:
if 'EXCEPTION_REGION_LIST' in service and region in service["EXCEPTION_REGION_LIST"]:
continue
service_config["SERVICES"][i]["Count"][acct_run_id][region] = count_resources(service, access_data=temporary_access_data, region=region)
service_config["SERVICES"][i]["Count"]['Subtotal'] += service_config["SERVICES"][i]["Count"][acct_run_id][region]
total_counted += service_config["SERVICES"][i]["Count"]['Subtotal']
i += 1
print(".[ok]")
service_config["Total"] = total_counted
save_json_file(service_config)
save_csv_file(service_config)
print("Total Services: "+str(total_counted)) | 48.427928 | 199 | 0.594921 | 1,204 | 10,751 | 5.02907 | 0.135382 | 0.061767 | 0.03237 | 0.047234 | 0.60578 | 0.568456 | 0.497936 | 0.484228 | 0.466226 | 0.446078 | 0 | 0.005308 | 0.264068 | 10,751 | 222 | 200 | 48.427928 | 0.759985 | 0.010325 | 0 | 0.342697 | 0 | 0 | 0.26144 | 0.038617 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033708 | false | 0 | 0.02809 | 0 | 0.140449 | 0.101124 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3b9455435505a5fb9a159f6a19ce96afc0246de | 3,351 | py | Python | dags/JL_proj_dag.py | jsleslie/sparkify-airflow-pipeline | fd6706e6a40f0fa6420c6d00e53c0734be24d86f | [
"MIT"
] | null | null | null | dags/JL_proj_dag.py | jsleslie/sparkify-airflow-pipeline | fd6706e6a40f0fa6420c6d00e53c0734be24d86f | [
"MIT"
] | null | null | null | dags/JL_proj_dag.py | jsleslie/sparkify-airflow-pipeline | fd6706e6a40f0fa6420c6d00e53c0734be24d86f | [
"MIT"
] | null | null | null | from datetime import datetime, timedelta
import os
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from operators import (StageToRedshiftOperator, LoadFactOperator,
LoadDimensionOperator, DataQualityOperator)
from helpers import SqlQueries
# AWS_KEY = os.environ.get('AWS_KEY')
# AWS_SECRET = os.environ.get('AWS_SECRET')
default_args = {
'owner': 'udacity',
'start_date': datetime(2019, 1, 12),
'depends_on_past': False,
'retries': 3,
'retry_delay': timedelta(minutes=5),
'email_on_retry': False,
'catchup': False
}
dag = DAG('udac_jl_dag_52',
default_args=default_args,
description='Load and transform data in Redshift with Airflow',
schedule_interval='0 * * * *'
)
start_operator = DummyOperator(task_id='Begin_execution', dag=dag)
stage_events_to_redshift = StageToRedshiftOperator(
task_id='Stage_events',
table='staging_events',
s3_path='s3://udacity-dend',
s3_bucket='log_data',
s3_key='',
jsonpath='s3://udacity-dend/log_json_path.json',
dag=dag
)
stage_songs_to_redshift = StageToRedshiftOperator(
task_id='Stage_songs',
table='staging_songs',
s3_path='s3://udacity-dend',
s3_bucket='song_data',
s3_key='A/A/A/',
dag=dag
)
load_songplays_table = LoadFactOperator(
task_id='Load_songplays_fact_table',
table='songplays',
sql=SqlQueries.songplay_table_insert,
dag=dag
)
load_user_dimension_table = LoadDimensionOperator(
task_id='Load_user_dim_table',
table = 'users',
sql=SqlQueries.user_table_insert,
dag=dag
)
load_song_dimension_table = LoadDimensionOperator(
task_id='Load_song_dim_table',
table = 'songs',
sql=SqlQueries.song_table_insert,
dag=dag
)
load_artist_dimension_table = LoadDimensionOperator(
task_id='Load_artist_dim_table',
table = 'artists',
sql=SqlQueries.artist_table_insert,
dag=dag
)
load_time_dimension_table = LoadDimensionOperator(
task_id='Load_time_dim_table',
table = 'time',
sql=SqlQueries.time_table_insert,
append_data = False,
dag=dag
)
run_quality_checks = DataQualityOperator(
task_id='Run_data_quality_checks',
dq_checks=[
{'check_sql': "SELECT COUNT(*) FROM users WHERE userid is null", 'expected_result': 0},
{'check_sql': "SELECT COUNT(*) FROM songs WHERE songid is null", 'expected_result': 0},
{'check_sql': "SELECT COUNT(*) FROM artists WHERE artistid is null", 'expected_result': 0},
{'check_sql': "SELECT COUNT(*) FROM songplays WHERE playid is null", 'expected_result': 0}],
dag=dag
)
end_operator = DummyOperator(task_id='Stop_execution', dag=dag)
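# Task dependencies: the staging loads feed the songplays fact table, which feeds the
# dimension tables, followed by the data quality checks and the end marker.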
start_operator >> stage_events_to_redshift
start_operator >> stage_songs_to_redshift
stage_events_to_redshift >> load_songplays_table
stage_songs_to_redshift >> load_songplays_table
load_songplays_table >> load_song_dimension_table
load_songplays_table >> load_user_dimension_table
load_songplays_table >> load_artist_dimension_table
load_songplays_table >> load_time_dimension_table
load_song_dimension_table >> run_quality_checks
load_user_dimension_table >> run_quality_checks
load_artist_dimension_table >> run_quality_checks
load_time_dimension_table >> run_quality_checks
run_quality_checks >> end_operator
| 29.394737 | 101 | 0.7359 | 426 | 3,351 | 5.396714 | 0.251174 | 0.073075 | 0.054806 | 0.047847 | 0.398869 | 0.288821 | 0.080905 | 0.057416 | 0.057416 | 0.057416 | 0 | 0.008906 | 0.16234 | 3,351 | 113 | 102 | 29.654867 | 0.810118 | 0.022978 | 0 | 0.107527 | 0 | 0 | 0.235546 | 0.03212 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.064516 | 0 | 0.064516 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3b9f47ce9246c0dbf4929a2fc4048535c1ebffb | 3,468 | py | Python | datasets/leejaponic/__init__.py | LinguList/lexibank-data-old | 7bf886597afa26863de8527dfd8529d9eb99fcd6 | [
"Apache-2.0"
] | null | null | null | datasets/leejaponic/__init__.py | LinguList/lexibank-data-old | 7bf886597afa26863de8527dfd8529d9eb99fcd6 | [
"Apache-2.0"
] | null | null | null | datasets/leejaponic/__init__.py | LinguList/lexibank-data-old | 7bf886597afa26863de8527dfd8529d9eb99fcd6 | [
"Apache-2.0"
] | 1 | 2018-10-19T11:58:00.000Z | 2018-10-19T11:58:00.000Z | # coding: utf8
from __future__ import unicode_literals, print_function, division
from clldutils.dsv import UnicodeReader
from clldutils.misc import slug
from pylexibank.util import xls2csv
from pylexibank.lingpy_util import iter_alignments, segmentize
from pylexibank.dataset import CldfDataset
def download(dataset):
xls2csv(dataset.raw.joinpath('supplementary.xlsx'), outdir=dataset.raw)
xls2csv(dataset.raw.joinpath('Japonic_recovered.xlsx'), outdir=dataset.raw)
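# Reads a converted CSV sheet, returning the header row found at `offset` and the data rows after it.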
def read_csv(dataset, name, offset):
header, rows = None, []
with UnicodeReader(dataset.raw.joinpath(name)) as reader:
for i, row in enumerate(reader):
row = [c.strip() for c in row]
if i == offset:
header = row
if i > offset:
rows.append(row)
return header, rows
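# Builds the CLDF dataset: writes one row per language/concept word form and collects
# the cognate set assignments used later for alignment.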
def cldf(dataset, concepticon, **kw):
language_map = {l['NAME']: l['GLOTTOCODE'] or None for l in dataset.languages}
concept_map = {
c.english: c.concepticon_id for c in dataset.conceptlist.concepts.values()}
wordsh, words = read_csv(dataset, 'supplementary.Sheet1.csv', 0)
cognatesh, cognates = read_csv(dataset, 'Japonic_recovered.Sheet1.csv', 1)
def concepts(h, step):
l = h[2:]
return {i + 2: l[i] for i in range(0, len(l), step)}
word_index_to_concept = concepts(wordsh, 1)
assert all(c in concept_map for c in word_index_to_concept.values())
assert len(words) == len(cognates)
def sorted_(l):
return sorted(l, key=lambda r: r[:2])
cognatesets = []
with CldfDataset((
'ID',
'Language_ID',
'Language_name',
'Parameter_ID',
'Parameter_name',
'Value',
'Segments',
'AltTranscription',
), dataset) as ds:
for i, (word, cognate) in enumerate(zip(sorted_(words), sorted_(cognates))):
if not word[1]:
continue
if word[1] == 'Nigata':
word[1] = 'Niigata'
assert word[:2] == cognate[:2]
lname = word[1]
lid = slug(lname)
for index, concept in word_index_to_concept.items():
if word[index] == '?':
continue
wid = '%s-%s' % (lid, index - 1)
cindex = (index - 1) * 2
assert cognatesh[cindex] == concept
ds.add_row([
wid,
language_map[lname],
lname,
concept_map[concept],
concept,
word[index],
'',
cognate[cindex],
])
cs = cognate[cindex + 1]
for css in cs.split('&'):
css = css.strip()
if css != '?':
css = int(float(css))
cognatesets.append([
wid,
ds.name,
word[index],
'%s-%s' % (index - 1, css),
False,
'expert',
'',
'',
'',
'',
])
segmentize(ds)
dataset.cognates.extend(iter_alignments(ds, cognatesets, column='Segments'))
| 33.346154 | 84 | 0.488466 | 350 | 3,468 | 4.734286 | 0.334286 | 0.032589 | 0.032589 | 0.032589 | 0.02414 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011572 | 0.401961 | 3,468 | 103 | 85 | 33.669903 | 0.787367 | 0.00346 | 0 | 0.149425 | 0 | 0 | 0.065721 | 0.021424 | 0 | 0 | 0 | 0 | 0.045977 | 1 | 0.057471 | false | 0 | 0.068966 | 0.011494 | 0.16092 | 0.011494 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3be443cdf964d3bbf6ca7a7ee7114e3f65cd9ab | 3,051 | py | Python | example.py | isanich/aiochannels | 0f2fc1466ade008bf9c470b3e681412ddbb01a73 | [
"MIT"
] | null | null | null | example.py | isanich/aiochannels | 0f2fc1466ade008bf9c470b3e681412ddbb01a73 | [
"MIT"
] | null | null | null | example.py | isanich/aiochannels | 0f2fc1466ade008bf9c470b3e681412ddbb01a73 | [
"MIT"
] | null | null | null | import asyncio
from aiochannels import Channel, aenumerate
async def simple_pinger(ch):
sender = await ch.new_sender()
while sender.is_attached:
# sender can be detached from Channel with `sender.detach`
await sender.send('ping')
async def simple_ponger(ch):
sender = await ch.new_sender()
while sender.is_attached:
await (sender << 'pong') # another variant of `sender.send`
async def main():
channel = await Channel(buffer_size=10)
# Channel without buffer_size argument or buffer_size=1 leads to Go-like behavior
# meaning that senders can send only if getters have already received previously sent
# data with `getter.get` or `getter.get_forever`. buffer_size>1 behavior is also similar to Go.
pinger_task = loop.create_task(simple_pinger(channel))
ponger_task = loop.create_task(simple_ponger(channel))
# pinger and ponger are created with asyncio.Task and will be running asynchronously until their tasks
# are cancelled or senders detached (you should reference those senders elsewhere for this).
getter = await channel.new_getter()
# getters can receive with `getter.get` manually
print(await getter.get()) # ping
print(await getter.get()) # pong
print(await getter.get()) # ping
# and with async generator `getter.get_forever`
async for ix, data in aenumerate(getter.get_forever()): # aenumerate is async `enumerate` analogue
if ix >= 5:
            # await getter.detach() - would have a similar effect to `break`, but the getter would no longer receive
break
print(f'Received from `getter.get_forever` - {data}')
# Sync/async callbacks are supported too
def cb_1(msg):
print(f'Sync callback got - {msg}')
async def cb_2(msg):
print(f'Async callback got - {msg}')
getter.add_callback(cb_1)
getter.add_callback(cb_2)
    # If your getter is not `silent` (see `silent_getter` below), callbacks are
    # triggered by `getter.get` or `getter.get_forever`
await getter.get() # `cb_1` fired and `cb_2` task is put into asyncio loop
await asyncio.sleep(0) # let async callback fire
getter.remove_callback(cb_1)
getter.remove_callback(cb_2)
await getter.detach() # getter can be detached and will no longer receive
# await getter.attach() - and attached again
silent_getter = await channel.new_getter(silent=True)
    # You can pass the silent=True argument to `new_getter()` if you are planning to use this getter
    # with callbacks only, without explicit `getter.get` or `getter.get_forever`.
silent_getter.add_callback(cb_1)
    print('Callbacks from silent getter:')
await asyncio.sleep(0.03) # callbacks will be triggered during sleep
await silent_getter.detach()
# As Channel buffer_size is 10 and there is no more getters
# pinger/ponger tasks are asleep now, but we can cancel them anyway.
pinger_task.cancel()
ponger_task.cancel()
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| 40.68 | 113 | 0.707965 | 441 | 3,051 | 4.784581 | 0.319728 | 0.059716 | 0.045498 | 0.024171 | 0.173934 | 0.084834 | 0.072038 | 0.042654 | 0.042654 | 0.042654 | 0 | 0.008254 | 0.205834 | 3,051 | 74 | 114 | 41.22973 | 0.862567 | 0.478859 | 0 | 0.166667 | 0 | 0 | 0.083173 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02381 | false | 0 | 0.047619 | 0 | 0.071429 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3be6789804fb5752eb28306b4e6c26da559d325 | 1,004 | py | Python | itembase/core/urls/vendor_item_urls.py | wedwardbeck/ibase | 5647fa5aff6c1bdc99b6c93884ff0d5aef17d85b | [
"MIT"
] | null | null | null | itembase/core/urls/vendor_item_urls.py | wedwardbeck/ibase | 5647fa5aff6c1bdc99b6c93884ff0d5aef17d85b | [
"MIT"
] | 9 | 2020-01-17T14:16:08.000Z | 2020-02-18T15:07:40.000Z | itembase/core/urls/vendor_item_urls.py | wedwardbeck/ibase | 5647fa5aff6c1bdc99b6c93884ff0d5aef17d85b | [
"MIT"
] | null | null | null | from django.urls import path
from itembase.core.views.item_views import UOMCreateView, UOMDeleteView, UOMDetailView, \
UOMListView, UOMUpdateView, VendorItemCreateView, VendorItemDeleteView, VendorItemDetailView, \
VendorItemListView, VendorItemUpdateView
app_name = "vendor-items"
urlpatterns = [
path("uom/", UOMListView.as_view(), name="uom-list"),
path("uom/new/", UOMCreateView.as_view(), name="uom-new"),
path("uom/edit/<int:pk>/", UOMUpdateView.as_view(), name="uom-edit"),
path("uom/delete/<int:pk>/", UOMDeleteView.as_view(), name="uom-delete"),
path("uom/<int:pk>/", UOMDetailView.as_view(), name="uom-view"),
path("vi/", VendorItemListView.as_view(), name="list"),
path("vi/new/", VendorItemCreateView.as_view(), name="new"),
path("vi/edit/<int:pk>/", VendorItemUpdateView.as_view(), name="edit"),
path("vi/delete/<int:pk>/", VendorItemDeleteView.as_view(), name="delete"),
path("vi/<int:pk>/", VendorItemDetailView.as_view(), name="view"),
]
| 50.2 | 99 | 0.698207 | 120 | 1,004 | 5.741667 | 0.283333 | 0.087083 | 0.145138 | 0.09434 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.105578 | 1,004 | 19 | 100 | 52.842105 | 0.767261 | 0 | 0 | 0 | 0 | 0 | 0.194223 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.117647 | 0 | 0.117647 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3c1e9ab9358afaa83215e8fcdb31e334f3d7732 | 1,472 | py | Python | exercises/utils/runner.py | rattletat/homework-server | abfac831ed45cc567a6a1610edee934200ffada7 | [
"Unlicense"
] | 1 | 2020-06-03T14:54:38.000Z | 2020-06-03T14:54:38.000Z | exercises/utils/runner.py | rattletat/homework-server | abfac831ed45cc567a6a1610edee934200ffada7 | [
"Unlicense"
] | null | null | null | exercises/utils/runner.py | rattletat/homework-server | abfac831ed45cc567a6a1610edee934200ffada7 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
"""
Runs the test suite and prints the results to standard output.
It uses the given separator in the following way:
<Separator>
#Tests
<Separator>
#Succeeded Tests
<Separator>
First Error
<Separator>
First Failure
<Separator>
"""
import sys
import traceback
import unittest
def main():
sep = sys.argv[1]
try:
import tests
except Exception as e:
test_count = 1
success_count = 0
error = str(e) + "\n" + str(traceback.format_exc())
failure = ""
else:
suite = unittest.TestLoader().loadTestsFromModule(tests)
result = unittest.TextTestRunner(verbosity=0).run(suite)
test_count = result.testsRun
success_count = test_count - len(result.errors) - len(result.failures)
try:
first_error = result.errors[0]
error1 = first_error[0]
error2 = first_error[1]
error = f"{error1}\n{error2}"
except IndexError:
error = ""
try:
first_failure = result.failures[0]
failure1 = first_failure[0]
failure2 = first_failure[1]
failure = f"{failure1}\n{failure2}"
except IndexError:
failure = ""
print(
sep
+ str(test_count)
+ sep
+ str(success_count)
+ sep
+ error.strip()
+ sep
+ failure.strip()
+ sep
)
if __name__ == "__main__":
main()
| 21.647059 | 78 | 0.570652 | 161 | 1,472 | 5.080745 | 0.42236 | 0.0489 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019153 | 0.326087 | 1,472 | 67 | 79 | 21.970149 | 0.805444 | 0.16644 | 0 | 0.25 | 0 | 0 | 0.041017 | 0.018048 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022727 | false | 0 | 0.090909 | 0 | 0.113636 | 0.022727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3c368e347b19b994c0f594074bd9213901ef162 | 1,135 | py | Python | django-budget/config/urls.py | eliostvs/django-budget | c3b181e0dd259f14de6cb6f537508190e1344ec3 | [
"MIT"
] | null | null | null | django-budget/config/urls.py | eliostvs/django-budget | c3b181e0dd259f14de6cb6f537508190e1344ec3 | [
"MIT"
] | null | null | null | django-budget/config/urls.py | eliostvs/django-budget | c3b181e0dd259f14de6cb6f537508190e1344ec3 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.conf.urls import include, patterns, url
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^$',
'base.views.index',
name='index'),
url(r'^i18n/',
include('django.conf.urls.i18n')),
)
urlpatterns += i18n_patterns(
'',
url(r'^login/$',
'django.contrib.auth.views.login',
name='login'),
url(r'^logout/$',
'django.contrib.auth.views.logout_then_login',
name='logout'),
url(r'^dashboard/$',
'dashboard.views.dashboard',
name='dashboard'),
url(r'^setup/$',
'base.views.setup',
name='setup'),
url(r'^budget/',
include('budget.urls', namespace='budget')),
url(r'^category/',
include('category.urls', namespace='category')),
url(r'^admin/',
include(admin.site.urls)),
url(r'^transaction/',
include('transaction.urls', namespace='transaction')),
url(r'^summary/',
include('summary.urls', namespace='summary')),
)
| 21.018519 | 62 | 0.592952 | 125 | 1,135 | 5.312 | 0.264 | 0.066265 | 0.063253 | 0.054217 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011364 | 0.22467 | 1,135 | 53 | 63 | 21.415094 | 0.743182 | 0 | 0 | 0.052632 | 0 | 0 | 0.315419 | 0.105727 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.105263 | 0 | 0.105263 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3c39873bff99fe7e5298c28441bd0599d3f6194 | 5,433 | py | Python | tests/fdb/test_fdb.py | GarrickHe/sonic-mgmt | 74c2ac63ad948227ac90d7ab89205cff08cc9833 | [
"Apache-2.0"
] | null | null | null | tests/fdb/test_fdb.py | GarrickHe/sonic-mgmt | 74c2ac63ad948227ac90d7ab89205cff08cc9833 | [
"Apache-2.0"
] | 11 | 2019-07-10T16:27:32.000Z | 2019-09-10T15:56:48.000Z | tests/fdb/test_fdb.py | GarrickHe/sonic-mgmt | 74c2ac63ad948227ac90d7ab89205cff08cc9833 | [
"Apache-2.0"
] | null | null | null | from ansible_host import AnsibleHost
import pytest
import ptf.testutils as testutils
import time
import itertools
import logging
import pprint
DEFAULT_FDB_ETHERNET_TYPE = 0x1234
DUMMY_MAC_PREFIX = "02:11:22:33"
DUMMY_MAC_COUNT = 10
FDB_POPULATE_SLEEP_TIMEOUT = 2
logger = logging.getLogger(__name__)
def send_eth(ptfadapter, source_port, source_mac, dest_mac):
"""
send ethernet packet
:param ptfadapter: PTF adapter object
:param source_port: source port
:param source_mac: source MAC
:param dest_mac: destination MAC
:return:
"""
pkt = testutils.simple_eth_packet(
eth_dst=dest_mac,
eth_src=source_mac,
eth_type=DEFAULT_FDB_ETHERNET_TYPE
)
logger.debug('send packet source port id {} smac: {} dmac: {}'.format(source_port, source_mac, dest_mac))
testutils.send(ptfadapter, source_port, pkt)
def send_recv_eth(ptfadapter, source_port, source_mac, dest_port, dest_mac):
"""
send ethernet packet and verify it on dest_port
:param ptfadapter: PTF adapter object
:param source_port: source port
:param source_mac: source MAC
:param dest_port: destination port to receive packet on
:param dest_mac: destination MAC
:return:
"""
pkt = testutils.simple_eth_packet(
eth_dst=dest_mac,
eth_src=source_mac,
eth_type=DEFAULT_FDB_ETHERNET_TYPE
)
logger.debug('send packet src port {} smac: {} dmac: {} verifying on dst port {}'.format(
source_port, source_mac, dest_mac, dest_port))
testutils.send(ptfadapter, source_port, pkt)
testutils.verify_packet_any_port(ptfadapter, pkt, [dest_port])
def setup_fdb(ptfadapter, vlan_table, router_mac):
"""
:param ptfadapter: PTF adapter object
:param vlan_table: VLAN table map: VLAN subnet -> list of VLAN members
:return: FDB table map : VLAN member -> MAC addresses set
"""
fdb = {}
for vlan in vlan_table:
for member in vlan_table[vlan]:
mac = ptfadapter.dataplane.get_mac(0, member)
# send a packet to switch to populate layer 2 table with MAC of PTF interface
send_eth(ptfadapter, member, mac, router_mac)
# put in learned MAC
fdb[member] = { mac }
# Send packets to switch to populate the layer 2 table with dummy MACs for each port
# Totally 10 dummy MACs for each port, send 1 packet for each dummy MAC
dummy_macs = ['{}:{:02x}:{:02x}'.format(DUMMY_MAC_PREFIX, member, i)
for i in range(DUMMY_MAC_COUNT)]
for dummy_mac in dummy_macs:
send_eth(ptfadapter, member, dummy_mac, router_mac)
# put in set learned dummy MACs
fdb[member].update(dummy_macs)
time.sleep(FDB_POPULATE_SLEEP_TIMEOUT)
return fdb
@pytest.fixture
def fdb_cleanup(ansible_adhoc, testbed):
""" cleanup FDB before and after test run """
duthost = AnsibleHost(ansible_adhoc, testbed['dut'])
try:
duthost.command('sonic-clear fdb all')
yield
finally:
# in any case clear fdb after test
duthost.command('sonic-clear fdb all')
@pytest.mark.usefixtures('fdb_cleanup')
def test_fdb(ansible_adhoc, testbed, ptfadapter):
"""
1. verify fdb forwarding in T0 topology.
2. verify show mac command on DUT for learned mac.
"""
if testbed['topo'] not in ['t0', 't0-64', 't0-116']:
pytest.skip('unsupported testbed type')
duthost = AnsibleHost(ansible_adhoc, testbed['dut'])
ptfhost = AnsibleHost(ansible_adhoc, testbed['ptf'])
host_facts = duthost.setup()['ansible_facts']
mg_facts = duthost.minigraph_facts(host=duthost.hostname)['ansible_facts']
# remove existing IPs from PTF host
ptfhost.script('scripts/remove_ip.sh')
# set unique MACs to PTF interfaces
ptfhost.script('scripts/change_mac.sh')
# reinitialize data plane due to above changes on PTF interfaces
ptfadapter.reinit()
router_mac = host_facts['ansible_Ethernet0']['macaddress']
vlan_member_count = sum([len(v['members']) for k, v in mg_facts['minigraph_vlans'].items()])
vlan_table = {}
for vlan in mg_facts['minigraph_vlan_interfaces']:
vlan_table[vlan['subnet']] = []
for ifname in mg_facts['minigraph_vlans'][vlan['attachto']]['members']:
vlan_table[vlan['subnet']].append(mg_facts['minigraph_port_indices'][ifname])
fdb = setup_fdb(ptfadapter, vlan_table, router_mac)
for vlan in vlan_table:
for src, dst in itertools.combinations(vlan_table[vlan], 2):
for src_mac, dst_mac in itertools.product(fdb[src], fdb[dst]):
send_recv_eth(ptfadapter, src, src_mac, dst, dst_mac)
# Should we have fdb_facts ansible module for this test?
res = duthost.command('show mac')
logger.info('"show mac" output on DUT:\n{}'.format(pprint.pformat(res['stdout_lines'])))
dummy_mac_count = 0
total_mac_count = 0
for l in res['stdout_lines']:
if DUMMY_MAC_PREFIX in l.lower():
dummy_mac_count += 1
if "dynamic" in l.lower():
total_mac_count += 1
# Verify that the number of dummy MAC entries is expected
assert dummy_mac_count == DUMMY_MAC_COUNT * vlan_member_count
# Verify that total number of MAC entries is expected
assert total_mac_count == DUMMY_MAC_COUNT * vlan_member_count + vlan_member_count
| 33.95625 | 109 | 0.677342 | 748 | 5,433 | 4.705882 | 0.251337 | 0.031818 | 0.025852 | 0.021591 | 0.349148 | 0.288068 | 0.20483 | 0.145739 | 0.125284 | 0.125284 | 0 | 0.010238 | 0.226946 | 5,433 | 159 | 110 | 34.169811 | 0.827857 | 0.241671 | 0 | 0.186047 | 0 | 0 | 0.128482 | 0.017064 | 0 | 0 | 0.001506 | 0 | 0.023256 | 1 | 0.05814 | false | 0 | 0.081395 | 0 | 0.151163 | 0.023256 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3c7792d212a10cd3a3492a2b0a312b11bc2457f | 4,916 | py | Python | python/adso_odata_to_neo4j.py | fbelleau/sap2model | ecabfb8f3b514e5c2e23f5fc8594aa3415701ad3 | [
"MIT"
] | null | null | null | python/adso_odata_to_neo4j.py | fbelleau/sap2model | ecabfb8f3b514e5c2e23f5fc8594aa3415701ad3 | [
"MIT"
] | null | null | null | python/adso_odata_to_neo4j.py | fbelleau/sap2model | ecabfb8f3b514e5c2e23f5fc8594aa3415701ad3 | [
"MIT"
] | null | null | null | # adso_odata_to_neo4j.py
# from francois.belleau@saaq.gouv.qc.ca
# create ADSO nodes in NEO4J using a CDS VIEW exposed as OData service
from neo4j import GraphDatabase
#pip install --trusted-host pypi.org --trusted-host files.pythonhosted.org neo4j
driver = GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "password"))
import requests
import pyodata
#pip install --trusted-host pypi.org --trusted-host files.pythonhosted.org pyodata
import datetime
import re
def add_adso(tx, name, subtype, infoarea, label, system, environment, ID, date, user, caracteristics):
tx.run("MERGE (a:ADSO {name: $name, label: $label, subtype: $subtype, system: $system, environment: $environment, ID: $ID , date: $date, user: $user, infoarea: $infoarea, caracteristics: $caracteristics})",
name=name, label=label, subtype=subtype, system=system, environment=environment, ID=ID, date=date, user=user, infoarea=infoarea, caracteristics=caracteristics)
# extract system name from infoarea name
def infoarea_system_name(system):
if system[0:4] == '/IMO':
system = 'CO'
elif system[0:6] == 'ZSAAQ_':
system = system[6:8]
else:
system = 'NULL'
return(system)
# convert sap odata date format to a string date format
def odata_date2string(conttimestmp):
try:
matches = re.match(r"^/Date\((.*)\+0000\)/$", conttimestmp)
value = matches.group(1)
#print(value, int(value))
value = datetime.datetime(1970, 1, 1) + datetime.timedelta(milliseconds=int(value))
#print(type(value))
result = value.strftime('%Y-%m-%d %H:%M:%S')
except:
result = ''
return(result)
# compute ADSO type
def AdsoType(AdsoName):
if AdsoName[0:3] == 'ZCM':
type = 'Corporate memory'
elif AdsoName[0:3] == 'ZD_':
type = 'Core layer'
elif AdsoName[0:7] == '/IMO/CM':
type = 'Corporate memory'
elif AdsoName[0:7] == '/IMO/D_':
type = 'Core layer'
else :
type = ''
return(type)
# odata feed connection
environment = 'SW1'
host = 'TO_BE_DEFINED'
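# The CONFIDENTIAL_<environment>.py file loaded below is expected to define the real host
# and the requests_auth credentials used for the OData session.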
exec(open('./CONFIDENTIAL_'+environment+'.py').read())
SERVICE_URL = 'http://' + host + '/sap/opu/odata/SAAQ/BW_RSOADSO_CDS'
EntityName = 'xSAAQxBW_RSOADSO'
print('OData service URL:', SERVICE_URL)
session = requests.Session()
session.auth = requests_auth
odata_feed = pyodata.Client(SERVICE_URL, session)
# number of entries
print('xSAAQxBW_RSOADSO:', odata_feed.entity_sets.xSAAQxBW_RSOADSO.get_entities().count().execute())
#exit()
# list column names
rows = odata_feed.entity_sets.xSAAQxBW_RSOADSO.get_entities().execute()
row = rows[0].__dict__['_cache']
print('columns: ', row)
adso = row['adsonm']
subtype = AdsoType(adso)
infoarea = row['infoarea']
name = row['description']
system = infoarea_system_name(infoarea)
ID = environment + ' ' + system + ' ' + adso
date_str = odata_date2string(row['timestmp'])
user = row['tstpnm']
caracteristics = []
for key in ['activate_data', 'write_changelog', 'cubedeltaonly', 'no_aq_deletion', 'unique_records', 'planning_mode', 'check_delta_cons', 'extended_aq_table', 'all_sids_checked', 'all_sids_materialized', 'direct_update', 'snapshot_scenario', 'dyn_tiering_per_part', 'is_reporting_obj', 'force_no_concat', 'compatibility_views', 'autorefresh']:
#print(key)
if row[key]:
caracteristics.append(key)
print('ADSO', adso, name, subtype, infoarea, environment, system, ID, date_str, user, caracteristics)
#exit()
with driver.session() as session:
# delete existing node collection
print('DELETING')
result = session.run("MATCH n = (p:ADSO) DETACH DELETE n")
# create nodes from odata feeed
print('LOADING')
# for data in odata_feed.entity_sets.Z001_RSDAREA.get_entities().execute():
for data in odata_feed.entity_sets.xSAAQxBW_RSOADSO.get_entities().execute():
row = data.__dict__['_cache']
#print(a)
name = row['adsonm']
subtype = AdsoType(name)
infoarea = row['infoarea']
label = row['description']
system = infoarea_system_name(infoarea)
ID = environment + ' ' + system + ' ' + name + ' ' + subtype
date = odata_date2string(row['timestmp'])
user = row['tstpnm']
#create caracteristics list to replace boolean
caracteristics = []
for key in ['activate_data', 'write_changelog', 'cubedeltaonly', 'no_aq_deletion', 'unique_records', 'planning_mode', 'check_delta_cons', 'extended_aq_table', 'all_sids_checked', 'all_sids_materialized', 'direct_update', 'snapshot_scenario', 'dyn_tiering_per_part', 'is_reporting_obj', 'force_no_concat', 'compatibility_views', 'autorefresh']:
if row[key]:
caracteristics.append(key)
#print('ADSO', adso, name, subtype, infoarea, environment, label, system, ID, date, user, caracteristics)
session.write_transaction(add_adso, name, subtype, infoarea, label, system, environment, ID, date, user, caracteristics)
#exit()
# create parent relationship
print('CREATING RELATION')
result = session.run("MATCH (i:Infoarea),(a:ADSO) WHERE i.name = a.infoarea CREATE (i)-[:contient]->(a)")
| 35.114286 | 345 | 0.718267 | 643 | 4,916 | 5.326594 | 0.332815 | 0.010511 | 0.02219 | 0.02219 | 0.486131 | 0.48 | 0.461314 | 0.427153 | 0.414015 | 0.414015 | 0 | 0.009836 | 0.131408 | 4,916 | 139 | 346 | 35.366906 | 0.792272 | 0.172498 | 0 | 0.235294 | 0 | 0.023529 | 0.312886 | 0.034628 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047059 | false | 0.011765 | 0.058824 | 0 | 0.105882 | 0.082353 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3c875a221b839dc6e51b8ef33e2086e6f987b91 | 3,372 | py | Python | kbsbot/compose_engine/compose_utils.py | astandre/cb-compose-engine-ms | ed4141f57dcb544743fd17fe62001d573ae1efc9 | [
"MIT"
] | null | null | null | kbsbot/compose_engine/compose_utils.py | astandre/cb-compose-engine-ms | ed4141f57dcb544743fd17fe62001d573ae1efc9 | [
"MIT"
] | null | null | null | kbsbot/compose_engine/compose_utils.py | astandre/cb-compose-engine-ms | ed4141f57dcb544743fd17fe62001d573ae1efc9 | [
"MIT"
] | null | null | null | import re
def clean_uri(uri):
"""
This method removes the url part of the URI in order to obtain just the property or class
:param uri: An uri to be cleaned
:return: The name of the property or the class
"""
if uri.find('#') != -1:
special_char = '#'
else:
special_char = '/'
index = uri.rfind(special_char)
return uri[index + 1:len(uri)]
def check_requirements(requirements, entities):
"""
This method compares the existing entities and the entities required to complete an intent.
:param requirements: The list of the entities needed
:param entities: The list of current entities
    :return: A (missing_status, missing) tuple: whether entities are still missing and the list of missing entities
"""
missing = requirements
missing_status = True
if len(requirements) > len(entities):
return missing_status, missing
else:
for entity in entities:
for i, needed_entity in enumerate(requirements):
if entity["type"] == needed_entity:
del missing[i]
break
if len(missing) == 0:
missing_status = False
return missing_status, missing
def build_answer(raw_answer, answer_type):
"""
    This method builds the answer, depending on the type of answer.
    :param raw_answer: A dict containing the template of the answer and the different parts of the answer
    :param answer_type: The type of answer to be constructed
    :return: A dict with the answer type, the rendered answer text and, when applicable, the options
"""
final_answer = None
answer = {}
if answer_type == "text":
final_answer = raw_answer["template"]
re_template = re.compile(r"{%[a-zA-Z]*%}")
found = re_template.findall(final_answer)
for aux in found:
simple_aux = aux.replace("{%", "")
simple_aux = simple_aux.replace("%}", "")
for answ in raw_answer["answer"]:
if answ["property"] == simple_aux:
answer_aux = ""
for i, part in enumerate(answ["value"]):
if i + 1 < len(answ["value"]):
answer_aux += " " + part + ","
else:
answer_aux += " " + part
final_answer = final_answer.replace(aux, answer_aux)
break
answer["answer_type"] = answer_type
answer["text"] = final_answer
elif answer_type == "options":
final_answer = raw_answer["template"]
answer["answer_type"] = answer_type
answer["text"] = final_answer
answer["options"] = raw_answer["options"]
return answer
def update_entities(current_entities, new_entities):
"""
This method updates the current list of entities, by looking for the same type of entity.
Parameters:
:param current_entities: Current list of entities
:param new_entities: New list of entities
:return: Current list of entities updated
"""
if len(current_entities) == 0:
current_entities = new_entities
else:
for i, c_entity in enumerate(current_entities):
for n_entity in new_entities:
if c_entity["type"] == n_entity["type"]:
current_entities[i] = n_entity
break
return current_entities
| 31.514019 | 105 | 0.590747 | 405 | 3,372 | 4.767901 | 0.241975 | 0.051269 | 0.033143 | 0.032626 | 0.07768 | 0.048679 | 0.048679 | 0.048679 | 0.048679 | 0 | 0 | 0.00218 | 0.319692 | 3,372 | 106 | 106 | 31.811321 | 0.839582 | 0.279063 | 0 | 0.25 | 0 | 0 | 0.056204 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.016667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3cadd5bfa7e24c0eed1116844676bed9022fba5 | 1,008 | py | Python | textscreen.py | MennoNij/DrivePy | 2c5c5ed60b4704e656895fb0d2afc9fe3524696c | [
"MIT"
] | 1 | 2021-02-16T10:47:39.000Z | 2021-02-16T10:47:39.000Z | textscreen.py | MennoNij/DrivePy | 2c5c5ed60b4704e656895fb0d2afc9fe3524696c | [
"MIT"
] | null | null | null | textscreen.py | MennoNij/DrivePy | 2c5c5ed60b4704e656895fb0d2afc9fe3524696c | [
"MIT"
] | null | null | null | import time
from pyglet.gl import *
from pyglet import image
from pyglet.window import key
import globals
import helpers
class TextScreen(object):
def __init__(self, txt):
self.text = txt
self.state = 0
self.startTime = 0.0
def draw(self, ww):
label = pyglet.text.HTMLLabel(self.text, x=0, y=0,
width=ww-0,
multiline=True,
anchor_x='center', anchor_y='center')
label.draw()
def start(self):
self.startTime = time.time()
def end(self):
self.state = 1
def done(self):
global hasWheel
if time.time() - self.startTime > 0.5:
if self.state > 0:
self.state = 0
#return True
#if globals.hasWheel:
#if globals.joystick.buttons[1]:
#return True
if helpers.findKey('space') >= 0:
return True
return False
| 22.4 | 63 | 0.507937 | 116 | 1,008 | 4.362069 | 0.396552 | 0.071146 | 0.059289 | 0.055336 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021417 | 0.397817 | 1,008 | 44 | 64 | 22.909091 | 0.812191 | 0.072421 | 0 | 0.068966 | 0 | 0 | 0.01826 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.172414 | false | 0 | 0.206897 | 0 | 0.482759 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3cc8a8a3a90142b361a941d04d0660448e6bf61 | 2,490 | py | Python | examples/classify_digits.py | mbaelde/sklearn-hierarchical-classification | f2cc1535b043e323a25fe0de5e26c04011dbfcb2 | [
"Apache-2.0"
] | 174 | 2018-02-09T05:37:42.000Z | 2022-03-21T07:09:43.000Z | examples/classify_digits.py | mbaelde/sklearn-hierarchical-classification | f2cc1535b043e323a25fe0de5e26c04011dbfcb2 | [
"Apache-2.0"
] | 42 | 2018-03-15T06:51:16.000Z | 2022-01-17T15:44:55.000Z | examples/classify_digits.py | mbaelde/sklearn-hierarchical-classification | f2cc1535b043e323a25fe0de5e26c04011dbfcb2 | [
"Apache-2.0"
] | 51 | 2018-03-21T17:13:11.000Z | 2022-03-21T13:30:29.000Z | #!/usr/bin/env python
"""
Example of using the hierarchical classifier to classify (a subset of) the digits data set.
Demonstrated some of the capabilities, e.g using a Pipeline as the base estimator,
defining a non-trivial class hierarchy, etc.
"""
from sklearn import svm
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn_hierarchical_classification.classifier import HierarchicalClassifier
from sklearn_hierarchical_classification.constants import ROOT
from sklearn_hierarchical_classification.metrics import h_fbeta_score, multi_labeled
from sklearn_hierarchical_classification.tests.fixtures import make_digits_dataset
# Used for seeding random state
RANDOM_STATE = 42
def classify_digits():
r"""Test that a nontrivial hierarchy leaf classification behaves as expected.
We build the following class hierarchy along with data from the handwritten digits dataset:
<ROOT>
/ \
A B
/ \ | \
1 7 C 9
/ \
3 8
"""
class_hierarchy = {
ROOT: ["A", "B"],
"A": ["1", "7"],
"B": ["C", "9"],
"C": ["3", "8"],
}
base_estimator = make_pipeline(
TruncatedSVD(n_components=24),
svm.SVC(
gamma=0.001,
kernel="rbf",
probability=True
),
)
clf = HierarchicalClassifier(
base_estimator=base_estimator,
class_hierarchy=class_hierarchy,
)
X, y = make_digits_dataset(
targets=[1, 7, 3, 8, 9],
as_str=False,
)
# cast the targets to strings so we have consistent typing of labels across hierarchy
y = y.astype(str)
X_train, X_test, y_train, y_test = train_test_split(
X,
y,
test_size=0.2,
random_state=RANDOM_STATE,
)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print("Classification Report:\n", classification_report(y_test, y_pred))
# Demonstrate using our hierarchical metrics module with MLB wrapper
with multi_labeled(y_test, y_pred, clf.graph_) as (y_test_, y_pred_, graph_):
h_fbeta = h_fbeta_score(
y_test_,
y_pred_,
graph_,
)
print("h_fbeta_score: ", h_fbeta)
if __name__ == "__main__":
classify_digits()
| 28.295455 | 95 | 0.646988 | 310 | 2,490 | 4.951613 | 0.403226 | 0.064495 | 0.059935 | 0.096417 | 0.019544 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013774 | 0.271084 | 2,490 | 87 | 96 | 28.62069 | 0.831956 | 0.288353 | 0 | 0 | 0 | 0 | 0.035527 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018868 | false | 0 | 0.169811 | 0 | 0.188679 | 0.037736 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |