hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b88292bee373cc7990068571abe863b6f24f9ffb | 3,499 | py | Python | src/data_management/preprocess_baseline.py | joaquimgomez/BachelorThesis-TextSimilarityMeasures | a5efd4d651c53f7e35cab05e8408ebf769f5aadf | [
"MIT"
] | null | null | null | src/data_management/preprocess_baseline.py | joaquimgomez/BachelorThesis-TextSimilarityMeasures | a5efd4d651c53f7e35cab05e8408ebf769f5aadf | [
"MIT"
] | null | null | null | src/data_management/preprocess_baseline.py | joaquimgomez/BachelorThesis-TextSimilarityMeasures | a5efd4d651c53f7e35cab05e8408ebf769f5aadf | [
"MIT"
] | null | null | null | import argparse
import nltk.data
import string
import pandas as pd
import re
from os import listdir, mkdir
from os.path import isfile, join
from gensim.parsing.porter import PorterStemmer
from gensim.parsing.preprocessing import remove_stopwords
def saveFiles(documents, dest):
mkdir(dest)
for doc in documents:
with open(dest + str(doc) + '.txt', 'w') as f:
f.write(documents[doc])
#for sentence in documents[doc]:
# f.write(sentence + '\n')
def documentPreprocessing(document):
# Filter for non-printable characters
filter_printable = lambda x: x in string.printable
# Stemmer
porter = PorterStemmer()
#for i in range(0, len(document)):
doc = document
# Lowercasing
doc = doc.lower()
# Remove emails and web addresses
doc = re.sub(r'\S*@\S*\s?', '', doc, flags = re.MULTILINE)
doc = re.sub(r'http\S+', '', doc, flags = re.MULTILINE)
# Erase non-printable characters
doc = ''.join(filter(filter_printable, doc))
# Remove Stopwords (using gensim stopwords set)
doc = remove_stopwords(doc)
# Stemming
doc = porter.stem_sentence(doc)
return doc
def obtainFileContents(index):
#tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
documents = {}
print("Obtaining content from indexed files.")
for ind, row in index.iterrows():
print("Obtaining content from file " + row['file_name'] + ".")
f = open(row['file_path'])
fContent = f.read()
documents[row['id']] = fContent #tokenizer.tokenize(fContent)
f.close()
print("\n\n")
return documents
def generateIndex(folders):
print("Constructing index from files:")
index = pd.DataFrame(columns=['id', 'category', 'file_name', 'file_path'])
currentId = 1
for (folderName, path) in folders:
print("Indexing files from folder " + folderName + ".")
files = [(file, join(path, file)) for file in listdir(path) if isfile(join(path, file)) and not file.startswith('.')]
for (file, filePath) in files:
#group = file.split("-")[0]
index = index.append({'id': str(currentId) + "-" + folderName, 'category': folderName, 'file_name': file, 'file_path': filePath}, ignore_index=True)
currentId = currentId + 1
print("\nTotal number of indexed files: " + str(len(index.index)))
print("Indexed files:")
print(index)
print("\n\n")
return index
def main(org, dest):
# Obtain all the folders
folders = [(folder, join(org, folder)) for folder in listdir(org) if not isfile(join(org, folder)) and not folder.startswith('.')]
# Generate an index for all files
index = generateIndex(folders)
# Save index to csv
mkdir('./meta/')
index.to_csv('./meta/pdfs_index.csv', index=False)
# Obtain content of all documents in index
documents = obtainFileContents(index)
# Preprocess documents
print("Preprocessing loaded documents:")
for doc in documents:
print("Preprocessing document with id " + str(doc) + ".")
documents[doc] = documentPreprocessing(documents[doc])
print("\n\n")
# Save preprocessed files
print("Saving preprocessed files.")
saveFiles(documents, dest)
print("\n\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = "Creates the dataset from files in the input directory.")
parser.add_argument("--origen", "-o",
help = "Directory of folders containing files.",
default = "./")
parser.add_argument("--destination", "-d",
help = "Directory where dataset goes. The destination folder must not exist.",
default = "./")
args = parser.parse_args()
main(args.origen, args.destination)
| 26.507576 | 151 | 0.697914 | 463 | 3,499 | 5.220302 | 0.334773 | 0.019859 | 0.011585 | 0.014067 | 0.016549 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00136 | 0.159474 | 3,499 | 131 | 152 | 26.709924 | 0.820469 | 0.153472 | 0 | 0.106667 | 0 | 0 | 0.204553 | 0.007136 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.12 | 0 | 0.226667 | 0.213333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
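The preprocessing chain in the file above (lowercasing, stripping e-mail addresses and URLs, dropping non-printable characters, gensim stop-word removal, Porter stemming) can be exercised on a single string in isolation. A minimal sketch, assuming only that gensim is installed; the sample sentence and expected output are illustrative:

```python
# Illustrative only: replays the same gensim/re steps used in documentPreprocessing().
import re
import string
from gensim.parsing.porter import PorterStemmer
from gensim.parsing.preprocessing import remove_stopwords

doc = "Contact me at someone@example.com or visit http://example.org for the reports"
doc = doc.lower()
doc = re.sub(r'\S*@\S*\s?', '', doc, flags=re.MULTILINE)    # strip e-mail addresses
doc = re.sub(r'http\S+', '', doc, flags=re.MULTILINE)       # strip URLs
doc = ''.join(ch for ch in doc if ch in string.printable)   # keep printable characters
doc = remove_stopwords(doc)                                  # gensim stop-word list
doc = PorterStemmer().stem_sentence(doc)                     # Porter stemming
print(doc)  # roughly: "contact visit report"
```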
b88395899ebc32a4278d4246e56444985227280a | 4,910 | py | Python | pyreach/gyms/envs/integration_fanuc.py | google-research/pyreach | f91753ce7a26e77e122eb02a9fdd5a1ce3ce0159 | [
"Apache-2.0"
] | 13 | 2021-09-01T01:10:22.000Z | 2022-03-05T10:01:52.000Z | pyreach/gyms/envs/integration_fanuc.py | google-research/pyreach | f91753ce7a26e77e122eb02a9fdd5a1ce3ce0159 | [
"Apache-2.0"
] | null | null | null | pyreach/gyms/envs/integration_fanuc.py | google-research/pyreach | f91753ce7a26e77e122eb02a9fdd5a1ce3ce0159 | [
"Apache-2.0"
] | 6 | 2021-09-20T21:17:53.000Z | 2022-03-14T18:42:48.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gym environment for the Fanuc Integration Test Workcell."""
import time
from typing import Any, Dict, Tuple
import numpy as np # type: ignore
from pyreach.gyms import core
from pyreach.gyms import reach_env
class IntegrationTestFanucEnv(reach_env.ReachEnv):
"""Integration Test for Fanuc Environment."""
SAFE_JOINT_ANGLES: np.ndarray = np.deg2rad([0, 0, -35, 0, -60, -75])
# pylint: disable=g-bad-todo
# TODO: Grab MAX and MIN joint angles from real workcell
MIN_JOINT_ANGLES = SAFE_JOINT_ANGLES - 5.0
MAX_JOINT_ANGLES = SAFE_JOINT_ANGLES + 5.0
TIMEOUT_PER_INSTRUCTION_SECONDS = 600.0
def __init__(self, is_synchronous: bool = True, **kwargs: Any) -> None:
self.timer_running: bool = False
self.deadline: float = 0.0
self.agent_done_signal = False
response_queue_length: int = 0 if is_synchronous else 2
pyreach_config: Dict[str, reach_env.ReachElement] = {
"arm":
reach_env.ReachArm(
"",
self.MIN_JOINT_ANGLES,
self.MAX_JOINT_ANGLES,
is_synchronous=is_synchronous,
response_queue_length=response_queue_length,
ik_lib="ikfast"),
"camera":
reach_env.ReachColorCamera("realsense", (360, 640)),
"depth_camera":
reach_env.ReachDepthCamera("", (720, 1280), color_enabled=True),
"server":
reach_env.ReachServer("Server"),
"vacuum":
reach_env.ReachVacuum("",
is_synchronous=is_synchronous,
state_enable=True,
vacuum_gauge_enable=True,
vacuum_detect_enable=True),
"annotation":
reach_env.ReachAnnotation("",
is_synchronous=False,
maximum_size=1024),
}
super().__init__(pyreach_config=pyreach_config, **kwargs)
def _movej(self,
joints: np.ndarray) -> Tuple[core.Observation, float, bool, Any]:
action = {
"arm": {
"command": 1,
"joint_angles": joints,
"synchronous": 1,
"velocity": 1.05,
"acceleration": 1.4
}
}
return super().step(action)
def _stow_workcell(self) -> core.Observation:
"""Stow the workcell arm clear of the FOV of the camera.
This will move the workcell arm to a safe joint position.
Returns:
Observation after moving arm to safe joint position
"""
print(f"{time.time():.4f}:ENV: Stowing workcell arm")
obs, _, _, _ = self._movej(self.SAFE_JOINT_ANGLES)
return obs
def reset(self) -> core.Observation:
"""Resets the benchmark.
Returns:
Initial observation.
"""
print(f"{time.time():.4f}:ENV: Resetting the integration test environment")
# End any current task with reset
obs = super().reset()
self.timer_running = False
return obs
def close(self) -> None:
self._stow_workcell()
print(f"{time.time():.4f}:ENV: Closing gym")
super().close()
def step(self,
action: core.Action) -> Tuple[core.Observation, float, bool, Any]:
"""Perform one step."""
observation: core.Observation
reward: float
done: bool
info: Any
observation, reward, done, info = super().step(action)
if not self.timer_running:
self.timer_running = True
self.deadline = time.time() + self.TIMEOUT_PER_INSTRUCTION_SECONDS
if done:
self.timer_running = False
observation = self._stow_workcell()
elif time.time() >= self.deadline:
print(f"{time.time():.4f}:ENV: You ran out of time!")
self.timer_running = False
reward = -1.0
done = True
observation = self._stow_workcell()
return (observation, reward, done, info)
class IntegrationTestFanucSyncEnv(IntegrationTestFanucEnv):
"""Configure a Gym environment with a synchronous arm."""
def __init__(self, **kwargs: Any) -> None:
super().__init__(is_synchronous=True, **kwargs)
class IntegrationTestFanucAsyncEnv(IntegrationTestFanucEnv):
"""Configure a Gym environment with an asynchronous arm."""
def __init__(self, **kwargs: Any) -> None:
super().__init__(is_synchronous=False, **kwargs)
| 31.273885 | 79 | 0.63279 | 587 | 4,910 | 5.115843 | 0.361158 | 0.03663 | 0.031968 | 0.018648 | 0.131868 | 0.131868 | 0.051282 | 0.032634 | 0.032634 | 0.032634 | 0 | 0.016579 | 0.262933 | 4,910 | 156 | 80 | 31.474359 | 0.813208 | 0.230754 | 0 | 0.11828 | 0 | 0 | 0.081732 | 0.023816 | 0 | 0 | 0 | 0.00641 | 0 | 1 | 0.086022 | false | 0 | 0.053763 | 0 | 0.258065 | 0.043011 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
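For orientation, the action layout that `_movej` assembles can be reused in an ordinary Gym-style loop. The sketch below is illustrative only: it assumes a live PyReach workcell connection and that `IntegrationTestFanucSyncEnv` can be constructed with its default keyword arguments, neither of which is verified here.

```python
# Hypothetical episode against the synchronous environment defined above.
import numpy as np
from pyreach.gyms.envs.integration_fanuc import IntegrationTestFanucSyncEnv

env = IntegrationTestFanucSyncEnv()   # assumes a reachable workcell
obs = env.reset()
action = {
    "arm": {
        "command": 1,                 # joint move, mirroring _movej()
        "joint_angles": np.deg2rad([0, 0, -35, 0, -60, -75]),
        "synchronous": 1,
        "velocity": 1.05,
        "acceleration": 1.4,
    }
}
obs, reward, done, info = env.step(action)
env.close()
```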
b88507041018ffdbb286986a54a1eb9795b333f5 | 7,409 | py | Python | models/node_models.py | joshchang1112/gcnn-survey-paper | 591af8d6c4374378831cab2cdec79575e2540d79 | [
"Apache-2.0"
] | 155 | 2019-12-18T19:01:02.000Z | 2022-03-12T16:34:06.000Z | models/node_models.py | google/gcnn-survey-paper | 591af8d6c4374378831cab2cdec79575e2540d79 | [
"Apache-2.0"
] | null | null | null | models/node_models.py | google/gcnn-survey-paper | 591af8d6c4374378831cab2cdec79575e2540d79 | [
"Apache-2.0"
] | 23 | 2020-05-11T12:39:58.000Z | 2022-03-04T09:13:58.000Z | #Copyright 2018 Google LLC
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""Inference step for node classification models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from models.base_models import NodeModel
import tensorflow as tf
from utils.model_utils import cheby_module
from utils.model_utils import compute_adj
from utils.model_utils import gat_module
from utils.model_utils import gcn_module
from utils.model_utils import gcn_pool_layer
from utils.model_utils import mlp_module
from utils.model_utils import sp_gat_layer
from utils.model_utils import sp_gcn_layer
class Gat(NodeModel):
"""Graph Attention (GAT) Model (Velickovic & al).
arXiv link: https://arxiv.org/abs/1710.10903
"""
def compute_inference(self, node_features, adj_matrix, is_training):
"""Forward step for GAT model."""
sparse = self.sparse_features
in_dim = self.input_dim
average_last = True
with tf.variable_scope('node-model'):
logits = gat_module(node_features, adj_matrix, self.n_hidden, self.n_att,
self.p_drop, is_training, in_dim, sparse,
average_last)
return logits
class Gcn(NodeModel):
"""Graph convolution network (Kipf & al).
arXiv link: https://arxiv.org/abs/1609.02907
"""
def compute_inference(self, node_features, adj_matrix, is_training):
"""Forward step for graph convolution model."""
with tf.variable_scope('node-model'):
logits = gcn_module(node_features, adj_matrix, self.n_hidden, self.p_drop,
is_training, self.input_dim, self.sparse_features)
return logits
class Mlp(NodeModel):
"""Multi-layer perceptron model."""
def compute_inference(self, node_features, adj_matrix, is_training):
"""Forward step for graph convolution model."""
with tf.variable_scope('node-model'):
logits = mlp_module(node_features, self.n_hidden, self.p_drop,
is_training, self.input_dim, self.sparse_features,
use_bias=True)
return logits
class SemiEmb(NodeModel):
"""Deep Learning via Semi-Supervised Embedding (Weston & al).
paper: http://icml2008.cs.helsinki.fi/papers/340.pdf
"""
def __init__(self, config):
super(SemiEmb, self).__init__(config)
self.semi_emb_k = config.semi_emb_k
def compute_inference(self, node_features, adj_matrix, is_training):
with tf.variable_scope('node-model'):
hidden_repr = mlp_module(node_features, self.n_hidden, self.p_drop,
is_training, self.input_dim,
self.sparse_features, use_bias=True,
return_hidden=True)
logits = hidden_repr[-1]
hidden_repr_reg = hidden_repr[self.semi_emb_k]
l2_scores = compute_adj(hidden_repr_reg, self.att_mechanism, self.p_drop,
is_training=False)
self.l2_scores = tf.gather_nd(l2_scores, adj_matrix.indices)
return logits
def _compute_node_loss(self, logits, labels):
supervised_loss = super(SemiEmb, self)._compute_node_loss(logits, labels)
# supervised_loss = tf.nn.softmax_cross_entropy_with_logits(
# labels=labels, logits=logits)
# supervised_loss = tf.reduce_sum(supervised_loss) / self.nb_nodes
reg_loss = tf.reduce_mean(self.l2_scores)
return supervised_loss + self.edge_reg * reg_loss
class Cheby(NodeModel):
"""Chebyshev polynomials for Spectral Graph Convolutions (Defferrard & al).
arXiv link: https://arxiv.org/abs/1606.09375
"""
def __init__(self, config):
super(Cheby, self).__init__(config)
self.cheby_k_loc = config.cheby_k_loc
def compute_inference(self, node_features, normalized_laplacian, is_training):
with tf.variable_scope('node-model'):
dense_normalized_laplacian = tf.sparse_to_dense(
sparse_indices=normalized_laplacian.indices,
output_shape=normalized_laplacian.dense_shape,
sparse_values=normalized_laplacian.values)
cheby_polynomials = [tf.eye(self.nb_nodes), dense_normalized_laplacian]
self.cheby = cheby_polynomials
for _ in range(2, self.cheby_k_loc+1):
cheby_polynomials.append(2 * tf.sparse_tensor_dense_matmul(
normalized_laplacian, cheby_polynomials[-1]) - cheby_polynomials[-2]
)
logits = cheby_module(node_features, cheby_polynomials, self.n_hidden,
self.p_drop, is_training, self.input_dim,
self.sparse_features)
return logits
############################ EXPERIMENTAL MODELS #############################
class Hgat(NodeModel):
"""Hierarchical Graph Attention (GAT) Model."""
def compute_inference(self, node_features, adj_matrix, is_training):
"""Forward step for GAT model."""
in_dim = self.input_dim
att = []
for j in range(4):
with tf.variable_scope('gat-layer1-att{}'.format(j)):
att.append(
sp_gat_layer(node_features, adj_matrix, in_dim, 8, self.p_drop,
is_training, True))
hidden_2 = []
hidden_2.append(tf.nn.elu(tf.concat(att[:2], axis=-1)))
hidden_2.append(tf.nn.elu(tf.concat(att[2:], axis=-1)))
att = []
for j in range(2):
with tf.variable_scope('gat-layer2-att{}'.format(j)):
att.append(
sp_gat_layer(hidden_2[j], adj_matrix, 16, 7, self.p_drop,
is_training, False))
return tf.add_n(att) / 2.
class Pgcn(NodeModel):
"""Pooling Graph Convolution Network."""
def compute_inference(self, node_features, adj_matrix, is_training):
adj_matrix_dense = tf.sparse_to_dense(
sparse_indices=adj_matrix.indices,
output_shape=adj_matrix.dense_shape,
sparse_values=adj_matrix.values,
validate_indices=False)
adj_matrix_dense = tf.cast(tf.greater(adj_matrix_dense, 0), tf.float32)
adj_matrix_dense = tf.expand_dims(adj_matrix_dense, -1) # N x N x 1
in_dim = self.input_dim
sparse = self.sparse_features
for i, out_dim in enumerate(self.n_hidden[:-1]):
if i > 0:
sparse = False
with tf.variable_scope('gcn-pool-{}'.format(i)):
node_features = gcn_pool_layer(
node_features,
adj_matrix_dense,
in_dim=in_dim,
out_dim=out_dim,
sparse=sparse,
is_training=is_training,
p_drop=self.p_drop)
node_features = tf.reshape(node_features, (-1, out_dim))
node_features = tf.contrib.layers.bias_add(node_features)
node_features = tf.nn.elu(node_features)
in_dim = out_dim
with tf.variable_scope('gcn-layer-last'):
logits = sp_gcn_layer(node_features, adj_matrix, in_dim,
self.n_hidden[-1], self.p_drop, is_training, False)
return logits
| 37.231156 | 80 | 0.677959 | 1,001 | 7,409 | 4.734266 | 0.223776 | 0.055708 | 0.034817 | 0.048744 | 0.419709 | 0.351129 | 0.300274 | 0.23697 | 0.210804 | 0.196244 | 0 | 0.013472 | 0.218518 | 7,409 | 198 | 81 | 37.419192 | 0.805009 | 0.193278 | 0 | 0.222222 | 0 | 0 | 0.018379 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.079365 | false | 0 | 0.103175 | 0 | 0.301587 | 0.007937 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
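The `Cheby` model above materialises Chebyshev polynomials of the normalised graph Laplacian with the standard recurrence T_0 = I, T_1 = L, T_k = 2 L T_{k-1} - T_{k-2}. The same construction in plain NumPy, as a sketch on a toy dense Laplacian:

```python
# Minimal NumPy illustration of the recurrence used in Cheby.compute_inference.
import numpy as np

def cheby_polynomials(laplacian: np.ndarray, k: int) -> list:
    """Return [T_0, ..., T_k] for the given (rescaled) Laplacian."""
    n = laplacian.shape[0]
    polys = [np.eye(n), laplacian]
    for _ in range(2, k + 1):
        polys.append(2 * laplacian @ polys[-1] - polys[-2])
    return polys

laplacian = np.array([[1.0, -0.5], [-0.5, 1.0]])  # toy 2-node example
print(len(cheby_polynomials(laplacian, 3)))        # 4 terms: T_0 .. T_3
```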
b8851f94f0ae9fafbdff3230843baeb1f89b0e24 | 1,860 | py | Python | tests/test_views.py | mb-wali/invenio-previewer | 400be6f0b15c1fb1137ef5ad8b8b7534190e12e9 | [
"MIT"
] | 3 | 2015-08-19T12:50:16.000Z | 2020-12-14T04:06:04.000Z | tests/test_views.py | mb-wali/invenio-previewer | 400be6f0b15c1fb1137ef5ad8b8b7534190e12e9 | [
"MIT"
] | 99 | 2015-09-13T17:59:28.000Z | 2022-03-08T17:21:34.000Z | tests/test_views.py | mb-wali/invenio-previewer | 400be6f0b15c1fb1137ef5ad8b8b7534190e12e9 | [
"MIT"
] | 52 | 2015-08-13T13:42:26.000Z | 2022-03-28T07:54:17.000Z | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Views module tests."""
from __future__ import absolute_import, print_function
from flask import render_template_string
def test_view_macro_file_list(app):
"""Test file list macro."""
with app.test_request_context():
files = [
{
'key': 'test1.txt',
'size': 10,
'date': '2016-07-12',
},
{
'key': 'test2.txt',
'size': 12000000,
'date': '2016-07-12',
},
]
pid = {
'pid_value': 1
}
result = render_template_string("""
{%- from "invenio_previewer/macros.html" import file_list %}
{{ file_list(files, pid) }}
""", files=files, pid=pid)
assert 'href="/record/1/files/test1.txt?download=1"' in result
assert '<td class="nowrap">10 Bytes</td>' in result
assert 'href="/record/1/files/test2.txt?download=1"' in result
assert '<td class="nowrap">12.0 MB</td>' in result
def test_previewable_test(app):
    """Test template test."""
    file = {
        'type': 'md'
    }
    template = "{% if file.type is previewable %}Previewable" \
               "{% else %}Not previewable{% endif %}"
    assert render_template_string(template, file=file) == "Previewable"
    file['type'] = 'no'
    assert render_template_string(template, file=file) == "Not previewable"
    file['type'] = 'pdf'
    assert render_template_string(template, file=file) == "Previewable"
    file['type'] = ''
    assert render_template_string(template, file=file) == "Not previewable"
| 29.0625 | 74 | 0.57043 | 219 | 1,860 | 4.707763 | 0.424658 | 0.081474 | 0.116392 | 0.100873 | 0.341416 | 0.298739 | 0.298739 | 0.298739 | 0.298739 | 0.116392 | 0 | 0.037009 | 0.288172 | 1,860 | 63 | 75 | 29.52381 | 0.741692 | 0.151613 | 0 | 0.15 | 0 | 0 | 0.316838 | 0.075193 | 0 | 0 | 0 | 0 | 0.2 | 1 | 0.05 | false | 0 | 0.075 | 0 | 0.125 | 0.025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b88a4a0cbcce0942b11a13b0528f11069c87843e | 529 | py | Python | tkinter_module_event_handling.py | chauhanmahavir/Python-Basics | c250a9eee203e1188a968ba2c60262442719fa49 | [
"MIT"
] | 1 | 2020-08-05T05:38:44.000Z | 2020-08-05T05:38:44.000Z | tkinter_module_text_and_adding_image.py | chauhanmahavir/Python-Basics | c250a9eee203e1188a968ba2c60262442719fa49 | [
"MIT"
] | null | null | null | tkinter_module_text_and_adding_image.py | chauhanmahavir/Python-Basics | c250a9eee203e1188a968ba2c60262442719fa49 | [
"MIT"
] | null | null | null | from tkinter import *
class Window(Frame):
def __init__(self, master=None):
Frame.__init__(self, master=None)
self.master=master
self.init_window()
def init_window(self):
self.master.title("First GUI")
self.pack(fill=BOTH, expand=1)
quitButton = Button(self,text="Quit",command=self.client_exit)
quitButton.place(x=0,y=0)
def client_exit(self):
exit()
root = Tk()
root.geometry("400x300")
app=Window(root)
root.mainloop()
| 21.16 | 71 | 0.606805 | 67 | 529 | 4.61194 | 0.537313 | 0.12945 | 0.090615 | 0.116505 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023077 | 0.26276 | 529 | 24 | 72 | 22.041667 | 0.769231 | 0 | 0 | 0 | 0 | 0 | 0.039604 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.176471 | false | 0 | 0.058824 | 0 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
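One detail worth flagging in the class above: `Frame.__init__(self, master=None)` discards the `master` that was passed in, and the widget only ends up under `root` because a single default `Tk()` instance exists. A more conventional form of the same small program, as a sketch:

```python
# Sketch: pass the real master widget through to Frame instead of None.
from tkinter import Tk, Frame, Button, BOTH

class Window(Frame):
    def __init__(self, master=None):
        Frame.__init__(self, master)
        self.master = master
        self.master.title("First GUI")
        self.pack(fill=BOTH, expand=1)
        Button(self, text="Quit", command=self.master.destroy).place(x=0, y=0)

root = Tk()
root.geometry("400x300")
app = Window(root)
root.mainloop()
```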
b88aa396b92ba2bca8b371c9bfa0c943b0870abf | 3,805 | py | Python | verce-hpc-pe/src/test/hpc/training_xcorr_orig.py | KNMI/VERCE | c2f9eaa70ecf1621a218afc5d73ca1304ca8ed36 | [
"MIT"
] | 2 | 2017-09-07T04:33:18.000Z | 2019-01-07T13:32:15.000Z | verce-hpc-pe/src/test/hpc/training_xcorr_orig.py | KNMI/VERCE | c2f9eaa70ecf1621a218afc5d73ca1304ca8ed36 | [
"MIT"
] | 2 | 2016-10-06T13:07:05.000Z | 2017-12-20T09:47:08.000Z | verce-hpc-pe/src/test/hpc/training_xcorr_orig.py | KNMI/VERCE | c2f9eaa70ecf1621a218afc5d73ca1304ca8ed36 | [
"MIT"
] | 4 | 2016-04-25T22:15:40.000Z | 2017-12-18T14:40:58.000Z | from obspy.core import read
sta1 = 'http://escience8.inf.ed.ac.uk:8080/laquila/SAC/A25A.TA..BHZ.2011.025.00.00.00.000-2011.026.00.00.39.000.rm.scale-AUTO.SAC'
sta2 = 'http://escience8.inf.ed.ac.uk:8080/laquila/SAC/BMN.LB..BHZ.2011.025.00.00.00.023-2011.026.00.00.38.998.rm.scale-AUTO.SAC'
from dispel4py.base import SimpleFunctionPE, IterativePE, create_iterative_chain
def stream_producer(data):
filename = data
st = read(filename)
return st
def readstats(st):
station_date = st[0].stats['starttime'].date
station_day = station_date.strftime('%d-%m-%Y')
station = st[0].stats['station']
return [station_day, station, st]
def decimate(st, sps):
st.decimate(int(st[0].stats.sampling_rate/sps))
return st
def detrend(st):
st.detrend('simple')
return st
def demean(st):
st.detrend('demean')
return st
def filter(st, freqmin=0.01, freqmax=1., corners=4, zerophase=False):
st.filter('bandpass', freqmin=freqmin, freqmax=freqmax, corners=corners, zerophase=zerophase)
return st
from numpy import arange, sqrt, abs, multiply, conjugate, real
from obspy.signal.util import nextpow2
from scipy.fftpack import fft, ifft
def spectralwhitening(st):
"""
Apply spectral whitening to data.
Data is divided by its smoothed (Default: None) amplitude spectrum.
"""
for trace in arange(len(st)):
data = st[trace].data
n = len(data)
nfft = nextpow2(n)
spec = fft(data, nfft)
spec_ampl = sqrt(abs(multiply(spec, conjugate(spec))))
spec /= spec_ampl #Do we need to do some smoothing here?
ret = real(ifft(spec, nfft)[:n])
st[trace].data = ret
return st
from dispel4py.core import GenericPE
class MatchPE(GenericPE):
def __init__(self):
GenericPE.__init__(self)
self._add_input('input1', grouping=[0])
self._add_input('input2', grouping=[0])
self._add_output('output')
self.data = {}
def process(self, inputs):
try:
tup = inputs['input1']
except KeyError:
tup = inputs['input2']
date = tup[0]
station = tup[1]
try:
matching = self.data[date]
result = [date, tup[2], matching[2]]
self.write('output', result)
except:
self.data[date] = tup
from obspy.signal.cross_correlation import xcorr
import numpy
def xcorrelation(data, maxlag):
st1 = data[1]
st2 = data[2]
tr1 = st1[0].data
tr2 = st2[0].data
tr1 = tr1/numpy.linalg.norm(tr1)
tr2 = tr2/numpy.linalg.norm(tr2)
return xcorr(tr1, tr2, maxlag, full_xcorr=True)[2]
from dispel4py.workflow_graph import WorkflowGraph
streamProducer = SimpleFunctionPE(stream_producer)
stats1 = SimpleFunctionPE(readstats)
stats2 = SimpleFunctionPE(readstats)
match_traces=MatchPE()
xcorrelation_traces= SimpleFunctionPE(xcorrelation, {'maxlag':1000})
pipeline = [
(decimate, {'sps':4}),
detrend,
demean,
(filter, {'freqmin':0.01, 'freqmax':1., 'corners':4, 'zerophase':False}),
spectralwhitening,
readstats]
preprocess_trace_1 = create_iterative_chain(pipeline)
preprocess_trace_2 = create_iterative_chain(pipeline)
graph = WorkflowGraph()
graph.connect(streamProducer, 'output', preprocess_trace_1, 'input')
graph.connect(streamProducer, 'output', preprocess_trace_2, 'input')
graph.connect(preprocess_trace_1, 'output', match_traces, 'input1')
graph.connect(preprocess_trace_2, 'output', match_traces, 'input2')
graph.connect(match_traces, 'output', xcorrelation_traces, 'input')
#from dispel4py import simple_process
#input_data = [ {'input' : sta1 },{'input' : sta2 }]
#simple_process.process(graph, input_data) | 30.44 | 130 | 0.661235 | 494 | 3,805 | 4.983806 | 0.334008 | 0.009748 | 0.017872 | 0.014622 | 0.112916 | 0.112916 | 0.061738 | 0.061738 | 0.061738 | 0 | 0 | 0.047398 | 0.207096 | 3,805 | 125 | 131 | 30.44 | 0.768644 | 0.070171 | 0 | 0.087912 | 0 | 0.021978 | 0.118601 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.10989 | false | 0.010989 | 0.098901 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
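The commented-out lines at the end of the script show the intended driver. Spelled out, and assuming the `dispel4py` simple-process backend is importable exactly as those comments suggest (not verified here), running the two-station cross-correlation graph would look roughly like this:

```python
# Roughly the driver sketched in the commented-out lines above (illustrative).
from dispel4py import simple_process

input_data = [{'input': sta1}, {'input': sta2}]   # the two SAC URLs defined at the top
simple_process.process(graph, input_data)
```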
b88b48da3a9f70ac10bf14d6f080b5aff8d97bb3 | 5,343 | py | Python | perceiver_pytorch/utils.py | openclimatefix/perceiver-pytorch | 62c314b302aec95571796684732b2bcd0a81cc75 | [
"MIT"
] | 7 | 2021-07-30T22:06:26.000Z | 2022-02-24T09:39:02.000Z | perceiver_pytorch/utils.py | openclimatefix/perceiver-pytorch | 62c314b302aec95571796684732b2bcd0a81cc75 | [
"MIT"
] | 16 | 2021-07-27T09:58:03.000Z | 2021-12-16T12:26:53.000Z | perceiver_pytorch/utils.py | openclimatefix/perceiver-pytorch | 62c314b302aec95571796684732b2bcd0a81cc75 | [
"MIT"
] | null | null | null | from math import log, pi
import torch
import torch.nn.functional as F
import numpy as np
import math
import einops
def extract_image_patches(
x: torch.Tensor, kernel: int, stride: int = 1, dilation: int = 1
) -> torch.Tensor:
"""
Extract image patches in a way similar to TensorFlow extract_image_patches
Taken from https://discuss.pytorch.org/t/tf-extract-image-patches-in-pytorch/43837/8
In the Perceiver JAX implementation they extract image patches matching TensorFlow's SAME padding.
PyTorch doesn't have that same kind of option, so this is a way to do that.
Args:
x: Input Torch Tensor
kernel: Size of kernel
stride: Stride of patch
dilation: Dilation rate
Returns:
Tensor of size [Batch, Height, Width, Channels*kernel*stride]
"""
# Do TF 'SAME' Padding
b, c, h, w = x.shape
h2 = math.ceil(h / stride)
w2 = math.ceil(w / stride)
pad_row = (h2 - 1) * stride + (kernel - 1) * dilation + 1 - h
pad_col = (w2 - 1) * stride + (kernel - 1) * dilation + 1 - w
x = F.pad(x, (pad_row // 2, pad_row - pad_row // 2, pad_col // 2, pad_col - pad_col // 2))
# Extract patches
# get all image windows of size (kernel, stride) and stride (kernel, stride)
patches = x.unfold(2, kernel, stride).unfold(3, kernel, stride)
# Permute so that channels are next to patch dimension
patches = patches.permute(0, 4, 5, 1, 2, 3).contiguous()
# View as [batch_size, height, width, channels*kh*kw]
return patches.view(b, -1, patches.shape[-2], patches.shape[-1])
def reverse_space_to_depth(
frames: torch.Tensor, temporal_block_size: int = 1, spatial_block_size: int = 1
) -> torch.Tensor:
"""Reverse space to depth transform.
Works for images (dim = 4) and videos (dim = 5)"""
if len(frames.shape) == 4:
return einops.rearrange(
frames,
"b (dh dw c) h w -> b c (h dh) (w dw)",
dh=spatial_block_size,
dw=spatial_block_size,
)
elif len(frames.shape) == 5:
return einops.rearrange(
frames,
"b t (dt dh dw c) h w -> b (t dt) c (h dh) (w dw)",
dt=temporal_block_size,
dh=spatial_block_size,
dw=spatial_block_size,
)
else:
raise ValueError(
"Frames should be of rank 4 (batch, height, width, channels)"
" or rank 5 (batch, time, height, width, channels)"
)
def space_to_depth(
frames: torch.Tensor, temporal_block_size: int = 1, spatial_block_size: int = 1
) -> torch.Tensor:
"""Space to depth transform.
Works for images (dim = 4) and videos (dim = 5)"""
if len(frames.shape) == 4:
return einops.rearrange(
frames,
"b c (h dh) (w dw) -> b (dh dw c) h w",
dh=spatial_block_size,
dw=spatial_block_size,
)
elif len(frames.shape) == 5:
return einops.rearrange(
frames,
"b (t dt) c (h dh) (w dw) -> b t (dt dh dw c) h w ",
dt=temporal_block_size,
dh=spatial_block_size,
dw=spatial_block_size,
)
else:
raise ValueError(
"Frames should be of rank 4 (batch, height, width, channels)"
" or rank 5 (batch, time, height, width, channels)"
)
def encode_position(
batch_size: int,
axis: list,
max_frequency: float,
num_frequency_bands: int,
sine_only: bool = False,
) -> torch.Tensor:
"""
Encode the Fourier Features and return them
Args:
batch_size: Batch size
axis: List containing the size of each axis
max_frequency: Max frequency
num_frequency_bands: Number of frequency bands to use
sine_only: (bool) Whether to only use Sine features or both Sine and Cosine, defaults to both
Returns:
Torch tensor containing the Fourier Features of shape [Batch, *axis]
"""
axis_pos = list(
map(
lambda size: torch.linspace(-1.0, 1.0, steps=size),
axis,
)
)
pos = torch.stack(torch.meshgrid(*axis_pos), dim=-1)
enc_pos = fourier_encode(
pos,
max_frequency,
num_frequency_bands,
sine_only=sine_only,
)
enc_pos = einops.rearrange(enc_pos, "... n d -> ... (n d)")
enc_pos = einops.repeat(enc_pos, "... -> b ...", b=batch_size)
return enc_pos
def fourier_encode(
x: torch.Tensor,
max_freq: float,
num_bands: int = 4,
sine_only: bool = False,
) -> torch.Tensor:
"""
Create Fourier Encoding
Args:
x: Input Torch Tensor
max_freq: Maximum frequency for the Fourier features
num_bands: Number of frequency bands
sine_only: Whether to only use sine or both sine and cosine features
Returns:
Torch Tensor with the fourier position encoded concatenated
"""
x = x.unsqueeze(-1)
device, dtype, orig_x = x.device, x.dtype, x
scales = torch.linspace(
1.0,
max_freq / 2,
num_bands,
device=device,
dtype=dtype,
)
scales = scales[(*((None,) * (len(x.shape) - 1)), Ellipsis)]
x = x * scales * pi
x = x.sin() if sine_only else torch.cat([x.sin(), x.cos()], dim=-1)
x = torch.cat((x, orig_x), dim=-1)
return x
| 30.884393 | 102 | 0.596107 | 755 | 5,343 | 4.107285 | 0.235762 | 0.040632 | 0.051596 | 0.016769 | 0.398904 | 0.325701 | 0.283134 | 0.283134 | 0.271525 | 0.271525 | 0 | 0.01645 | 0.294591 | 5,343 | 172 | 103 | 31.063953 | 0.806315 | 0.298896 | 0 | 0.364486 | 0 | 0.018692 | 0.116578 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046729 | false | 0 | 0.056075 | 0 | 0.168224 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
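A quick way to sanity-check the helpers above is to look at the shapes they produce. A minimal sketch, assuming the module is importable as `perceiver_pytorch.utils`; with `sine_only=False` each axis contributes `2 * num_frequency_bands + 1` features, so a 32x32 grid with 6 bands should give a trailing dimension of 26:

```python
# Sketch: inspect the shapes produced by the position-encoding helpers above.
import torch
from perceiver_pytorch.utils import encode_position, space_to_depth

pos = encode_position(
    batch_size=2,
    axis=[32, 32],            # 32x32 spatial grid
    max_frequency=10.0,
    num_frequency_bands=6,
    sine_only=False,
)
print(pos.shape)              # expected: torch.Size([2, 32, 32, 26])

frames = torch.randn(2, 3, 32, 32)                      # batch of RGB images
patches = space_to_depth(frames, spatial_block_size=4)  # 4x4 blocks into channels
print(patches.shape)          # expected: torch.Size([2, 48, 8, 8])
```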
b88c0ab2779701b72a9706b3a83f48e762eafc2f | 25,525 | py | Python | primary-analysis/spacerExtractor.py | TanmayTanna/Transcriptional-Recording | 237ddf65264eb6fc5808fe2638479f616eef6ae2 | [
"Apache-2.0"
] | 1 | 2022-03-21T11:47:58.000Z | 2022-03-21T11:47:58.000Z | primary-analysis/spacerExtractor.py | TanmayTanna/Transcriptional-Recording | 237ddf65264eb6fc5808fe2638479f616eef6ae2 | [
"Apache-2.0"
] | null | null | null | primary-analysis/spacerExtractor.py | TanmayTanna/Transcriptional-Recording | 237ddf65264eb6fc5808fe2638479f616eef6ae2 | [
"Apache-2.0"
] | 3 | 2019-04-10T14:42:34.000Z | 2020-12-07T13:53:19.000Z | # April 12, 2019
# Tanmay Tanna
from __future__ import division
import sys, os, argparse, operator, numpy, pandas, fuzzysearch
from collections import Counter
## set up parser for user inputs
parser = argparse.ArgumentParser()
optional = parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
## user inputs required
required.add_argument('-p', '--inFile', help='path to .fasta file.', dest='file_inFile')
required.add_argument('-o', '--outPath', help='path to output directory.', dest='path_outPath')
required.add_argument('-d1', '--drOne', help='first partial CRISPR repeat ', dest='dr_sequence1')
required.add_argument('-d2', '--drTwo', help='second partial CRISPR repeat ', dest='dr_sequence2')
required.add_argument('-df', '--drFull', help='second partial CRISPR repeat ', dest='dr_sequenceFull')
## optional (defaults provided)
optional.add_argument('-l', '--LBC', help='library identifier ', dest='LBC', default='null')
optional.add_argument('-s', '--outName', help='path to output directory.', dest='file_outName', default='null')
optional.add_argument('-a', help='Number of allowed mismatches in the first partial CRISPR repeat. Default=2', type=int, dest='m1', default=2)
optional.add_argument('-b', help='Number of allowed mismatches in the second partial CRISPR repeat. Default=3', type=int, dest='m2', default=3)
optional.add_argument('-m', help='Minimum spacer size. Default=30', type=int, dest='min', default=25)
optional.add_argument('-n', help='Maximum spacer size. Default=55', type=int, dest='max', default=56)
optional.add_argument('-sMin', help='Minimum stagger length according to primer design. Default=0', type=int, dest='min_stagger', default=0)
optional.add_argument('-sMax', help='Maximum stagger length according to primer design. Default=8', type=int, dest='max_stagger', default=8)
optional.add_argument('--infoFile', help='boolean to generate files with GC content and length info for spacers', dest='infoFile', action='store_true')
optional.add_argument('--no-infoFile', help='boolean to generate files with GC content and length info for spacers', dest='infoFile', action='store_false')
optional.set_defaults(infoFile=False)
parser._action_groups.append(optional)
args = parser.parse_args()
# assign arguments to variables
DR1Mismatch = int(args.m1)
DR2Mismatch = int(args.m2)
minSpacer = int(args.min)
maxSpacer = int(args.max)
inFile = str(args.file_inFile)
outPath = str(args.path_outPath)+'/'
outName = str(args.file_outName)
firstRepeat = str(args.dr_sequence1)
secondRepeat = str(args.dr_sequence2)
fullRepeat = str(args.dr_sequenceFull)
LBC = str(args.LBC)
minStagger = int(args.min_stagger)
maxStagger = int(args.max_stagger)
infoFile=args.infoFile
if outName == 'null':
outName = inFile.split("/")[-1]
if outName.endswith('.fasta'):
outName = outName[:-6]
# Function that takes in a part of the read and gives back a spacer
def editSpacer(read,firstRepeat,firstExpect,secondRepeat,secondExpect,DR1Mismatch,DR2Mismatch,minSpacer,maxSpacer,firstRangeToLook,secondRangeToLook):
s=''
# Find the first repeat using string search, if not found, use fuzzy string matching
s = read[firstExpect:(firstExpect+firstRangeToLook+len(firstRepeat))]
findDR1 = s.find(firstRepeat)
if findDR1 ==-1:
firstMatch = fuzzysearch.find_near_matches(firstRepeat, s, max_l_dist = DR1Mismatch)
firstMatch = sorted(firstMatch, key=lambda x: x[2])
else:
firstMatch = [[findDR1, findDR1 + len(firstRepeat)]]
if not firstMatch: return '' # Too many mismatches. Return empty string.
s=''
# Find the second repeat using fuzzy string matching
s = read[secondExpect:(secondExpect+secondRangeToLook+len(secondRepeat))]
if len(s)>len(secondRepeat):
findDR2 = s.find(secondRepeat)
if findDR2 ==-1:
secondMatch = fuzzysearch.find_near_matches(secondRepeat, s, max_l_dist = DR2Mismatch)
secondMatch = sorted(secondMatch, key=lambda x: x[2])
else:
secondMatch = [[findDR2, findDR2 + len(secondRepeat)]]
if not secondMatch: return ''
else:
return ''
# Too many mismatches or read too short. Return empty string.
# If both repeats seem to have been found, return spacer
spacerStart = firstMatch[0][1]
spacerEnd = secondMatch[0][0] + secondExpect
# if spacer is too short, looking for other matches for second DR in the read
if len(secondMatch) > 1 & spacerEnd - spacerStart < minSpacer:
if findDR1 ==-1:
i=1
while i < len(secondMatch) and spacerEnd - spacerStart < minSpacer:
spacerEnd = secondMatch[i].start + secondExpect
i+=1
else:
secondMatch = fuzzysearch.find_near_matches(secondRepeat, s, max_l_dist = DR2Mismatch)
secondMatch = sorted(secondMatch, key=lambda x: x[2])
i=0
while i < len(secondMatch) and spacerEnd - spacerStart < minSpacer:
spacerEnd = secondMatch[i].start + secondExpect
i+=1
spacer = read[spacerStart:spacerEnd]
# no spacer if out of bounds
if len(spacer) > maxSpacer: return ''
if len(spacer) < minSpacer: return ''
return spacer
# identify and process files with the terms below
if ('.fasta' in inFile):
# open inFile for reading/writing and report file being processed
F = open(inFile,mode='rU')
G = open(outPath+outName+'.unique.fasta',mode='w') # unique spacers based on spacer sequence only
I = open(outPath+outName+'.doubleAcquisitions.fasta',mode='w')
J = open(outPath+outName+'.doubleAcquisitions.paired.fasta',mode='w') # double acquisitions with both spacers
K = open(outPath+outName+'.all.fasta',mode='w')
MC = open(outPath+outName+'.multipleAcquisitions.complete.fasta',mode='w') # multiple acquisitions with all spacers
M = open(outPath+outName+'.multipleAcquisitions.fasta',mode='w')
if not os.path.exists('outputs'):
os.makedirs('outputs')
SS = open("outputs/summaryStats.txt", mode = 'a')
if infoFile:
GC = open(outPath+outName+'.info.txt',mode='w')
GC.write("Unique_Spacer_Sequence"+'\t'+"Sequence_Length"+'\t'+"GC_content"+'\n')
os.system(str("echo '##################################################'"))
os.system(str('echo '+"'"+inFile+' accepted for processing'+"'"))
readName = ''
D={}
rawReads=0 # total reads in fasta
spacerReads=0 # reads having a spacer
UniqueSingleAcquisitions=0 # number of unique single acquisitions based on spacer sequence
UniqueDoubleAcquisitions=0 # number of unique double acquisitions based on spacer sequence
UniqueMultipleAcquisitions=0 # number of unique multiple acquisitions based on spacer sequence
SinglefullRepeatReads=0 # reads with a double acquisition
MultiplefullRepeatReads=0 # reads with multiple acquisitions
spacerReadsDoubleBoth=0 # double acquisitions with both spacers
spacerReadsDoubleOne=0 # double acquisitions with one spacer
spacerReadsMultiComplete=0 # multiple acquisition with all spacers
spacerReadsMultiSome=0 # multiple acquisiton with one spacer
spacerReadsMultiNoSpacerBetweenFullDRs=0 # multiple DRs without spacers between them (probably PCR artifacts)
minReadLength=minStagger + len(firstRepeat) + minSpacer + len(secondRepeat)
allDistalSpacers=[]
if 'null' not in LBC:
NonLBCReads=0
for L in F: # loop through reads in file
if '>' in L: # defline, skip for processing but save read name
readName = L.strip()
rawReads+=1
continue
L=L.strip()
# ignore reads that are too short to detect adapted spacer
if len(L) < minReadLength:
continue
# Identify LBC within file
if 'null' not in LBC:
findLBC = L.find(LBC)
if findLBC==-1:
numMismatches = int(round(len(LBC)*0.1))
LBCcoord = fuzzysearch.find_near_matches(LBC, L, max_l_dist = numMismatches)
else:
LBCcoord = [[findLBC, findLBC+len(LBC)]]
if not LBCcoord:
NonLBCReads+=1
continue
# identify and store reads with more than one acquisition (ie those that contain a full DR sequence)
numMismatches = int(round(len(fullRepeat)*0.1))
tempFullRepeat = fuzzysearch.find_near_matches(fullRepeat, L, max_l_dist = numMismatches)
if tempFullRepeat: # if full repeat is found
## DOUBLE ACQUISITIONS ##
if len(tempFullRepeat) is 1:
SinglefullRepeatReads += 1
I.write(readName+'\n'+L+'\n')
# split read into (leader) proximal and (leader) distal and independently run the editSpacer code to extract proximal/distal spacers
tempSpacerProximal = L[:tempFullRepeat[0].start+len(secondRepeat)]
firstExpect = minStagger
firstRangeToLook = maxStagger - minStagger + len(fullRepeat) - len(firstRepeat) # this is overkill because using len(fullRepeat), when this could be reduced by knowing the true primer to end of DR distance.
secondExpect = firstExpect + len(firstRepeat) + minSpacer
secondRangeToLook = maxStagger - minStagger + maxSpacer - minSpacer + len(fullRepeat) # this is overkill because using len(fullRepeat), when this could be reduced by knowing the true primer to end of DR distance.
spacerProximal=editSpacer(tempSpacerProximal,firstRepeat,firstExpect,secondRepeat,secondExpect,DR1Mismatch,DR2Mismatch,minSpacer,maxSpacer,firstRangeToLook,secondRangeToLook)
tempSpacerDistal = L[tempFullRepeat[0].end-len(firstRepeat):] # the for is to account for the tendency of regex to add up to 3 nucleotides at the end of a spacer
firstExpect = 0
firstRangeToLook = len(fullRepeat) - len(firstRepeat) # this is overkill because using len(fullRepeat), when this could be reduced by knowing the true primer to end of DR distance.
secondExpect = firstExpect + len(firstRepeat) + minSpacer
secondRangeToLook = maxSpacer - minSpacer + len(fullRepeat) # this is overkill because using len(fullRepeat), when this could be reduced by knowing the true primer to end of DR distance.
spacerDistal=editSpacer(tempSpacerDistal,firstRepeat,firstExpect,secondRepeat,secondExpect,DR1Mismatch,DR2Mismatch,minSpacer,maxSpacer,firstRangeToLook,secondRangeToLook)
# if single reads have distal & proximal spacers, label read and export these to file for looking into distal-proximal pairs
if spacerDistal and spacerProximal:
spacerReads+=1
spacerReadsDoubleBoth+=1
doubleSpacer= spacerProximal+"_"+spacerDistal
# store spacers in dict, this will force uniqueness
if doubleSpacer not in allDistalSpacers:
if doubleSpacer not in D:
D[doubleSpacer]=[readName+'_doubleAcquisitions',0]
D[doubleSpacer][1]+=1
allDistalSpacers.append(spacerDistal)
J.write(readName+'_doubleAcquisitions_both_distal'+'\n'+spacerDistal+'\n')
J.write(readName+'_doubleAcquisitions_both_proximal'+'\n'+spacerProximal+'\n')
K.write(readName+'_doubleAcquisitions_both_distal'+'\n'+spacerDistal+'\n')
K.write(readName+'_doubleAcquisitions_both_proximal'+'\n'+spacerProximal+'\n')
# process reads with only distal spacer
elif spacerDistal:
spacerReads+=1
spacerReadsDoubleOne+=1
if spacerDistal not in allDistalSpacers:
if spacerDistal not in D:
D[spacerDistal]=[readName+'_doubleAcquisitions_distal',0]
D[spacerDistal][1]+=1
K.write(readName+'_doubleAcquisitions_distal'+'\n'+spacerDistal+'\n')
# process reads with only proximal spacer
elif spacerProximal:
spacerReads+=1
spacerReadsDoubleOne+=1
if spacerProximal not in D:
D[spacerProximal]=[readName+'_doubleAcquisitions_proximal',0]
D[spacerProximal][1]+=1
K.write(readName+'_doubleAcquisitions_proximal'+'\n'+spacerProximal+'\n')
else:
continue
## MULTIPLE ACQUISITIONS ##
elif len(tempFullRepeat)>1:
MultiplefullRepeatReads+=1
M.write(readName+'\n'+L+'\n')
tempSpacerProximal = L[:tempFullRepeat[0].start+len(secondRepeat)]
firstExpect = minStagger
firstRangeToLook = maxStagger - minStagger + len(fullRepeat) - len(firstRepeat) # this is overkill because using len(fullRepeat), when this could be reduced by knowing the true primer to end of DR distance.
secondExpect = firstExpect + len(firstRepeat) + minSpacer
secondRangeToLook = maxStagger - minStagger + maxSpacer - minSpacer + len(fullRepeat) # this is overkill because using len(fullRepeat), when this could be reduced by knowing the true primer to end of DR distance.
spacerProximal=editSpacer(tempSpacerProximal,firstRepeat,firstExpect,secondRepeat,secondExpect,DR1Mismatch,DR2Mismatch,minSpacer,maxSpacer,firstRangeToLook,secondRangeToLook)
spacersMedial=''
for index, DR in enumerate(tempFullRepeat):
length = tempFullRepeat[index].start - tempFullRepeat[index-1].end
if length > minSpacer and length < maxSpacer:
tempspacersMedial = L[tempFullRepeat[index-1].end:tempFullRepeat[index].start]
spacersMedial=spacersMedial+"_"+tempspacersMedial
if spacersMedial:
spacersMedial=spacersMedial[1:]
tempSpacerDistal = L[tempFullRepeat[-1].end-len(firstRepeat):]
firstExpect = 0
firstRangeToLook = len(fullRepeat) - len(firstRepeat) # this is overkill because using len(fullRepeat), when this could be reduced by knowing the true primer to end of DR distance.
secondExpect = firstExpect + len(firstRepeat) + minSpacer
secondRangeToLook = maxSpacer - minSpacer + len(fullRepeat) # this is overkill because using len(fullRepeat), when this could be reduced by knowing the true primer to end of DR distance.
spacerDistal=editSpacer(tempSpacerDistal,firstRepeat,firstExpect,secondRepeat,secondExpect,DR1Mismatch,DR2Mismatch,minSpacer,maxSpacer,firstRangeToLook,secondRangeToLook)
if spacerDistal and spacerProximal and spacersMedial:
multipleSpacer=spacerProximal+"_"+spacersMedial+"_"+spacerDistal
spacerReads+=1
spacerReadsMultiComplete+=1
if multipleSpacer not in allDistalSpacers:
if multipleSpacer not in D:
D[multipleSpacer]=[readName+'_multipleAcquisitions',0]
D[multipleSpacer][1]+=1
for i in range(1,multipleSpacer.count('_')+1):
if multipleSpacer.split('_', i)[i] not in allDistalSpacers:
allDistalSpacers.append(multipleSpacer.split('_', i)[i])
MC.write(readName+'_multipleAcquisitions_complete_distal'+'\n'+spacerDistal+'\n')
MC.write(readName+'_multipleAcquisitions_complete_proximal'+'\n'+spacerProximal+'\n')
for index,spacer in enumerate(spacersMedial.split('_')):
MC.write(readName+'_multipleAcquisitions_complete_medial_'+str(index)+'\n'+spacer+'\n')
K.write(readName+'_multipleAcquisitions_complete_distal'+'\n'+spacerDistal+'\n')
K.write(readName+'_multipleAcquisitions_complete_proximal'+'\n'+spacerProximal+'\n')
for index,spacer in enumerate(spacersMedial.split('_')):
K.write(readName+'_multipleAcquisitions_complete_medial_'+str(index)+'\n'+spacer+'\n')
elif spacerDistal and spacersMedial:
partMultipleSpacer = spacersMedial + "_" + spacerDistal
spacerReads+=1
spacerReadsMultiSome+=1
if partMultipleSpacer not in allDistalSpacers:
if partMultipleSpacer not in D:
D[partMultipleSpacer]=[readName+'_multipleAcquisitions_noProximal',0]
D[partMultipleSpacer][1]+=1
for i in range(1,partMultipleSpacer.count('_')+1):
if partMultipleSpacer.split('_', i)[i] not in allDistalSpacers:
allDistalSpacers.append(partMultipleSpacer.split('_', i)[i])
K.write(readName+'_multipleAcquisitions_noProximal_distal'+'\n'+spacerDistal+'\n')
for index,spacer in enumerate(spacersMedial.split('_')):
K.write(readName+'_multipleAcquisitions_noProximal_medial_'+str(index)+'\n'+spacer+'\n')
elif spacerProximal and spacersMedial:
spacerReads+=1
spacerReadsMultiSome+=1
partMultipleSpacer =spacerProximal+"_"+spacersMedial
if partMultipleSpacer not in allDistalSpacers:
if partMultipleSpacer not in D:
D[partMultipleSpacer]=[readName+'_multipleAcquisitions_noDistal',0]
D[partMultipleSpacer][1]+=1
for i in range(1,partMultipleSpacer.count('_')+1):
if partMultipleSpacer.split('_', i)[i] not in allDistalSpacers:
allDistalSpacers.append(partMultipleSpacer.split('_', i)[i])
K.write(readName+'_multipleAcquisitions_noDistal_proximal'+'\n'+spacerProximal+'\n')
for index,spacer in enumerate(spacersMedial.split('_')):
K.write(readName+'_multipleAcquisitions_noDistal_medial_'+str(index)+'\n'+spacer+'\n')
elif spacerDistal and spacerProximal and not spacersMedial:
spacerReads+=1
spacerReadsMultiSome+=1
spacerReadsMultiNoSpacerBetweenFullDRs+=1
partMultipleSpacer=spacerProximal+"_"+spacerDistal
if partMultipleSpacer not in allDistalSpacers:
if partMultipleSpacer not in D:
D[partMultipleSpacer]=[readName+'_multipleAcquisitions_noMedial',0]
D[partMultipleSpacer][1]+=1
if spacerDistal not in allDistalSpacers:
allDistalSpacers.append(spacerDistal)
K.write(readName+'_multipleAcquisitions_noMedial_distal'+'\n'+spacerDistal+'\n')
K.write(readName+'_multipleAcquisitions_noMedial_proximal'+'\n'+spacerProximal+'\n')
elif spacerDistal:
spacerReads+=1
spacerReadsMultiSome+=1
spacerReadsMultiNoSpacerBetweenFullDRs+=1
if spacerDistal not in allDistalSpacers:
if spacerDistal not in D:
# store spacers in dict, this will force uniqueness
D[spacerDistal]=[readName+'_multipleAcquisitions_onlyDistal',0]
D[spacerDistal][1]+=1
K.write(readName+'_multipleAcquisitions_onlyDistal'+'\n'+spacerDistal+'\n')
elif spacerProximal:
spacerReads+=1
spacerReadsMultiSome+=1
spacerReadsMultiNoSpacerBetweenFullDRs+=1
if spacerProximal not in D:
# store spacers in dict, this will force uniqueness
D[spacerProximal]=[readName+'_multipleAcquisitions_onlyProximal',0]
D[spacerProximal][1]+=1
K.write(readName+'_multipleAcquisitions_onlyProximal'+'\n'+spacerProximal+'\n')
elif spacersMedial:
spacerReads+=1
spacerReadsMultiSome+=1
partMultipleSpacer = spacersMedial
if partMultipleSpacer not in allDistalSpacers:
if partMultipleSpacer not in D:
D[partMultipleSpacer]=[readName+'_multipleAcquisitions_onlyMedial',0]
D[partMultipleSpacer][1]+=1
for i in range(1,partMultipleSpacer.count('_')+1):
if partMultipleSpacer.split('_', i)[i] not in allDistalSpacers:
allDistalSpacers.append(partMultipleSpacer.split('_', i)[i])
for index,spacer in enumerate(spacersMedial):
K.write(readName+'_multipleAcquisitions_only_medial_'+str(index)+'\n'+spacer+'\n')
else:
continue
# run standard code if multiple acquisitions (ie full DR sequence) not detected
else:
firstExpect = minStagger
firstRangeToLook = maxStagger - minStagger + len(fullRepeat) - len(firstRepeat) # this is overkill because using len(fullRepeat), when this could be reduced by knowing the true primer to end of DR distance.
secondExpect = firstExpect + len(firstRepeat) + minSpacer
secondRangeToLook = maxStagger - minStagger + maxSpacer - minSpacer + len(fullRepeat) # this is overkill because using len(fullRepeat), when this could be reduced by knowing the true primer to end of DR distance.
spacer=editSpacer(L,firstRepeat,firstExpect,secondRepeat,secondExpect,DR1Mismatch,DR2Mismatch,minSpacer,maxSpacer,firstRangeToLook,secondRangeToLook)
if not spacer:
continue
spacerReads+=1
if spacer not in allDistalSpacers:
if spacer not in D:
D[spacer]=[readName,0]
D[spacer][1]+=1
K.write(readName+'\n'+spacer+'\n')
# iterate through spacers and print to file
for spacer in allDistalSpacers:
if spacer in D:
del(D[spacer])
seqlist=sorted(D.keys())
for S in seqlist:
if 'double' in D[S][0]:
UniqueDoubleAcquisitions+=1
elif 'multiple' in D[S][0]:
UniqueMultipleAcquisitions+=1
else:
UniqueSingleAcquisitions+=1
if '_' not in S and len(S)>0:
G.write(D[S][0]+'_rep_'+str(D[S][1])+'\n'+S+'\n')
if infoFile:
count = Counter(S)
gc = (count['G']+count['C'])*100/len(S)
GC.write(str(S)+'\t'+str(len(S))+'\t'+str(gc)+'\n')
else:
for index, spcr in enumerate(S.split('_')):
if len(spcr)>0:
G.write(D[S][0]+'_rep_'+str(D[S][1])+'_spacerPosition_'+str(index)+'\n'+spcr+'\n')
if infoFile:
count = Counter(spcr)
gc = (count['G']+count['C'])*100/len(spcr)
GC.write(str(spcr)+'\t'+str(len(spcr))+'\t'+str(gc)+'\n')
if not D:
os.system(str("echo 'No spacers to map'"))
NonLBCReadPercentage = 'null'
if 'null' not in LBC:
NonLBCReadPercentage = NonLBCReads*100/rawReads
os.system(str('echo '+"'"+'*'+'\t'+' sampleName'+'\t'+' rawReads'+'\t'+'nonLibraryBarcodeRead%'+'\t'+'identifiedSpacers'+'\t'+'uniqueSpacers'+'\t'+'doubleAcquisitions'+'\t'+'doubleAcquisitions.paired'+'\t'+'multipleAcquisitions'+'\t'+'multipleAcquisitions.complete'+'\t'+'uniqueSingleAcquisitions'+'\t'+'uniqueDoubleAcquisitions'+'\t'+'uniqueMultipleAcquisitions'+"'"))
os.system(str('echo '+"'"+'@'+'\t'+str(outPath+outName)+'\t'+str(rawReads)+'\t'+str(NonLBCReadPercentage)+'\t'+str(spacerReads)+'\t'+str(len(D.keys()))+'\t'+str(SinglefullRepeatReads)+'\t'+str(spacerReadsDoubleBoth)+'\t'+str(MultiplefullRepeatReads)+'\t'+str(spacerReadsMultiComplete)+'\t'+str(UniqueSingleAcquisitions)+'\t'+str(UniqueDoubleAcquisitions)+'\t'+str(UniqueMultipleAcquisitions)+"'"))
SS.write(str(outName)+'\t'+str(rawReads)+'\t'+str(NonLBCReadPercentage)+'\t'+str(spacerReads)+'\t'+str(len(D.keys()))+'\t'+str(SinglefullRepeatReads)+'\t'+str(spacerReadsDoubleBoth)+'\t'+str(MultiplefullRepeatReads)+'\t'+str(spacerReadsMultiComplete)+'\t'+str(UniqueSingleAcquisitions)+'\t'+str(UniqueDoubleAcquisitions)+'\t'+str(UniqueMultipleAcquisitions)+'\n')
# print useful summary stats to stdout
F.close()
G.close()
I.close()
J.close()
K.close()
MC.close()
M.close()
SS.close()
if infoFile:
GC.close()
| 51.985743 | 401 | 0.622762 | 2,612 | 25,525 | 6.01608 | 0.142037 | 0.009227 | 0.015146 | 0.025964 | 0.521955 | 0.469645 | 0.438526 | 0.409889 | 0.370434 | 0.366616 | 0 | 0.010423 | 0.270793 | 25,525 | 491 | 402 | 51.985743 | 0.833826 | 0.143232 | 0 | 0.357746 | 0 | 0 | 0.131376 | 0.063073 | 0 | 0 | 0 | 0 | 0 | 1 | 0.002817 | false | 0 | 0.008451 | 0 | 0.016901 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
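The spacer script above writes an optional info file whose GC column is computed with a Counter over each spacer string. The snippet below is a small self-contained sketch of that same calculation, not part of the original script; the example spacer sequence is made up.

from collections import Counter

def gc_percent(spacer: str) -> float:
    # Same arithmetic as the script: (G + C) * 100 / length
    count = Counter(spacer)
    return (count['G'] + count['C']) * 100 / len(spacer)

print(gc_percent("ACGTGGCA"))  # 62.5 for this made-up spacer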
b88e8922fc2ced5a9409ad5914c870c1fce2e6e0 | 2,247 | py | Python | akm/cmd/cmdRotate.py | paloth/aws-accesskey-manager | 46c420621063e314b3455ccf0a31a8c6954f251c | [
"MIT"
] | 1 | 2020-08-12T11:27:37.000Z | 2020-08-12T11:27:37.000Z | akm/cmd/cmdRotate.py | paloth/aws-accesskey-manager | 46c420621063e314b3455ccf0a31a8c6954f251c | [
"MIT"
] | 5 | 2020-06-24T12:50:47.000Z | 2020-08-11T13:49:25.000Z | akm/cmd/cmdRotate.py | paloth/aws-accesskey-manager | 46c420621063e314b3455ccf0a31a8c6954f251c | [
"MIT"
] | null | null | null | import sys
from datetime import datetime, timedelta

import boto3
from botocore.exceptions import ClientError
from dateutil.tz import tzutc

from akm.internal import aws_config, keymgt


def change_key(profile_config, profile_path, iam, deactivate, profile, user_name):
    key = keymgt.renew(profile_config, iam, deactivate, profile, user_name)
    aws_config.update_profile(profile_path, profile, profile_config, key)
    print("Your access key has been renewed")


def execute(profile_path, deactivate, expire, profile, user_name, yes):
    print(f"Access Key rotation for profile {profile} ...")
    profile_config = aws_config.get(profile_path)
    if not profile_config.has_section(profile):
        sys.exit(f"The profile {profile} does not exist in your credential file\nPlease select a valid profile")
    if not profile_config.has_option(profile, "aws_access_key_id"):
        sys.exit(f"The profile {profile} does not have an access key id configured")
    access_key_id = aws_config.get_profile_ak_id(profile, profile_config)
    session = boto3.session.Session(profile_name=profile)
    iam = session.client("iam")
    try:
        access_keys = iam.list_access_keys(UserName=user_name)
    except ClientError as error:
        raise error
    access_key = keymgt.check_access_key_exist(access_key_id, access_keys)
    if keymgt.is_access_key_expired(access_key["CreateDate"], expire) is True:
        print("Your access key is expired ...")
        change_key(profile_config, profile_path, iam, deactivate, profile, user_name)
    else:
        if yes:
            change_key(profile_config, profile_path, iam, deactivate, profile, user_name)
        else:
            print("Your access key is not expired ...")
            answer = input("Do you want to change it anyway? (Only 'yes' is accepted) ")
            # .lower is a method: it must be called, otherwise the comparison is always False
            if answer.lower() == "yes":
                change_key(profile_config, profile_path, iam, deactivate, profile, user_name)
            else:
                remaining_days = (access_key["CreateDate"] + timedelta(days=expire)) - datetime.now().replace(tzinfo=tzutc())
                print(f"Your access key will expire in {remaining_days.days} days ")
                sys.exit("The key has not been renewed")
| 38.084746 | 125 | 0.701825 | 304 | 2,247 | 4.986842 | 0.309211 | 0.083113 | 0.059367 | 0.079156 | 0.287599 | 0.21504 | 0.21504 | 0.21504 | 0.172823 | 0.172823 | 0 | 0.001123 | 0.207388 | 2,247 | 58 | 126 | 38.741379 | 0.850084 | 0 | 0 | 0.15 | 0 | 0 | 0.214953 | 0.009346 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.15 | 0 | 0.2 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
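A minimal sketch of how the execute function from cmdRotate.py above might be driven from a small argparse wrapper. The flag names, defaults and credentials path here are assumptions made for illustration; the real project wires this through its own CLI entry point.

import argparse
from akm.cmd import cmdRotate  # assumes the akm package is importable

parser = argparse.ArgumentParser(description="Rotate an AWS access key (illustrative wrapper)")
parser.add_argument("--profile", default="default")       # assumed flag name
parser.add_argument("--user-name", required=True)         # IAM user owning the key
parser.add_argument("--expire", type=int, default=90)     # key age threshold in days (assumption)
parser.add_argument("--deactivate", action="store_true")  # deactivate the old key instead of deleting it (assumption)
parser.add_argument("--yes", action="store_true")         # rotate even if the key is not expired
args = parser.parse_args()

cmdRotate.execute(
    profile_path="~/.aws/credentials",  # assumed credentials file location
    deactivate=args.deactivate,
    expire=args.expire,
    profile=args.profile,
    user_name=args.user_name,
    yes=args.yes,
)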
b88f51f4917eeed923bc67a6ccf5758356f1d067 | 2,137 | py | Python | code2vec/predict.py | ikasat/code2vec | 49d80bba24b66046179db8873fc7928b1cd0e53c | [
"Apache-2.0"
] | null | null | null | code2vec/predict.py | ikasat/code2vec | 49d80bba24b66046179db8873fc7928b1cd0e53c | [
"Apache-2.0"
] | null | null | null | code2vec/predict.py | ikasat/code2vec | 49d80bba24b66046179db8873fc7928b1cd0e53c | [
"Apache-2.0"
] | null | null | null | import atexit
import readline
import traceback


def _show_result(result):
    for word, distance in result:
        print('{:6.4f} {}'.format(distance, word))


def predict(model, words):
    positive_words = []
    negative_words = []
    for word in words:
        if word.startswith('+'):
            positive_words.append(word[1:])
        elif word.startswith('-'):
            negative_words.append(word[1:])
        else:
            positive_words.append(word)
    result = model.most_similar(positive=positive_words, negative=negative_words)
    _show_result(result)


def _parse_line_in_shell(line):
    line = line.strip()
    words_by_pm = {'+': [], '-': []}
    mode = '+'
    pos = 0
    while pos < len(line):
        p = line.find('+', pos)
        p = p if p >= 0 else len(line)
        m = line.find('-', pos)
        m = m if m >= 0 else len(line)
        if p > m:
            words_by_pm[mode] += [w for w in line[pos:m].split() if w]
            mode = '-'
            pos = m + 1
        else:
            words_by_pm[mode] += [w for w in line[pos:p].split() if w]
            mode = '+'
            pos = p + 1
    return words_by_pm['+'], words_by_pm['-']


def shell(model, history_file: str):
    try:
        readline.read_history_file(history_file)
        h_len = readline.get_current_history_length()
    except FileNotFoundError:
        open(history_file, 'wb').close()
        h_len = 0

    def save(prev_h_len, histfile):
        new_h_len = readline.get_current_history_length()
        readline.set_history_length(1000)
        readline.append_history_file(new_h_len - prev_h_len, histfile)

    atexit.register(save, h_len, history_file)
    while True:
        try:
            line = input('> ')
            positive, negative = _parse_line_in_shell(line)
            if len(positive) + len(negative) > 0:
                result = model.most_similar(positive=positive, negative=negative)
                _show_result(result)
        except KeyError as e:
            print(' '.join(e.args))
        except Exception:
            # print the traceback instead of discarding the formatted string
            print(traceback.format_exc())
            print()
            break
| 29.273973 | 81 | 0.568554 | 270 | 2,137 | 4.274074 | 0.281481 | 0.024263 | 0.038995 | 0.033795 | 0.233969 | 0.17331 | 0.107452 | 0.046794 | 0.046794 | 0.046794 | 0 | 0.010142 | 0.307908 | 2,137 | 72 | 82 | 29.680556 | 0.770115 | 0 | 0 | 0.129032 | 0 | 0 | 0.012167 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.080645 | false | 0 | 0.048387 | 0 | 0.145161 | 0.048387 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
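The _parse_line_in_shell helper above splits an interactive query line into positive and negative word lists on '+'/'-' markers. A short sketch of the expected behaviour, assuming predict.py is importable as code2vec.predict (the import path is an assumption):

from code2vec.predict import _parse_line_in_shell

# Words before any marker (and after '+') are positive; words after '-' are negative.
positive, negative = _parse_line_in_shell("king - man + woman")
print(positive)  # expected: ['king', 'woman']
print(negative)  # expected: ['man']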
b88fdedc0827f35dfcf95ff582477d7c65567851 | 561 | py | Python | src/iombian_config_service.py | Tknika/iombian-bluetooth-configurator | 78c04ce1dce4673601f2e8f2d170f4f5c0026d63 | [
"Apache-2.0"
] | null | null | null | src/iombian_config_service.py | Tknika/iombian-bluetooth-configurator | 78c04ce1dce4673601f2e8f2d170f4f5c0026d63 | [
"Apache-2.0"
] | 1 | 2021-02-09T11:51:41.000Z | 2021-02-09T11:56:18.000Z | src/iombian_config_service.py | Tknika/iombian-bluetooth-configurator | 78c04ce1dce4673601f2e8f2d170f4f5c0026d63 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
from pybleno import BlenoPrimaryService

from iombian_config_length_characteristic import IoMBianConfigLengthCharacteristic
from iombian_config_values_characteristic import IoMBianConfigValuesCharacteristic


class IoMBianConfigService(BlenoPrimaryService):

    def __init__(self, iombian_config):
        BlenoPrimaryService.__init__(self, {
            'uuid': 'ec00',
            'characteristics': [
                IoMBianConfigLengthCharacteristic(iombian_config),
                IoMBianConfigValuesCharacteristic(iombian_config)
]}) | 37.4 | 82 | 0.768271 | 42 | 561 | 9.857143 | 0.547619 | 0.157005 | 0.082126 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006452 | 0.171123 | 561 | 15 | 83 | 37.4 | 0.883871 | 0.037433 | 0 | 0 | 0 | 0 | 0.042593 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.272727 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
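A sketch of how the IoMBianConfigService above would typically be advertised with pybleno. The advertised name, the shape of iombian_config and the event wiring are assumptions; consult the repository's main module for the real setup.

from pybleno import Bleno
from iombian_config_service import IoMBianConfigService  # import path is an assumption

iombian_config = {"hostname": "iombian", "timezone": "UTC"}  # made-up config payload
bleno = Bleno()
service = IoMBianConfigService(iombian_config)

def on_state_change(state):
    # Start advertising once the Bluetooth adapter is powered on
    if state == "poweredOn":
        bleno.startAdvertising("IoMBian", [service.uuid])
    else:
        bleno.stopAdvertising()

bleno.on("stateChange", on_state_change)
bleno.on("advertisingStart", lambda error: None if error else bleno.setServices([service]))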
b890e8e03e349ecb355fa98019f0962299cc861d | 8,269 | py | Python | src/carts/views.py | LABETE/ecommerce | 821105c439b1b320b39fc7658dbf5f0452af385b | [
"MIT"
] | null | null | null | src/carts/views.py | LABETE/ecommerce | 821105c439b1b320b39fc7658dbf5f0452af385b | [
"MIT"
] | null | null | null | src/carts/views.py | LABETE/ecommerce | 821105c439b1b320b39fc7658dbf5f0452af385b | [
"MIT"
] | null | null | null | from django.conf import settings
from django.contrib import messages
from django.contrib.auth.forms import AuthenticationForm
from django.core.urlresolvers import reverse
from django.views.generic.base import View
from django.views.generic.detail import SingleObjectMixin, DetailView
from django.views.generic.edit import FormMixin
from django.http import HttpResponseRedirect, JsonResponse, Http404
from django.shortcuts import render, get_object_or_404, redirect
from products.models import Variation
from orders.forms import GuestCheckoutForm
from .mixins import CartOrderMixin
from orders.models import UserCheckout, Order, UserAddress
from .models import Cart, CartItem
import braintree
if settings.DEBUG:
braintree.Configuration.configure(
braintree.Environment.Sandbox,
merchant_id=settings.BRAINTREE_MERCHANT_ID,
public_key=settings.BRAINTREE_PUBLIC,
private_key=settings.BRAINTREE_PRIVATE,
)
class ItemCountView(View):
def get(self, *args, **kwargs):
if self.request.is_ajax():
cart_id = self.request.session.get("cart_id")
if cart_id == None:
count = 0
else:
cart = Cart.objects.get(id=cart_id)
count = cart.items.count()
self.request.session["cart_item_count"] = count
return JsonResponse({"count": count})
else:
raise Http404
class CartView(View):
model = Cart
template_name = "carts/view.html"
def get_object(self, *args, **kwargs):
self.request.session.set_expiry(0)
cart_id = self.request.session.get("cart_id")
if cart_id is None:
cart = Cart()
cart.save()
cart_id = cart.id
self.request.session["cart_id"] = cart_id
cart = Cart.objects.get(id=cart_id)
if self.request.user.is_authenticated():
cart.user = self.request.user
cart.save()
return cart
def get(self, *args, **kwargs):
cart = self.get_object()
item_id = self.request.GET.get("item")
delete_item = self.request.GET.get("delete", False)
item_added = False
flash_message = ""  # default so the ajax response below cannot raise a NameError when no item id is passed
if item_id:
item_instance = get_object_or_404(Variation, id=item_id)
qty = self.request.GET.get("qty", 1)
try:
if int(qty) < 1:
delete_item = True
except:
raise Http404
cart_item, created = CartItem.objects.get_or_create(cart=cart, item=item_instance)
if created:
flash_message = "Item successfully added"
item_added = True
if delete_item:
flash_message = "Item successfully removed"
cart_item.delete()
else:
flash_message = "Item successfully updated"
cart_item.quantity = qty
cart_item.save()
if not self.request.is_ajax():
return HttpResponseRedirect(reverse("cart"))
if self.request.is_ajax():
try:
total = cart_item.line_item_total
except:
total = None
try:
subtotal = cart_item.cart.subtotal
except:
subtotal = None
try:
tax_total = cart_item.cart.tax_total
except:
tax_total = None
try:
total = cart_item.cart.total
except:
total = None
try:
total_items = cart_item.cart.items.count()
except:
total_items = 0
data = {
"deleted": delete_item,
"item_added": item_added,
"line_total": total,
"subtotal": subtotal,
"tax_total": tax_total,
"cart_total": total,
"flash_message": flash_message,
"total_items": total_items
}
return JsonResponse(data)
context = {
"object": cart
}
template = self.template_name
return render(self.request, template, context)
class CheckoutView(CartOrderMixin, FormMixin, DetailView):
model = Cart
template_name = "carts/checkout_view.html"
form_class = GuestCheckoutForm
def get_object(self, *args, **kwargs):
cart = self.get_cart()
if cart is None:
return None
return cart
def get_context_data(self, *args, **kwargs):
context = super(CheckoutView, self).get_context_data(*args, **kwargs)
user_can_continue = False
user_check_id = self.request.session.get("user_checkout_id")
if self.request.user.is_authenticated():
user_can_continue = True
user_checkout, created = UserCheckout.objects.get_or_create(email=self.request.user.email)
user_checkout.user = self.request.user
user_checkout.save()
context["client_token"] = user_checkout.get_client_token()
self.request.session["user_checkout_id"] = user_checkout.id
elif not self.request.user.is_authenticated() and user_check_id == None:
context["login_form"] = AuthenticationForm()
context["next_url"] = self.request.build_absolute_uri()
else:
pass
if user_check_id != None:
user_can_continue = True
if self.request.user.is_authenticated():
user_checkout_2 = UserCheckout.objects.get(id=user_check_id)
context["client_token"] = user_checkout_2.get_client_token()
context["order"] = self.get_order()
context["user_can_continue"] = user_can_continue
context["form"] = self.get_form()
return context
def post(self, *args, **kwargs):
self.object = self.get_object()
form = self.get_form()
if form.is_valid():
email = form.cleaned_data.get("email")
user_checkout, created = UserCheckout.objects.get_or_create(email=email)
self.request.session["user_checkout_id"] = user_checkout.id
return self.form_valid(form)
return self.form_invalid(form)
def get_success_url(self):
return reverse("checkout")
def get(self, *args, **kwargs):
get_data = super(CheckoutView, self).get(*args, **kwargs)
cart = self.get_object()
if cart == None:
return redirect("cart")
new_order = self.get_order()
user_checkout_id = self.request.session.get("user_checkout_id")
if user_checkout_id != None:
user_checkout = UserCheckout.objects.get(id=user_checkout_id)
if new_order.billing_address == None or new_order.shipping_address == None:
return redirect("order_address")
new_order.user = user_checkout
new_order.save()
return get_data
class CheckoutFinalView(CartOrderMixin, View):
def post(self, *args, **kwargs):
order = self.get_order()
order_total = order.order_total
nonce = self.request.POST.get("payment_method_nonce")
if nonce:
result = braintree.Transaction.sale({
"amount": order_total,
"payment_method_nonce": nonce,
"billing": {
"postal_code": "{0}".format(order.billing_address.zipcode)
},
"options": {
"submit_for_settlement": True
}
})
if result.is_success:
order.order_id = result.transaction.id
order.mark_completed(order_id=result.transaction.id)
messages.success(self.request, "Thank you for your oder.")
del self.request.session["cart_id"]
del self.request.session["order_id"]
else:
messages.error(self.request, "{0}".format(result.message))
return redirect("checkout")
return redirect("order_detail", pk=order.pk)
def get(self, *args, **kwargs):
return redirect("checkout")
| 36.588496 | 109 | 0.587133 | 907 | 8,269 | 5.144432 | 0.184123 | 0.068367 | 0.042435 | 0.021432 | 0.242392 | 0.136091 | 0.110159 | 0.077154 | 0.077154 | 0.017574 | 0 | 0.004266 | 0.319628 | 8,269 | 225 | 110 | 36.751111 | 0.825098 | 0 | 0 | 0.258706 | 0 | 0 | 0.070262 | 0.005442 | 0 | 0 | 0 | 0 | 0 | 1 | 0.049751 | false | 0.004975 | 0.074627 | 0.00995 | 0.253731 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
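The cart and checkout views above are class-based, so they have to be hooked up through URL patterns; the views call reverse() on names such as "cart" and "checkout". A minimal sketch of what that wiring could look like for this Django version (the paths are assumptions, and the order_address/order_detail names referenced by CheckoutView live in the orders app):

# urls.py (illustrative)
from django.conf.urls import url
from carts.views import CartView, CheckoutView, CheckoutFinalView, ItemCountView

urlpatterns = [
    url(r"^cart/$", CartView.as_view(), name="cart"),
    url(r"^cart/count/$", ItemCountView.as_view(), name="item_count"),
    url(r"^checkout/$", CheckoutView.as_view(), name="checkout"),
    url(r"^checkout/final/$", CheckoutFinalView.as_view(), name="checkout_final"),
]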
b893bb6daba2c217d25e39e2e0e298d5c56d6c75 | 3,286 | py | Python | recipes/conv_glu/librispeech/prepare.py | Zilv1128/test1 | 49fbb1392e69b5194c077df9847505ec995b4e3d | [
"BSD-3-Clause"
] | 5,921 | 2017-12-29T17:04:46.000Z | 2021-04-16T00:37:35.000Z | recipes/conv_glu/librispeech/prepare.py | piEYj/wav2letter | 49fbb1392e69b5194c077df9847505ec995b4e3d | [
"BSD-3-Clause"
] | 949 | 2018-01-01T06:36:58.000Z | 2021-04-16T06:49:05.000Z | recipes/conv_glu/librispeech/prepare.py | piEYj/wav2letter | 49fbb1392e69b5194c077df9847505ec995b4e3d | [
"BSD-3-Clause"
] | 1,032 | 2017-12-30T09:47:51.000Z | 2021-04-11T11:40:00.000Z | """
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the MIT-style license found in the
LICENSE file in the root directory of this source tree.
----------
Script to prepare recipe to train/eval model on Librispeech in wav2letter++ pipelines
Please install `kenlm` on your own - https://github.com/kpu/kenlm
Command : python3 prepare.py --data_dst [...] --model_dst [...] --kenlm [...]/kenlm/
Replace [...] with appropriate paths
"""
from __future__ import absolute_import, division, print_function, unicode_literals

import argparse
import os
from collections import defaultdict

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Librispeech Dataset creation.")
    parser.add_argument(
        "--data_dst", help="data destination directory", default="./librispeech"
    )
    parser.add_argument(
        "--model_dst",
        help="model auxiliary files destination directory",
        default="./conv_glu_librispeech_char",
    )
    parser.add_argument("--kenlm", help="location to installed kenlm directory")
    parser.add_argument(
        "-p", "--process", help="# of process for Multiprocessing", default=8, type=int
    )
    args = parser.parse_args()

    os.system(
        "python3 {}/../../../data/librispeech/prepare.py --dst {} -p {}".format(
            os.path.dirname(os.path.abspath(__file__)), args.data_dst, args.process
        )
    )

    subpaths = {
        "train": ["train-clean-100", "train-clean-360", "train-other-500"],
        "dev": ["dev-clean", "dev-other"],
        "test": ["test-clean", "test-other"],
    }

    lists_path = os.path.join(args.data_dst, "lists")
    am_path = os.path.join(args.model_dst, "am")
    decoder_path = os.path.join(args.model_dst, "decoder")
    os.makedirs(am_path, exist_ok=True)
    os.makedirs(decoder_path, exist_ok=True)

    # Generating am/*
    print("Generating tokens.txt for acoustic model training", flush=True)
    with open(os.path.join(am_path, "tokens.txt"), "w") as fout:
        fout.write("|\n")
        fout.write("'\n")
        for alphabet in range(ord("a"), ord("z") + 1):
            fout.write(chr(alphabet) + "\n")

    print(
        "Generating lexicon.txt (word -> tokens) for acoustic model training",
        flush=True,
    )
    word_dict = defaultdict(set)
    for key, names in subpaths.items():
        for name in names:
            with open(os.path.join(lists_path, name + ".lst"), "r") as flist:
                for line in flist:
                    transcription = line.strip().split(" ")[3:]
                    word_dict[key].update(transcription)
    lexicon_words = sorted(word_dict["train"] | word_dict["dev"])
    with open(os.path.join(am_path, "lexicon_train+dev.txt"), "w") as f:
        for word in lexicon_words:
            f.write(
                "{word}\t{tokens} |\n".format(word=word, tokens=" ".join(list(word)))
            )

    # Generating decoder/*
    cmd = [
        "python3 {}/../../utilities/prepare_librispeech_official_lm.py",
        "--dst {}",
        "--kenlm {}",
    ]
    os.system(
        " ".join(cmd).format(
            os.path.dirname(os.path.abspath(__file__)), decoder_path, args.kenlm
        )
    )
    print("Done!", flush=True)
| 33.191919 | 87 | 0.615338 | 407 | 3,286 | 4.820639 | 0.402948 | 0.030581 | 0.030581 | 0.021407 | 0.139653 | 0.121305 | 0.087666 | 0.036697 | 0 | 0 | 0 | 0.006319 | 0.229458 | 3,286 | 98 | 88 | 33.530612 | 0.768562 | 0.15916 | 0 | 0.073529 | 0 | 0 | 0.255265 | 0.050835 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.058824 | 0 | 0.058824 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
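The lexicon written by the script above maps every word to its character tokens followed by the word-boundary symbol "|". A tiny self-contained sketch of that formatting (the example words are made up):

def lexicon_line(word: str) -> str:
    # One line per word: word<TAB>c h a r s |
    return "{word}\t{tokens} |".format(word=word, tokens=" ".join(list(word)))

for w in ["hello", "world"]:
    print(lexicon_line(w))
# hello	h e l l o |
# world	w o r l d |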
b896ffd71dab064d1d9b630691e77203048df060 | 5,149 | py | Python | samples-logs-back-end/app/main.py | enasequence/covid-workflow-manager | 185899b236f925dadb45ea5f224713202c4e7b00 | [
"Apache-2.0"
] | 5 | 2020-06-29T19:50:36.000Z | 2022-01-31T09:16:29.000Z | samples-logs-back-end/app/main.py | enasequence/covid-workflow-manager | 185899b236f925dadb45ea5f224713202c4e7b00 | [
"Apache-2.0"
] | 10 | 2020-06-29T19:48:57.000Z | 2022-02-13T11:54:06.000Z | samples-logs-back-end/app/main.py | enasequence/covid-workflow-manager | 185899b236f925dadb45ea5f224713202c4e7b00 | [
"Apache-2.0"
] | 2 | 2020-06-25T13:40:52.000Z | 2021-02-03T20:23:09.000Z | from pymongo import MongoClient
from flask_cors import CORS, cross_origin
from flask import Flask, request

app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'


@app.route("/phylogeny")
@cross_origin()
def phylogenetic_tree():
    client = MongoClient('mongodb://samples-logs-db-svc')
    db = client.samples
    size, start = (request.args.get(key) for key in ['size', 'start'])
    if size and start:
        return {'results': list(db.phylo.find({}, {'_id': 0}).skip(int(start)).limit(int(size)))}
    elif size and not start:
        return {'results': list(db.phylo.find({}, {'_id': 0}).limit(int(size)))}
    else:
        return {'results': list(db.phylo.find({}, {'_id': 0}))}


@app.route("/phylogeny_suspended")
def phylogenetic_tree_suspended():
    client = MongoClient('mongodb://samples-logs-db-svc')
    db = client.samples
    return {'results': list(db.suspended.find({}, {'_id': 0}))}


@app.route("/lineages")
def lineages():
    client = MongoClient('mongodb://samples-logs-db-svc')
    db = client.samples
    size, start = (request.args.get(key) for key in ['size', 'start'])
    if size and start:
        return {'results': list(db.lineages.find({}, {'_id': 0}).skip(int(start)).limit(int(size)))}
    elif size and not start:
        return {'results': list(db.lineages.find({}, {'_id': 0}).limit(int(size)))}
    else:
        return {'results': list(db.lineages.find({}, {'_id': 0}))}


@app.route("/lineages/metadata")
def lineages_metadata():
    client = MongoClient('mongodb://samples-logs-db-svc')
    db = client.samples
    return {
        'total_count': db.lineages.count_documents({}),
        'has_lineage_count': db.lineages.count_documents({'has_lineage': True}),
        'last_updated': db.lineages.find_one({}).get('_id').generation_time,
    }


@app.route("/phylogeny/metadata")
def phylogeny_metadata():
    client = MongoClient('mongodb://samples-logs-db-svc')
    db = client.samples
    return {
        "total_count": db.phylo.count_documents({}),
        "has_phylogeny_count": db.phylo.count_documents({'phylogeny': True}),
        "last_updated": db.phylo.find_one({}).get('_id').generation_time,
    }


@app.route("/jovian")
def jovian_samples():
    # Getting access to MongoDB
    client = MongoClient('mongodb://samples-logs-db-svc')
    db = client.samples
    results = list()
    for sample in db.samples.find({'pipeline_name': 'Jovian'}, {'_id': 0}):
        results.append(sample)
    return {'results': results}


@app.route("/jovian/<run_id>")
def jovian_samples_details(run_id):
    # Getting access to MongoDB
    client = MongoClient('mongodb://samples-logs-db-svc')
    db = client.samples
    return {'results': db.samples.find_one({'id': run_id}, {'_id': 0})}


@app.route("/ont")
def ont_samples():
    # Getting access to MongoDB
    client = MongoClient('mongodb://samples-logs-db-svc')
    db = client.samples
    results = list()
    for sample in db.samples.find({'pipeline_name': 'ONT'}, {'_id': 0}):
        results.append(sample)
    return {'results': results}


@app.route("/ont/<run_id>")
def ont_samples_details(run_id):
    # Getting access to MongoDB
    client = MongoClient('mongodb://samples-logs-db-svc')
    db = client.samples
    return {'results': db.samples.find_one({'id': run_id}, {'_id': 0})}


@app.route("/ont/<stage>/<status>")
def ont_samples_filters(stage, status):
    # TODO: change front- or back-end to have consistent names
    xref = {
        'import_from_ena': 'ena_import',
        'pipeline_analysis': 'pipeline_analysis',
        'export_to_ena': 'ena_export'
    }
    # Getting access to MongoDB
    client = MongoClient('mongodb://samples-logs-db-svc')
    db = client.samples
    results = list()
    for sample in db.samples.find({'pipeline_name': 'ONT'}, {'_id': 0}):
        item = sample[stage]['status']
        xref_status = xref[stage]
        if get_status(item, xref_status) == status:
            results.append(sample)
    return {'results': results}


def get_status(item, key):
    statuses = {
        'ena_import': {
            'success': 'download finished',
            'failed': 'download failed'
        },
        'pipeline_analysis': {
            'success': 'pipeline_finished',
            'started': 'pipeline_started'
        },
        'ena_export': {
            'success': 'export_finished',
            'started': 'export_started'
        }
    }
    if key == 'ena_import':
        if statuses[key]['success'] in item:
            return 'Success'
        elif statuses[key]['failed'] in item:
            return 'Failed'
        else:
            return 'Undefined'
    else:
        if statuses[key]['success'] in item:
            return 'Success'
        elif statuses[key]['started'] in item:
            all_indices = [el for el in item if el == statuses[key]['started']]
            if len(all_indices) >= 6:
                return 'Failed'
            else:
                return 'Processing'
        else:
            return 'Undefined'


if __name__ == "__main__":
    # Only for debugging while developing
    app.run(host='0.0.0.0', debug=True, port=80)
| 31.396341 | 100 | 0.611187 | 628 | 5,149 | 4.861465 | 0.176752 | 0.051097 | 0.078611 | 0.101539 | 0.63282 | 0.61546 | 0.589912 | 0.566656 | 0.535539 | 0.527023 | 0 | 0.004741 | 0.221596 | 5,149 | 163 | 101 | 31.588957 | 0.756986 | 0.043115 | 0 | 0.426357 | 0 | 0 | 0.228705 | 0.063224 | 0 | 0 | 0 | 0.006135 | 0 | 1 | 0.085271 | false | 0 | 0.046512 | 0 | 0.294574 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
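The get_status helper above collapses a list of pipeline log messages into a single state. A short sketch of how it behaves, assuming the module is importable as app.main (the import path is an assumption):

from app.main import get_status

print(get_status(["download finished"], "ena_import"))            # 'Success'
print(get_status(["pipeline_started"], "pipeline_analysis"))      # 'Processing' (fewer than 6 retries)
print(get_status(["pipeline_started"] * 6, "pipeline_analysis"))  # 'Failed' (6 or more retries)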
b899610b2d2bbedc7091cb000500742a478aed6e | 6,868 | py | Python | experiments/n_tree_experiment.py | pyensemble/wildwood | b261cbd7d0b425b50647f719ab99c1d89f477d5c | [
"BSD-3-Clause"
] | 22 | 2021-06-24T11:30:03.000Z | 2022-03-09T00:59:30.000Z | experiments/n_tree_experiment.py | pyensemble/wildwood | b261cbd7d0b425b50647f719ab99c1d89f477d5c | [
"BSD-3-Clause"
] | 65 | 2021-03-13T17:50:03.000Z | 2022-02-22T16:50:02.000Z | experiments/n_tree_experiment.py | pyensemble/wildwood | b261cbd7d0b425b50647f719ab99c1d89f477d5c | [
"BSD-3-Clause"
] | 3 | 2021-03-04T18:44:10.000Z | 2022-01-26T17:28:35.000Z | # Authors: Stephane Gaiffas <stephane.gaiffas@gmail.com>
# License: BSD 3 clause
"""
This script produces Figure 2 from the WildWood's paper.
"""
import sys
import subprocess
from datetime import datetime
import pickle as pkl
import numpy as np
import logging
import pandas as pd
from sklearn.metrics import (
roc_auc_score,
average_precision_score,
log_loss,
accuracy_score,
)
from sklearn.preprocessing import LabelBinarizer
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
sys.path.extend([".", ".."])
from wildwood.datasets import load_adult, load_bank, load_car, load_default_cb
from wildwood.forest import ForestClassifier
loaders = [load_adult, load_bank, load_default_cb, load_car]
random_state = 42
classifiers = [
lambda n: (
"RFW",
RandomForestClassifier(n_estimators=n, n_jobs=-1, random_state=random_state,),
),
lambda n: (
"WildWood",
ForestClassifier(
n_estimators=n, multiclass="ovr", n_jobs=-1, random_state=random_state,
),
),
lambda n: (
"ET",
ExtraTreesClassifier(n_estimators=n, n_jobs=-1, random_state=random_state,),
),
]
logging.basicConfig(
level=logging.INFO, format="%(asctime)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)
data_extraction = {
"RandomForestClassifier": {
"one_hot_encode": True,
"standardize": False,
"drop": None,
"pd_df_categories": False,
},
"WildWood": {
"one_hot_encode": False,
"standardize": False,
"drop": None,
"pd_df_categories": False,
},
"ForestClassifier": {
"one_hot_encode": True,
"standardize": False,
"drop": None,
"pd_df_categories": False,
},
"ExtraTreesClassifier": {
"one_hot_encode": True,
"standardize": False,
"drop": None,
"pd_df_categories": False,
},
}
def fit_kwargs_generator(clf_name, y_train, dataset):
if clf_name == "RandomForestClassifier":
return {}
elif clf_name == "ForestClassifier":
return {"categorical_features": dataset.categorical_features_}
elif clf_name == "ExtraTreesClassifier":
return {}
else:
print("ERROR : NOT Found : ", clf_name)
# Number of time each experiment is repeated, one for each seed (leading
data_random_states = list(range(42, 42 + 2))
# data_random_states = list(range(42, 42 + 20))
col_data = []
col_classifier = []
col_classifier_title = []
col_n_trees = []
col_x_pos = []
col_repeat = []
col_roc_auc = []
col_roc_auc_weighted = []
col_avg_precision_score = []
col_avg_precision_score_weighted = []
col_log_loss = []
col_accuracy = []
n_datasets = None # set to None to use all
n_treess = [1, 2, 5, 10, 20, 50, 100, 200]
for x, n in enumerate(n_treess):
for Clf in classifiers:
clf_title, clf = Clf(n)
clf_name = clf.__class__.__name__
for loader in loaders[:n_datasets]:
dataset = loader()
data_name = dataset.name
task = dataset.task
for key, val in data_extraction[clf_name].items():
setattr(dataset, key, val)
logging.info("-" * 64)
logging.info("Launching task for dataset %r" % dataset)
for repeat, data_random_state in enumerate(data_random_states):
clf_title, clf = Clf(n)
logging.info(
"Repeat: %d random_state: %d" % (repeat, data_random_state)
)
col_data.append(data_name)
col_classifier.append(clf_name)
col_classifier_title.append(clf_title)
col_n_trees.append(n)
col_x_pos.append(x + 1)
col_repeat.append(repeat)
X_train, X_test, y_train, y_test = dataset.extract(
random_state=data_random_state
)
y_test_binary = LabelBinarizer().fit_transform(y_test)
clf.fit(
X_train,
y_train,
**(fit_kwargs_generator(clf_name, y_train, dataset))
)
y_scores = clf.predict_proba(X_test)
y_pred = np.argmax(y_scores, axis=1)
if task == "binary-classification":
roc_auc = roc_auc_score(y_test, y_scores[:, 1])
roc_auc_weighted = roc_auc
avg_precision_score = average_precision_score(
y_test, y_scores[:, 1]
)
avg_precision_score_weighted = avg_precision_score
log_loss_ = log_loss(y_test, y_scores)
accuracy = accuracy_score(y_test, y_pred)
elif task == "multiclass-classification":
roc_auc = roc_auc_score(
y_test, y_scores, multi_class="ovr", average="macro"
)
roc_auc_weighted = roc_auc_score(
y_test, y_scores, multi_class="ovr", average="weighted"
)
avg_precision_score = average_precision_score(
y_test_binary, y_scores
)
avg_precision_score_weighted = average_precision_score(
y_test_binary, y_scores, average="weighted"
)
log_loss_ = log_loss(y_test, y_scores)
accuracy = accuracy_score(y_test, y_pred)
else:
raise ValueError("Task %s not understood" % task)
col_roc_auc.append(roc_auc)
col_roc_auc_weighted.append(roc_auc_weighted)
col_avg_precision_score.append(avg_precision_score)
col_avg_precision_score_weighted.append(avg_precision_score_weighted)
col_log_loss.append(log_loss_)
col_accuracy.append(accuracy)
results = pd.DataFrame(
{
"dataset": col_data,
"classifier": col_classifier,
"classifier_title": col_classifier_title,
"repeat": col_repeat,
"n_trees": col_n_trees,
"x_pos": col_x_pos,
"roc_auc": col_roc_auc,
"roc_auc_w": col_roc_auc_weighted,
"avg_prec": col_avg_precision_score,
"avg_prec_w": col_avg_precision_score_weighted,
"log_loss": col_log_loss,
"accuracy": col_accuracy,
}
)
print(results)
now = datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
# Get the commit number as a string
commit = subprocess.check_output(["git", "rev-parse", "--short", "HEAD"])
commit = commit.decode("utf-8").strip()
with open("experiments/ntrees_experiment_" + now + ".pickle", "wb") as f:
pkl.dump({"datetime": now, "commit": commit, "results": results}, f)
| 31.64977 | 86 | 0.591584 | 795 | 6,868 | 4.772327 | 0.254088 | 0.030047 | 0.05825 | 0.031629 | 0.333421 | 0.294149 | 0.280179 | 0.239589 | 0.148129 | 0.137059 | 0 | 0.008389 | 0.305766 | 6,868 | 216 | 87 | 31.796296 | 0.787332 | 0.044846 | 0 | 0.19209 | 0 | 0 | 0.116236 | 0.018329 | 0 | 0 | 0 | 0 | 0 | 1 | 0.00565 | false | 0 | 0.067797 | 0 | 0.090395 | 0.011299 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
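The experiment script above serialises its results with pickle. A sketch of how such a results file could be loaded back for inspection or plotting; the file name below just follows the script's naming pattern with a made-up timestamp.

import pickle as pkl

# File name follows the script's pattern: experiments/ntrees_experiment_<timestamp>.pickle
with open("experiments/ntrees_experiment_2021-01-01-00:00:00.pickle", "rb") as f:
    payload = pkl.load(f)

results = payload["results"]  # pandas DataFrame built by the script
print(payload["commit"])      # git commit the run was made from
print(results.groupby(["classifier_title", "n_trees"])["roc_auc"].mean())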
b899c323dcd1c6a33e25a6835d01a3040b54d478 | 2,113 | py | Python | pps/cli.py | SeungUkLee/preview-pipfile-script | d28d963f1feee9ed1621a04b25c02d34a0919829 | [
"MIT"
] | null | null | null | pps/cli.py | SeungUkLee/preview-pipfile-script | d28d963f1feee9ed1621a04b25c02d34a0919829 | [
"MIT"
] | null | null | null | pps/cli.py | SeungUkLee/preview-pipfile-script | d28d963f1feee9ed1621a04b25c02d34a0919829 | [
"MIT"
] | null | null | null | """
pps provides a CLI that previews Pipfile scripts and runs them.

$ pps
$ pps --show
"""
import argparse
import os

from .color import CYAN, ENDC
from .helper import (
    exception,
    inquirer_prompt,
    read_file,
    run_script,
    toml_parsing,
)
from .message import EXE_SCRIPT_ERR_MSG


@exception
def run_pps_cmd(args, file_path, test=False):
    """
    Run the 'pps' command.

    :param args: Arguments to distinguish test or run
    :param file_path: Pipfile path.
    :param test: Argument to distinguish whether it is a test or not.
    :return: opt: CLI option (ex. --show).
             res: Result after running the script.
             err: Whether an error occurred.
    """
    scripts = toml_parsing(read_file(file_path))['scripts']
    opt, res, err = None, None, None
    if args.show:
        opt = 'show'
        res = [
            '{0}: "{1}"'.format(script, cmd)
            for script, cmd in sorted(scripts.items())
        ]
    elif not test:
        ans = inquirer_prompt(scripts)
        if ans is None:
            raise KeyboardInterrupt
        cmd = ans['cmd']
        res = run_script(scripts[cmd])
        err = -1 if res != 0 else 1
    return opt, res, err


def arg_parser():
    """
    Create argument parser.

    :return: Parser
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--show', help="show pipfile scripts list", action='store_true'
    )
    return parser


def main(arg=None, file_path=None):
    """
    Main function to be executed by CLI.

    :param arg: Arguments for testing
    :param file_path: Pipfile path
    """
    parser = arg_parser()
    args = (
        parser.parse_args()
        if arg is None
        else parser.parse_args(arg.split(' '))
    )
    if file_path is None:
        file_path = '{0}/Pipfile'.format(os.getcwd())
    opt, res, err = run_pps_cmd(args, file_path)
    if err == -1:
        print(EXE_SCRIPT_ERR_MSG)
        return
    if opt == 'show':
        for cmd_and_script in res:
            cmd, script = cmd_and_script.split(':')
            print('{0}{1}{2}:{3}'.format(CYAN, cmd, ENDC, script))
| 23.741573 | 71 | 0.593942 | 279 | 2,113 | 4.369176 | 0.340502 | 0.052502 | 0.022149 | 0.02461 | 0.073831 | 0.034454 | 0 | 0 | 0 | 0 | 0 | 0.007343 | 0.291055 | 2,113 | 88 | 72 | 24.011364 | 0.806409 | 0.23663 | 0 | 0 | 0 | 0 | 0.062336 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057692 | false | 0 | 0.096154 | 0 | 0.211538 | 0.038462 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
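Because main accepts an argument string and a Pipfile path, the CLI above can be exercised without installing a console script. A minimal sketch, assuming the pps package is importable and the Pipfile path is your own:

from pps.cli import main

# Print the [scripts] section of a Pipfile without running anything
main(arg="--show", file_path="/path/to/your/Pipfile")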
b899dc44de3f3a0d0311f27ae992e3f9dd28e6a6 | 10,223 | py | Python | ilearn/easy_meta/Model.py | PTYin/ESRT | 4d3e5c523cef7bd15ea8ce10e5cf8b7e05ad2d5c | [
"BSD-2-Clause"
] | null | null | null | ilearn/easy_meta/Model.py | PTYin/ESRT | 4d3e5c523cef7bd15ea8ce10e5cf8b7e05ad2d5c | [
"BSD-2-Clause"
] | null | null | null | ilearn/easy_meta/Model.py | PTYin/ESRT | 4d3e5c523cef7bd15ea8ce10e5cf8b7e05ad2d5c | [
"BSD-2-Clause"
] | null | null | null | import torch
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import learn2learn as l2l
class AttentionLayer(nn.Module):
def __init__(self, input_dim, hidden_dim):
super(AttentionLayer, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.query_projection = nn.Linear(input_dim, input_dim * hidden_dim)
self.reduce_projection = nn.Linear(hidden_dim, 1, bias=False)
def reset_parameters(self):
nn.init.xavier_normal_(self.query_projection.weight)
nn.init.uniform_(self.query_projection.bias)
nn.init.xavier_normal_(self.reduce_projection.weight)
def forward(self, reviews_embedding, query_embedding):
"""
Parameters
-----------
reviews_embedding: shape(batch, input_dim)
query_embedding: shape(1, input_dim) or (input_dim,)
Return
-----------
torch.Tensor: shape(input_dim)
"""
# ------------tanh(W*q+b)------------
projected_query = torch.tanh(self.query_projection(query_embedding))
# shape: (1, input_dim * hidden_dim) or (input_dim * hidden_dim)
projected_query = projected_query.view((self.input_dim, self.hidden_dim))
# shape: (input_dim, hidden_dim)
# ------------r*tanh(W*q+b)------------
reviews_query_dotted_sum = reviews_embedding @ projected_query
# shape: (batch, hidden_dim)
# ------------(r*tanh(W_1*q+b))*W_2------------
reviews_query_reduce_sum = self.reduce_projection(reviews_query_dotted_sum)
# shape: (batch, 1)
weight = torch.softmax(reviews_query_reduce_sum, dim=0)
# # ------------exp((r*tanh(W_1*q+b))*W_2)------------
# shape: (batch, 1)
entity_embedding = torch.sum(weight * reviews_embedding, dim=0)
# shape: (input_dim)
return entity_embedding
class SelfAttentionLayer(AttentionLayer):
def __init__(self, input_dim, hidden_dim):
super(SelfAttentionLayer, self).__init__(input_dim, hidden_dim)
def forward(self, words_embedding: torch.Tensor, **kwargs):
"""
Parameters
-----------
words_embedding: shape(batch, sequence, input_dim)
Return
-----------
torch.Tensor: shape(batch, input_dim)
"""
original_shape = words_embedding.shape
# ------------tanh(W*w)------------
projected_words = torch.tanh(self.query_projection(words_embedding))
# shape: (batch, sequence, input_dim * hidden_dim)
projected_words = projected_words.view((*original_shape, self.hidden_dim))
# shape: (batch, sequence, input_dim, hidden_dim)
reduced_words = torch.squeeze(self.reduce_projection(projected_words), dim=-1)
# shape: (batch, sequence, input_dim)
weight = torch.softmax(reduced_words, dim=1)
# shape: (batch, sequence, input_dim)
entity_embedding = torch.sum(weight * words_embedding, dim=1)
# shape: (batch, input_dim)
return entity_embedding
class Model(nn.Module):
def __init__(self, user_num, item_num, embedding_size):
super(Model, self).__init__()
self.user_num = user_num
self.item_num = item_num
self.embedding_size = embedding_size
self.user_embedding_layer = nn.Embedding(user_num, embedding_size)
self.item_embedding_layer = nn.Embedding(item_num, embedding_size)
self.projection_layer = nn.Linear(embedding_size, 1, bias=False)
self.activation = nn.ReLU()
self.local_parameters: list = [self.projection_layer.weight]
self.global_parameters: list = [self.user_embedding_layer.weight, self.item_embedding_layer.weight]
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.user_embedding_layer.weight, 0.0, 0.01)
nn.init.normal_(self.item_embedding_layer.weight, 0.0, 0.01)
nn.init.xavier_normal_(self.projection_layer.weight)
def set_local(self):
for global_parameters in self.global_parameters:
global_parameters.requires_grad = False
def set_global(self):
for global_parameters in self.global_parameters:
global_parameters.requires_grad = True
def forward(self, user_id: torch.LongTensor, item_id: torch.LongTensor, negative_item_id=None):
user_id = user_id.unsqueeze(dim=0)
item_id = item_id.unsqueeze(dim=0)
user_entity = self.user_embedding_layer(user_id)
item_entity = self.item_embedding_layer(item_id)
positive = user_entity * item_entity
score: torch.Tensor = self.projection_layer(positive)
return score[0, 0]
# class Model(nn.Module):
# def __init__(self, word_num, word_embedding_size, doc_embedding_size,
# attention_hidden_dim):
# super(Model, self).__init__()
# self.word_num = word_num
# self.word_embedding_size = word_embedding_size
# self.doc_embedding_size = doc_embedding_size
# self.attention_hidden_dim = attention_hidden_dim
#
# self.word_embedding_layer = nn.Embedding(self.word_num, self.word_embedding_size, padding_idx=0)
# self.doc_embedding_layer = SelfAttentionLayer(self.word_embedding_size, self.doc_embedding_size)
# # self.doc_embedding_layer = nn.LSTM(input_size=self.word_embedding_size,
# # hidden_size=self.doc_embedding_size,
# # num_layers=1,
# # batch_first=True)
# self.attention_layer = AttentionLayer(self.doc_embedding_size, self.attention_hidden_dim)
# self.personalized_factor = nn.Parameter(torch.tensor([0.0]))
# self.gamma = nn.Parameter(torch.tensor([0.0]))
#
# self.local_parameters: list = [*self.attention_layer.parameters(), self.personalized_factor, self.gamma]
# self.global_parameters: list = [self.word_embedding_layer.weight, *self.doc_embedding_layer.parameters()]
# # self.global_parameters: list = [self.word_embedding_layer.weight, *self.doc_embedding_layer.all_weights[0]]
# # self.local_parameters: list = [self.personalized_factor, self.gamma]
# # self.global_parameters: list = [self.word_embedding_layer.weight]
#
# self.reset_parameters()
#
# def reset_parameters(self):
# nn.init.normal_(self.word_embedding_layer.weight, 0.0, 0.01)
# self.doc_embedding_layer.reset_parameters()
# self.attention_layer.reset_parameters()
# nn.init.uniform_(self.personalized_factor)
# nn.init.uniform_(self.gamma)
#
# def set_local(self):
# for global_parameters in self.global_parameters:
# global_parameters.requires_grad = False
#
# def set_global(self):
# for global_parameters in self.global_parameters:
# global_parameters.requires_grad = True
#
# def embedding(self, words, lengths):
# # word_embedding = pack_padded_sequence(self.word_embedding_layer(words), lengths,
# # batch_first=True, enforce_sorted=False)
# # _, (_, doc_embedding) = self.doc_embedding_layer(word_embedding)
# # return doc_embedding.squeeze(dim=0)
# word_embedding = self.word_embedding_layer(words)
# doc_embedding = self.doc_embedding_layer(word_embedding)
# # print(doc_embedding.shape)
# return doc_embedding
#
# # output, (_, _) = self.doc_embedding_layer(word_embedding)
# # output, _ = pad_packed_sequence(output, batch_first=True)
# # doc_embedding = torch.max(output, dim=1).values
# # return doc_embedding
#
# def forward(self,
# user_reviews_words: torch.LongTensor, user_reviews_lengths: torch.LongTensor,
# item_reviews_words: torch.LongTensor, item_reviews_lengths: torch.LongTensor,
# query: torch.LongTensor,
# mode,
# negative_item_reviews_words: torch.LongTensor = None,
# negative_item_reviews_lengths: torch.LongTensor = None):
# if mode == 'output_embedding':
# item_reviews_embedding = self.embedding(item_reviews_words, item_reviews_lengths)
# query_embedding = self.embedding(query.unsqueeze(dim=0), torch.LongTensor([len(query)]))
# item_entity = self.attention_layer(item_reviews_embedding, query_embedding)
# return item_entity
#
# user_reviews_embedding = self.embedding(user_reviews_words, user_reviews_lengths)
# item_reviews_embedding = self.embedding(item_reviews_words, item_reviews_lengths)
# query_embedding = self.embedding(query.unsqueeze(dim=0), torch.LongTensor([len(query)]))
#
# user_entity = self.attention_layer(user_reviews_embedding, query_embedding)
# item_entity = self.attention_layer(item_reviews_embedding, query_embedding)
# # user_entity = torch.sum(user_reviews_embedding, dim=0)
# # item_entity = torch.sum(item_reviews_embedding, dim=0)
#
# query_embedding = query_embedding.squeeze(dim=0)
# personalized_model = user_entity + self.personalized_factor * query_embedding
#
# # positive = torch.cosine_similarity(personalized_model, item_entity, dim=0, eps=1e-10)
#
# if mode == 'train':
# negative_item_reviews_embedding = self.embedding(negative_item_reviews_words, negative_item_reviews_lengths)
# negative_item_entity = self.attention_layer(negative_item_reviews_embedding, query_embedding)
# # negative_item_entity = torch.sum(negative_item_reviews_embedding, dim=0)
# # negative = torch.cosine_similarity(personalized_model, negative_item_entity, dim=0, eps=1e-10)
# # pair_loss = torch.relu(self.gamma - positive + negative)
# # return torch.relu(pair_loss)
# return personalized_model.unsqueeze(dim=0), item_entity.unsqueeze(dim=0), negative_item_entity.unsqueeze(dim=0)
# elif mode == 'test':
# return personalized_model, item_entity
| 47.110599 | 125 | 0.663015 | 1,207 | 10,223 | 5.26512 | 0.102734 | 0.028954 | 0.030212 | 0.024076 | 0.505901 | 0.354524 | 0.301652 | 0.230055 | 0.190716 | 0.18096 | 0 | 0.007805 | 0.222929 | 10,223 | 216 | 126 | 47.328704 | 0.79217 | 0.601487 | 0 | 0.123077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.061538 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
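The uncommented Model class above is a plain user/item dot-product scorer. A short sketch of a forward pass with made-up sizes; the import path is an assumption based on the repository layout:

import torch
from ilearn.easy_meta.Model import Model  # import path is an assumption

model = Model(user_num=100, item_num=500, embedding_size=16)
user_id = torch.tensor(3, dtype=torch.long)   # forward() unsqueezes scalar ids itself
item_id = torch.tensor(42, dtype=torch.long)
score = model(user_id, item_id)
print(score)  # scalar relevance score for this user/item pair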
b89a9f7b2f5d37983704ba39d7303b2b36c67f42 | 9,420 | py | Python | tests/utils/test_config.py | RiverSafeUK/eze-cli | ad1cce9edd2be28e1681b7c7379ac79f94d32bd3 | [
"MIT"
] | 4 | 2021-07-26T18:44:11.000Z | 2021-12-07T19:59:57.000Z | tests/utils/test_config.py | RiverSafeUK/eze-cli | ad1cce9edd2be28e1681b7c7379ac79f94d32bd3 | [
"MIT"
] | 23 | 2021-07-26T16:56:59.000Z | 2022-03-11T15:21:25.000Z | tests/utils/test_config.py | RiverSafeUK/eze-cli | ad1cce9edd2be28e1681b7c7379ac79f94d32bd3 | [
"MIT"
] | 3 | 2021-11-11T15:29:21.000Z | 2021-11-30T10:32:17.000Z | # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring,line-too-long
import copy
from eze.utils.config import (
get_config_key,
extract_embedded_run_type,
get_config_keys,
create_config_help,
merge_configs,
merge_from_root_base,
merge_from_root_flat,
merge_from_root_nested,
)
from eze.utils.io import create_tempfile_path
from tests.__fixtures__.fixture_helper import get_snapshot_directory
def test_extract_embedded_run_type__no_change():
input_plugin_name = "hello"
input_run_type = "world"
expected_output = ["hello", "world"]
output = extract_embedded_run_type(input_plugin_name, input_run_type)
assert output == expected_output
def test_extract_embedded_run_type__colon():
input_plugin_name = "hello:world"
input_run_type = None
expected_output = ["hello", "world"]
output = extract_embedded_run_type(input_plugin_name, input_run_type)
assert output == expected_output
def test_extract_embedded_run_type__underscore():
input_plugin_name = "hello_world"
input_run_type = None
expected_output = ["hello", "world"]
output = extract_embedded_run_type(input_plugin_name, input_run_type)
assert output == expected_output
def test_get_config_key__success():
input_dict = {"hello": "world"}
expected_output = "world"
output = get_config_key(input_dict, "hello", str)
assert output == expected_output
def test_get_config_key__wrong_type_default_case():
input_dict = {"hello": 4}
expected_output = "default_world"
output = get_config_key(input_dict, "hello", str, "default_world")
assert output == expected_output
def test_get_config_key__missing_default_case():
input_dict = {}
expected_output = "default_world"
output = get_config_key(input_dict, "hello", str, "default_world")
assert output == expected_output
def test_get_config_keys__std_case():
# Given
input_dict = {
"SOME_KEY": "1234",
"SOME_LIST": ["dog", "wolf"],
"SOME_DICT": {"cat": "sofia"},
"SOME_UNUSED_KEY": "FOO",
}
input_config = {
"SOME_KEY": {"type": str},
"SOME_LIST": {"type": list},
"SOME_DICT": {"type": dict},
"SOME_MISSING_KEY": {"type": str, "default": "DEFAULT IF MISSING"},
}
expected_output = {
"SOME_DICT": {"cat": "sofia"},
"SOME_KEY": "1234",
"SOME_LIST": ["dog", "wolf"],
"SOME_MISSING_KEY": "DEFAULT IF MISSING",
}
# When
output = get_config_keys(input_dict, input_config)
# Then
assert output == expected_output
def test_get_config_keys__cast_str_as_list():
# Given
input_dict = {"SOME_KEY": "1234"}
input_config = {"SOME_KEY": {"type": list}}
expected_output = {"SOME_KEY": ["1234"]}
# When
output = get_config_keys(input_dict, input_config)
# Then
assert output == expected_output
def test_create_config_help__real_case_snapshot(snapshot):
# Given
input_config = {
"SOURCE": {
"type": str,
"default": ".",
"help_text": """By default it is "." aka local folder
From grype help
Supports the following image sources:
grype yourrepo/yourimage:tag defaults to using images from a Docker daemon
grype path/to/yourproject a Docker tar, OCI tar, OCI directory, or generic filesystem directory
You can also explicitly specify the scheme to use:
grype docker:yourrepo/yourimage:tag explicitly use the Docker daemon
grype docker-archive:path/to/yourimage.tar use a tarball from disk for archives created from "docker save"
grype oci-archive:path/to/yourimage.tar use a tarball from disk for OCI archives (from Podman or otherwise)
grype oci-dir:path/to/yourimage read directly from a path on disk for OCI layout directories (from Skopeo or otherwise)
grype dir:path/to/yourproject read directly from a path on disk (any directory)
grype sbom:path/to/syft.json read Syft JSON from path on disk
grype registry:yourrepo/yourimage:tag pull image directly from a registry (no container runtime required)""",
},
"CONFIG_FILE": {
"type": str,
"help_text": """Grype config file location, by default Empty, maps to grype argument
-c, --config string application config file""",
},
"GRYPE_IGNORE_UNFIXED": {
"type": bool,
"default": False,
"help_text": """if true ignores state = "not-fixed""" "",
},
"REPORT_FILE": {
"type": str,
"default": create_tempfile_path("tmp-grype-report.json"),
"default_help_value": "<tempdir>/.eze-temp/tmp-grype-report.json",
"help_text": "output report location (will default to tmp file otherwise)",
},
}
input_common_config: dict = {
"ADDITIONAL_ARGUMENTS": {
"type": str,
"default": "",
"help_text": "common field that can be used to postfix arbitrary arguments onto any plugin cli tooling",
},
"IGNORE_BELOW_SEVERITY": {
"type": str,
"help_text": """vulnerabilities severities to ignore, by CVE severity level
aka if set to medium, would ignore medium/low/none/na
available levels: critical, high, medium, low, none, na""",
},
"IGNORED_VULNERABILITIES": {
"type": list,
"help_text": """vulnerabilities to ignore, by CVE code or by name
feature only for use when vulnerability mitigated or on track to be fixed""",
},
"IGNORED_FILES": {
"type": list,
"help_text": """vulnerabilities in files or prefix folders to ignore
feature only for use when vulnerability mitigated or on track to be fixed""",
},
"DEFAULT_SEVERITY": {
"type": str,
"help_text": """Severity to set vulnerabilities, when tool doesn't provide a severity, defaults to na
available levels: critical, high, medium, low, none, na""",
},
}
# When
output = create_config_help("anchore-grype", input_config, input_common_config)
# Then
# WARNING: this is a snapshot test, any changes to format will edit this and the snapshot will need to be updated
snapshot.snapshot_dir = get_snapshot_directory()
snapshot.assert_match(output, "core/config__create_config_help--real-case.txt")
def test_create_config_help__std_case_snapshot(snapshot):
# Given
input_config = {
"SOME_KEY": {"type": str},
"SOME_LIST": {"type": list},
"SOME_DICT": {"type": dict},
"SOME_MISSING_KEY": {"type": str, "default": "DEFAULT IF MISSING"},
"HELP_TEST": {"type": str, "help_text": "some misc help text"},
"HELP_EXAMPLE": {"type": str, "help_example": "some-value"},
"HELP_EXAMPLE_BOOL": {"type": bool, "help_example": True},
"HELP_EXAMPLE_LIST": {"type": list, "help_example": ["something"]},
}
# When
output = create_config_help("mctool/jimmy-the-plugin", input_config)
# Then
# WARNING: this is a snapshot test, any changes to format will edit this and the snapshot will need to be updated
snapshot.snapshot_dir = get_snapshot_directory()
snapshot.assert_match(output, "core/config__create_config_help--std-case.txt")
def test_merge_from_root_base__std():
# Given
expected_config = {"hello": "world"}
input_dict = {"xxx-plugin": expected_config}
input_plugin_name = "xxx-plugin"
testee = {}
# When
merge_from_root_base(input_dict, testee, input_plugin_name)
# Then
assert testee == expected_config
def test_merge_from_root_flat__std():
# Given
expected_config = {"hello": "world"}
input_dict = {"xxx-plugin": {}, "xxx-plugin_run1": expected_config}
input_plugin_name = "xxx-plugin"
input_run_type = "run1"
testee = {}
# When
merge_from_root_flat(input_dict, testee, input_plugin_name, input_run_type)
# Then
assert testee == expected_config
def test_merge_from_root_nested__std():
# Given
expected_config = {"hello": "world"}
input_dict = {"xxx-plugin": {"run1": expected_config}, "xxx-plugin_run1": {}}
input_plugin_name = "xxx-plugin"
input_run_type = "run1"
testee = {}
# When
merge_from_root_nested(input_dict, testee, input_plugin_name, input_run_type)
# Then
assert testee == expected_config
def test_merge_configs__simple_merging():
input_dict = {"hello": "world", "foo": {"bar": 1}}
expected_config = copy.deepcopy(input_dict)
testee = {}
merge_configs(input_dict, testee)
assert testee == expected_config
def test_merge_configs__nested_deep_merging():
# Given
inital_dict = {
"hello": "world",
"foo": {"bar": 1, "bar2": "should be same value"},
"old_key": "do not touch me",
}
input_dict = {"hello": "updated world", "foo": {"bar": "new value"}, "new_key": "new key value"}
expected_config = {
"hello": "updated world",
"foo": {"bar": "new value", "bar2": "should be same value"},
"new_key": "new key value",
"old_key": "do not touch me",
}
testee = {}
merge_configs(inital_dict, testee)
# When
merge_configs(input_dict, testee)
# Then
assert testee == expected_config
| 36.091954 | 136 | 0.655308 | 1,190 | 9,420 | 4.893277 | 0.202521 | 0.032458 | 0.030912 | 0.03572 | 0.588185 | 0.500773 | 0.460416 | 0.423665 | 0.404431 | 0.369225 | 0 | 0.003583 | 0.229724 | 9,420 | 260 | 137 | 36.230769 | 0.798925 | 0.048514 | 0 | 0.375 | 0 | 0.015 | 0.385115 | 0.049356 | 0 | 0 | 0 | 0 | 0.075 | 1 | 0.075 | false | 0 | 0.02 | 0 | 0.095 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8a1b95958ff0540fcb7bdf78c8f25a97053ecf9 | 5,068 | py | Python | tests/test_select_columns.py | eikevons/pandas-paddles | 98e519ce847d015b76bc3401d534b8b752dd583d | [
"MIT"
] | 4 | 2022-02-24T09:35:37.000Z | 2022-03-19T19:50:47.000Z | tests/test_select_columns.py | eikevons/pandas-paddles | 98e519ce847d015b76bc3401d534b8b752dd583d | [
"MIT"
] | null | null | null | tests/test_select_columns.py | eikevons/pandas-paddles | 98e519ce847d015b76bc3401d534b8b752dd583d | [
"MIT"
] | null | null | null | import pickle
import pandas as pd
import pytest
from pandas_paddles import C
from pandas_paddles.axis import OpComposerBase
def cols(df: pd.DataFrame, col_sel: OpComposerBase) -> list:
"""Get list of column names after applying column selection."""
return df.loc[:, col_sel].columns.to_list()
@pytest.fixture
def simple_df():
return pd.DataFrame(
{
"x": range(5),
"y": range(5),
"z": list("abcde"),
"u": 1.0,
}
)
@pytest.fixture
def mi_df():
return pd.DataFrame(
columns=pd.MultiIndex.from_product(
[list("abc"), list("XYZ")],
names=["one", "two"],
)
)
def test_basic(simple_df):
col_sel = C["y", "z", "x"]
assert cols(simple_df, col_sel) == ["y", "z", "x"]
def test_combine(simple_df):
col_sel = C["y"] | C["z"] | C["x"]
assert cols(simple_df, col_sel) == ["y", "z", "x"]
def test_ellipsis(simple_df):
col_sel = C["y"] | ...
assert cols(simple_df, col_sel) == ["y", "x", "z", "u"]
def test_startswith(simple_df):
col_sel = C.startswith("x")
assert cols(simple_df, col_sel) == ["x"]
def test_endswith(simple_df):
col_sel = C.endswith("x")
assert cols(simple_df, col_sel) == ["x"]
def test_str_dtype(simple_df):
col_sel = C.dtype == str
assert cols(simple_df, col_sel) == ["z"]
def test_int_dtype(simple_df):
col_sel = C.dtype == int
assert cols(simple_df, col_sel) == ["x", "y"]
def test_dtype_isin(simple_df):
col_sel = C.dtype.isin((str, float))
assert cols(simple_df, col_sel) == ["z", "u"]
def test_level0_subset(mi_df):
expected = [
("c", "X"),
("c", "Y"),
("c", "Z"),
("a", "X"),
("a", "Y"),
("a", "Z"),
]
col_sel = C.levels[0]["c", "a"]
assert cols(mi_df, col_sel) == expected
col_sel = C.levels["one"]["c", "a"]
assert cols(mi_df, col_sel) == expected
def test_level0_str_methods(mi_df):
expected = [
("c", "X"),
("c", "Y"),
("c", "Z"),
]
col_sel = C.levels[0].startswith("c")
assert cols(mi_df, col_sel) == expected
col_sel = C.levels[0].endswith("c")
assert cols(mi_df, col_sel) == expected
col_sel = C.levels[0].contains("c")
assert cols(mi_df, col_sel) == expected
col_sel = C.levels[0].match("c")
assert cols(mi_df, col_sel) == expected
def test_level0_composition(mi_df):
col_sel = C.levels[0]['c'] | ...
expected = [
("c", "X"),
("c", "Y"),
("c", "Z"),
("a", "X"),
("a", "Y"),
("a", "Z"),
("b", "X"),
("b", "Y"),
("b", "Z"),
]
assert cols(mi_df, col_sel) == expected
def test_combine_complex(mi_df):
# Move (c, [X, Y]) to the left
col_sel = (C.levels[0]['c'] & C.levels[1]["X", "Y"]) | ...
expected = [
("c", "X"),
("c", "Y"),
("a", "X"),
("a", "Y"),
("a", "Z"),
("b", "X"),
("b", "Y"),
("b", "Z"),
("c", "Z"),
]
assert cols(mi_df, col_sel) == expected
# Select (b, [Y, X, Z]) to the left
col_sel = (C.levels[0]["b"] & (C.levels[1]["Y"] | ...)) | ...
expected = [
("b", "Y"),
("b", "X"),
("b", "Z"),
("a", "X"),
("a", "Y"),
("a", "Z"),
("c", "X"),
("c", "Y"),
("c", "Z"),
]
assert cols(mi_df, col_sel) == expected
# Select (b, [Y]) + (c, *)
col_sel = (C.levels[0]["b"] & C.levels[1]["Y"]) | C.levels[0]["c"]
expected = [
("b", "Y"),
("c", "X"),
("c", "Y"),
("c", "Z"),
]
assert cols(mi_df, col_sel) == expected
def test_inversion_basic(simple_df):
col_sel = ~C["y"]
assert cols(simple_df, col_sel) == ["x", "z", "u"]
def test_inversion_complex(mi_df):
# Exclude (b, [Y]) + (c, *)
col_sel = ~((C.levels[0]["b"] & C.levels[1]["Y"]) | C.levels[0]["c"])
expected = [
("a", "X"),
("a", "Y"),
("a", "Z"),
("b", "X"),
("b", "Z"),
]
assert cols(mi_df, col_sel) == expected
def test_inversion_composition(mi_df):
# Select (*, ~[Y, Z])
sel_1 = ... & ~C.levels[1]["Y", "Z"]
expected_1 = [
("a", "X"),
("b", "X"),
("c", "X"),
]
assert cols(mi_df, sel_1) == expected_1
# Select (b, *)
sel_2 = C.levels[0]["b"]
expected_2 = [
("b", "X"),
("b", "Y"),
("b", "Z"),
]
assert cols(mi_df, sel_2) == expected_2
# Select (b, *) + (*, ~[Y, Z])
sel_composed = sel_2 | sel_1
expected_composed = [
("b", "X"),
("b", "Y"),
("b", "Z"),
("a", "X"),
("c", "X"),
]
test = cols(mi_df, sel_composed)
assert test == expected_composed
@pytest.mark.parametrize(
"sel",
[
C["y", "z", "x"],
C.dtype.isin((str, float)),
... & ~C.levels[1]["Y", "Z"],
],
)
def test_serializable(sel):
buf = pickle.dumps(sel)
pickle.loads(buf)
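# Hedged usage sketch (not part of the original test module): outside the tests,
# the same selector expressions could be applied directly with DataFrame.loc,
# e.g. on a frame like the fixtures above. `df` is illustrative.
#
#     df.loc[:, C["y"] | ...]       # move "y" to the front, keep the rest in order
#     df.loc[:, C.dtype == int]     # keep only the integer columns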
| 21.751073 | 73 | 0.463694 | 699 | 5,068 | 3.180258 | 0.120172 | 0.11336 | 0.107962 | 0.11336 | 0.593342 | 0.530814 | 0.492128 | 0.420153 | 0.405308 | 0.390014 | 0 | 0.010144 | 0.299724 | 5,068 | 232 | 74 | 21.844828 | 0.61623 | 0.046369 | 0 | 0.471264 | 0 | 0 | 0.040863 | 0 | 0 | 0 | 0 | 0 | 0.132184 | 1 | 0.109195 | false | 0 | 0.028736 | 0.011494 | 0.155172 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8a339b178f2e73df63318f667560f3eca585446 | 1,089 | py | Python | storeManage/views.py | FreeGodCode/store | 1ea1d6f0d6030fb58bce9a4e2d428342a0c3ad19 | [
"MIT"
] | null | null | null | storeManage/views.py | FreeGodCode/store | 1ea1d6f0d6030fb58bce9a4e2d428342a0c3ad19 | [
"MIT"
] | 1 | 2021-03-05T15:00:38.000Z | 2021-03-05T15:00:38.000Z | storeManage/views.py | FreeGodCode/store | 1ea1d6f0d6030fb58bce9a4e2d428342a0c3ad19 | [
"MIT"
] | null | null | null | import json
from rest_framework.views import APIView
from rest_framework.response import Response
# from django.shortcuts import render
from base.models import UserNow
from . import models
from .serializer import TotalStockSerializer
class TotalStockView(APIView):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.user_now_name = ""
self.area_name = ""
def post(self, request):
data = json.loads(request.body.decode("utf-8"))
user_identify = data['user_now_identify']
user_now = UserNow.objects.get(user_identify=user_identify)
if user_now:
self.user_now_name = user_now.user_name
self.area_name = user_now.area_name
total_stocks = models.TotalStock.objects.filter(totalwarehouse__organization__area_name=self.area_name)
if total_stocks:
total_stocks_serializer = TotalStockSerializer(total_stocks, many=True)
return Response({"total_stocks": total_stocks_serializer.data})
else:
return Response({"message": "未查询到当地仓储信息"})  # "No local warehouse information found"
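# Hedged request/response sketch for the view above (the identifier value is
# invented): POSTing the JSON body {"user_now_identify": "abc123"} returns
# {"total_stocks": [...]} when stock exists for the user's area, otherwise the
# message payload above.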
| 35.129032 | 111 | 0.702479 | 130 | 1,089 | 5.569231 | 0.392308 | 0.06768 | 0.049724 | 0.066298 | 0.088398 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001161 | 0.209366 | 1,089 | 30 | 112 | 36.3 | 0.839721 | 0.03214 | 0 | 0 | 0 | 0 | 0.048479 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.25 | 0 | 0.458333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8a51eeba192d253a6e5e4b742a387fe9e3b8e6a | 856 | py | Python | src/yammy/input.py | xmnlab/pyge | 3c36dd399b0d75c4989e3b04c10a84bf71884949 | [
"BSD-3-Clause"
] | null | null | null | src/yammy/input.py | xmnlab/pyge | 3c36dd399b0d75c4989e3b04c10a84bf71884949 | [
"BSD-3-Clause"
] | null | null | null | src/yammy/input.py | xmnlab/pyge | 3c36dd399b0d75c4989e3b04c10a84bf71884949 | [
"BSD-3-Clause"
] | null | null | null | from typing import Callable, Dict
import pygame
# keys
TYPE_KEYDOWN = pygame.KEYDOWN
KEY_ARROW_UP = pygame.K_UP
KEY_ARROW_DOWN = pygame.K_DOWN
KEY_ARROW_LEFT = pygame.K_LEFT
KEY_ARROW_RIGHT = pygame.K_RIGHT
class Input:
...
class Keyboard(Input):
key_map = {
KEY_ARROW_LEFT: "ARROW-LEFT",
KEY_ARROW_RIGHT: "ARROW-RIGHT",
KEY_ARROW_UP: "ARROW-UP",
KEY_ARROW_DOWN: "ARROW-DOWN",
}
def __init__(self, sprite, callbacks: Dict[str, Callable]):
self.sprite = sprite
self.callbacks = callbacks
def events(self, event, game):
if event.type == TYPE_KEYDOWN:
event_map = self.key_map.get(event.key)
if not event_map:
return
callback = self.callbacks.get(event_map)
if callback:
callback(self.sprite)
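# Hedged wiring sketch (assumption, not from the original module; `player`,
# `game` and the lambdas are illustrative):
#
#     keyboard = Keyboard(player, {
#         "ARROW-LEFT": lambda sprite: sprite.move(-1, 0),
#         "ARROW-RIGHT": lambda sprite: sprite.move(1, 0),
#     })
#     for event in pygame.event.get():
#         keyboard.events(event, game)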
| 21.948718 | 63 | 0.626168 | 111 | 856 | 4.54955 | 0.306306 | 0.126733 | 0.039604 | 0.055446 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.280374 | 856 | 38 | 64 | 22.526316 | 0.819805 | 0.004673 | 0 | 0 | 0 | 0 | 0.045882 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.074074 | 0 | 0.296296 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8a7212bda203d8152a52a654efb75f04a412089 | 10,249 | py | Python | shrieker/nethack.py | lmjohns3/shrieker | 6fd41b2c9a049b7739afdd6edcfa2a280ef5752e | [
"MIT"
] | 3 | 2018-05-29T22:12:45.000Z | 2021-07-03T23:47:32.000Z | shrieker/nethack.py | lmjohns3/py-nethack | 6fd41b2c9a049b7739afdd6edcfa2a280ef5752e | [
"MIT"
] | null | null | null | shrieker/nethack.py | lmjohns3/py-nethack | 6fd41b2c9a049b7739afdd6edcfa2a280ef5752e | [
"MIT"
] | 1 | 2018-05-04T23:02:42.000Z | 2018-05-04T23:02:42.000Z | import collections
import logging
import numpy as np
import os
import pty
import random
import re
import select
import tempfile
import vt102
ROWS = 25
COLS = 80
class CMD:
class DIR:
NW, N, NE, E, SE, S, SW, W = 'ykulnjbh'
UP, DOWN = '<>'
APPLY, CLOSE, DROP, EAT, ENGRAVE, FIRE, INVENTORY, OPEN = 'acdeEfio'
PAY, PUTON, QUAFF, QUIVER, READ, REMOVE, SEARCH, THROW = 'pPqQrRst'
TAKEOFF, WIELD, WEAR, EXCHANGE, ZAP, CAST, PICKUP, WAIT = 'TwWxzZ,.'
MORE = '\x0d' # ENTER
KICK = '\x04' # ^D
TELEPORT = '\x14' # ^T
class SPECIAL:
CHAT = '#chat'
DIP = '#dip'
FORCE = '#force'
INVOKE = '#invoke'
JUMP = '#jump'
LOOT = '#loot'
MONSTER = '#monster'
OFFER = '#offer'
PRAY = '#pray'
RIDE = '#ride'
RUB = '#rub'
SIT = '#sit'
TURN = '#turn'
WIPE = '#wipe'
class InventoryItem:
CATEGORIES = ('Amulets', 'Weapons', 'Armor', 'Comestibles',
'Scrolls', 'Spellbooks', 'Potions', 'Rings',
'Wands', 'Tools', 'Gems')
def __init__(self, raw):
self.raw = raw.strip()
def __str__(self):
return self.raw
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self)
@property
def is_cursed(self):
return re.search(r'\bcursed\b', self.raw)
@property
def is_uncursed(self):
return re.search(r'\buncursed\b', self.raw)
@property
def is_blessed(self):
return re.search(r'\bblessed\b', self.raw)
@property
def is_being_worn(self):
return re.search(r'\(being worn\)', self.raw)
@property
def is_in_use(self):
return re.search(r'\((?:in use|lit)\)', self.raw)
@property
def duplicates(self):
m = re.match(r'^(\d+)', self.raw)
if not m:
return 1
return int(m.group(1))
@property
def charges(self):
m = re.search(r'\((\d+):(\d+)\)', self.raw)
if not m:
return None, None
return int(m.group(1)), int(m.group(2))
@property
def enchantment(self):
m = re.search(r' ([-+]\d+) ', self.raw)
if not m:
return None
return int(m.group(1))
@property
def named(self):
m = re.search(r' named ([^\(]+)', self.raw)
if not m:
return None
return m.group(1)
class AmuletsItem(InventoryItem):
pass
class ArmorItem(InventoryItem):
pass
class WeaponsItem(InventoryItem):
@property
def is_wielded(self):
return re.search(r'\(weapon in hands?\)', self.raw)
@property
def is_alternate(self):
return re.search(r'\(alternate weapon; not wielded\)', self.raw)
@property
def is_quivered(self):
return re.search(r'\(in quiver\)', self.raw)
class ComestiblesItem(InventoryItem):
pass
class ScrollsItem(InventoryItem):
pass
class SpellbooksItem(InventoryItem):
pass
class PotionsItem(InventoryItem):
pass
class RingsItem(InventoryItem):
pass
class WandsItem(InventoryItem):
pass
class ToolsItem(InventoryItem):
pass
class GemsItem(InventoryItem):
pass
class Player:
OPTIONS = ('CHARACTER={character}\n'
'OPTIONS=hilite_pet,pickup_types:$?+!=/,'
'gender:{gender},race:{race},align:{align}')
def __init__(self, **kwargs):
self._stream = vt102.stream()
self._screen = vt102.screen((ROWS, COLS))
self._screen.attach(self._stream)
self._need_inventory = True
self._has_more = False
self._command = None
self.messages = collections.deque(maxlen=1000)
self.stats = {}
self.inventory = {}
self.spells = {}
opts = dict(character=random.choice('bar pri ran val wiz'.split()),
gender=random.choice('mal fem'.split()),
race=random.choice('elf hum'.split()),
align=random.choice('cha neu'.split()))
opts.update(kwargs)
handle = tempfile.NamedTemporaryFile()
handle.write(self.OPTIONS.format(**opts).encode('utf-8'))
handle.flush()
os.environ['NETHACKOPTIONS'] = '@' + handle.name
def play(self):
pty.spawn(['nethack'], self._observe, self._act)
def choose_action(self):
raise NotImplementedError
def choose_answer(self):
raise NotImplementedError
def neighborhood(self, radius=3):
x, y = self._screen.cursor()
ylo, yhi = y - radius, y + radius + 1
xlo, xhi = x - radius, x + radius + 1
ulo, uhi = 0, 2 * radius + 1
vlo, vhi = 0, 2 * radius + 1
if y < radius:
ylo, ulo = 0, radius - y
if x < radius:
xlo, vlo = 0, radius - x
if y > ROWS - radius:
yhi, uhi = ROWS, radius - (ROWS - y)
if x > COLS - radius:
xhi, vhi = COLS, radius - (COLS - x)
hood = np.zeros((2 * radius + 1, 2 * radius + 1), np.uint8)
# copy the visible character codes around the cursor into the array
hood[ulo:uhi, vlo:vhi] = [[ord(c) for c in row[xlo:xhi]] for row in self._screen.display[ylo:yhi]]
return hood
def _parse_inventory(self, raw):
found_inventory = False
for category in InventoryItem.CATEGORIES:
klass = eval('%sItem' % category)
contents = self.inventory.setdefault(category, {})
i = raw.find(category.encode('utf-8'))
if i > 0:
s = raw[i:].split(b'\x1b[7m')[0]
for letter, name in re.findall(br' (\w) - (.*?)(?=\x1b\[)', s):
contents[letter.decode('utf-8')] = klass(name.decode('utf-8'))
logging.error('inventory for %s: %s', category, contents)
found_inventory = True
self._need_inventory = not found_inventory
def _parse_glyphs(self, raw):
self._stream.process(raw)
logging.info('current map:\n%s', self._screen.display)
logging.warn('current neighborhood:\n%s', '\n'.join(
''.join(chr(c) for c in r) for r in self.neighborhood(3)))
self._parse_message()
self._parse_attributes()
self._parse_stats()
def _parse_message(self):
'''Parse a message from the first line on the screen.'''
l = self._screen.display[0]
if l.strip() and l[0].strip():
logging.warn('message: %s', l)
self.messages.append(l)
def _parse_attributes(self):
'''Parse character attributes.'''
l = self._screen.display[22]
m = re.search(r'St:(?P<st>[/\d]+)\s*'
r'Dx:(?P<dx>\d+)\s*'
r'Co:(?P<co>\d+)\s*'
r'In:(?P<in>\d+)\s*'
r'Wi:(?P<wi>\d+)\s*'
r'Ch:(?P<ch>\d+)\s*'
r'(?P<align>\S+)', l)
if m:
self.attributes = m.groupdict()
logging.warn('parsed attributes: %s', ', '.join('%s: %s' % (
k, self.attributes[k]) for k in sorted(self.attributes)))
def _parse_stats(self):
'''Parse stats from the penultimate line.'''
l = self._screen.display[23]
m = re.search(r'Dlvl:(?P<dlvl>\S+)\s*'
r'\$:(?P<money>\d+)\s*'
r'HP:(?P<hp>\d+)\((?P<hp_max>\d+)\)\s*'
r'Pw:(?P<pw>\d+)\((?P<pw_max>\d+)\)\s*'
r'AC:(?P<ac>\d+)\s*'
r'Exp:(?P<exp>\d+)\s*'
r'(?P<hunger>Satiated|Hungry|Weak|Fainting)?\s*'
r'(?P<stun>Stun)?\s*'
r'(?P<conf>Conf)?\s*'
r'(?P<blind>Blind)?\s*'
r'(?P<burden>Burdened|Stressed|Strained|Overtaxed|Overloaded)?\s*'
r'(?P<hallu>Hallu)?\s*', l)
if m:
self.stats = m.groupdict()
for k, v in self.stats.items():
if v and v.isdigit():
self.stats[k] = int(v)
logging.warn('parsed stats: %s', ', '.join(
'%s: %s' % (k, self.stats[k]) for k in sorted(self.stats)))
def _observe(self, raw):
self._parse_glyphs(raw)
if self._command is CMD.INVENTORY:
if not self._has_more:
self.inventory = {}
self._parse_inventory(raw)
self._command = None
self._has_more = b'--More--' in raw or b'(end)' in raw
def _act(self):
msg = self.messages and self.messages[-1] or ''
if self._has_more:
self._command = CMD.MORE
elif 'You die' in msg:
self._command = 'q'
elif '? ' in msg and ' written ' not in msg:
self._command = self.choose_answer()
elif self._need_inventory:
self._command = CMD.INVENTORY
else:
self._command = self.choose_action()
logging.warn('sending command "%s"', self._command)
return self._command
class RandomMover(Player):
def choose_answer(self):
return 'n'
def choose_action(self):
return random.choice([
CMD.DIR.N, CMD.DIR.NE, CMD.DIR.E, CMD.DIR.SE,
CMD.DIR.S, CMD.DIR.SW, CMD.DIR.W, CMD.DIR.NW,
])
# drain all available bytes from the given file descriptor, until a complete
# timeout goes by with no new data.
def _drain(fd, timeout=0.3):
more, _, _ = select.select([fd], [], [], timeout)
buf = b''
while more:
buf += os.read(fd, 1024)
more, _, _ = select.select([fd], [], [], timeout)
return buf
# we almost want to do what pty.spawn does, except that we know how our child
# process works. so, we forever loop: read world state from nethack, then issue
# an action to nethack. repeat.
def _copy(fd, observe, act):
while True:
buf = _drain(fd)
if buf:
observe(buf)
os.write(1, buf)
pty._writen(fd, act().encode('utf-8'))
# monkeys ahoy !
pty._copy = _copy
if __name__ == '__main__':
#import sys
logging.basicConfig(
stream=open('/tmp/nethack-bot.log', 'w'),
level=logging.DEBUG,
format='%(levelname).1s %(asctime)s %(message)s')
rm = RandomMover()
rm.play()
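# Hedged extension sketch (assumption, not part of the original file): a new bot
# only needs to override the two choose_* hooks, e.g. one that always walks east:
#
#     class EastwardMover(Player):
#         def choose_answer(self):
#             return 'n'
#         def choose_action(self):
#             return CMD.DIR.E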
| 28.312155 | 88 | 0.531759 | 1,278 | 10,249 | 4.167449 | 0.292645 | 0.023658 | 0.006196 | 0.027037 | 0.1181 | 0.070222 | 0.033233 | 0.024221 | 0.012392 | 0.012392 | 0 | 0.009989 | 0.316226 | 10,249 | 361 | 89 | 28.390582 | 0.75 | 0.043712 | 0 | 0.16 | 0 | 0 | 0.13349 | 0.031097 | 0 | 0 | 0 | 0 | 0 | 1 | 0.112727 | false | 0.036364 | 0.036364 | 0.043636 | 0.312727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8a81e5676fc558bd50c791fc9fad3d4fde69a22 | 1,652 | py | Python | jiant/tasks/lib/newsqa.py | yzpang/jiant | 192d6b525c06f33010b59044df40cb86bbfba4ea | [
"MIT"
] | 1,108 | 2019-04-22T09:19:19.000Z | 2022-03-31T13:23:51.000Z | jiant/tasks/lib/newsqa.py | yzpang/jiant | 192d6b525c06f33010b59044df40cb86bbfba4ea | [
"MIT"
] | 737 | 2019-04-22T14:30:36.000Z | 2022-03-31T22:22:17.000Z | jiant/tasks/lib/newsqa.py | yzpang/jiant | 192d6b525c06f33010b59044df40cb86bbfba4ea | [
"MIT"
] | 273 | 2019-04-23T01:42:11.000Z | 2022-03-25T15:59:38.000Z | from dataclasses import dataclass
from jiant.shared.constants import PHASE
from jiant.tasks.lib.templates.squad_style import core as squad_style_template
from jiant.utils.python.io import read_jsonl
@dataclass
class Example(squad_style_template.Example):
def tokenize(self, tokenizer):
raise NotImplementedError("SQuaD is weird")
@dataclass
class DataRow(squad_style_template.DataRow):
pass
@dataclass
class Batch(squad_style_template.Batch):
pass
class NewsQATask(squad_style_template.BaseSquadStyleTask):
Example = Example
DataRow = DataRow
Batch = Batch
def get_train_examples(self):
return self.read_examples(path=self.train_path, set_type=PHASE.TRAIN)
def get_val_examples(self):
return self.read_examples(path=self.val_path, set_type=PHASE.VAL)
@classmethod
def read_examples(cls, path: str, set_type: str):
examples = []
for entry in read_jsonl(path):
for qa in entry["qas"]:
answer_text = entry["text"][qa["answer"]["s"] : qa["answer"]["e"]]
examples.append(
Example(
qas_id=f"{set_type}-{len(examples)}",
question_text=qa["question"],
context_text=entry["text"],
answer_text=answer_text,
start_position_character=qa["answer"]["s"],
title="",
is_impossible=False,
answers=[{"answer_start": qa["answer"]["s"], "text": answer_text}],
)
)
return examples
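# Hedged illustration of the input format implied by read_examples above (not
# taken from jiant documentation): each line of the .jsonl is expected to look
# roughly like
#     {"text": "...article body...",
#      "qas": [{"question": "...", "answer": {"s": 10, "e": 20}}]}
# where "s" and "e" are character offsets into "text".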
| 30.592593 | 91 | 0.600484 | 184 | 1,652 | 5.190217 | 0.375 | 0.062827 | 0.094241 | 0.046073 | 0.087958 | 0.087958 | 0.087958 | 0.087958 | 0 | 0 | 0 | 0 | 0.296005 | 1,652 | 53 | 92 | 31.169811 | 0.821152 | 0 | 0 | 0.121951 | 0 | 0 | 0.062349 | 0.015739 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097561 | false | 0.04878 | 0.097561 | 0.04878 | 0.439024 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8aa75aa7ac1131d3f2375c96446f080992fa887 | 19,562 | py | Python | api/api/problem.py | stuyCTF/stuyCTF-Platform | 992f2eebb78b6cca72c46d9de1585558a36fa94e | [
"MIT"
] | 2 | 2015-05-22T12:36:46.000Z | 2017-03-30T16:38:41.000Z | api/api/problem.py | stuyCTF/stuyCTF-Platform | 992f2eebb78b6cca72c46d9de1585558a36fa94e | [
"MIT"
] | null | null | null | api/api/problem.py | stuyCTF/stuyCTF-Platform | 992f2eebb78b6cca72c46d9de1585558a36fa94e | [
"MIT"
] | 3 | 2015-12-16T20:57:44.000Z | 2020-07-14T17:29:36.000Z | """ Module for interacting with the problems """
import imp
import pymongo
import api
from datetime import datetime
from api.common import validate, check, safe_fail, InternalException, SevereInternalException, WebException
from voluptuous import Schema, Length, Required, Range
from bson import json_util
from os.path import join, isfile
from collections import Counter
from api.annotations import log_action
grader_base_path = "./graders"
submission_schema = Schema({
Required("tid"): check(
("This does not look like a valid tid.", [str, Length(max=100)])),
Required("pid"): check(
("This does not look like a valid pid.", [str, Length(max=100)])),
Required("key"): check(
("This does not look like a valid key.", [str, Length(max=300)]))
})
problem_schema = Schema({
Required("name"): check(
("The problem's display name must be a string.", [str])),
Required("score"): check(
("Score must be a positive integer.", [int, Range(min=0)])),
Required("category"): check(
("Category must be a string.", [str])),
Required("grader"): check(
("The grader path must be a string.", [str])),
Required("description"): check(
("The problem description must be a string.", [str])),
Required("threshold"): check(
("Threshold must be a positive integer.", [int, Range(min=0)])),
"disabled": check(
("A problem's disabled state is either True or False.", [
lambda disabled: type(disabled) == bool])),
"autogen": check(
("A problem should either be autogenerated or not, True/False", [
lambda autogen: type(autogen) == bool])),
"related_problems": check(
("Related problems should be a list of related problems.", [list])),
"pid": check(
("You should not specify a pid for a problem.", [lambda _: False])),
"weightmap": check(
("Weightmap should be a dict.", [dict])),
"tags": check(
("Tags must be described as a list.", [list])),
"hint": check(
("A hint must be a string.", [str])),
"generator": check(
("A generator must be a path.", [str])),
"_id": check(
("Your problems should not already have _ids.", [lambda id: False]))
})
def get_all_categories(show_disabled=False):
"""
Gets the set of distinct problem categories.
Args:
show_disabled: Whether to include categories that are only on disabled problems
Returns:
The set of distinct problem categories.
"""
db = api.common.get_conn()
match = {}
if not show_disabled:
match.update({"disabled": False})
return db.problems.find(match).distinct("category")
#TODO: Sanity checks for autogen
def analyze_problems():
"""
Checks the sanity of inserted problems.
Includes weightmap and grader verification.
Returns:
A list of error strings describing the problems.
"""
grader_missing_error = "{}: Missing grader at '{}'."
unknown_weightmap_pid = "{}: Has weightmap entry '{}' which does not exist."
problems = get_all_problems()
errors = []
for problem in problems:
if not isfile(join(grader_base_path, problem["grader"])):
errors.append(grader_missing_error.format(problem["name"], problem["grader"]))
for pid in problem["weightmap"].keys():
if safe_fail(get_problem, pid=pid) is None:
errors.append(unknown_weightmap_pid.format(problem["name"], pid))
return errors
def insert_problem(problem):
"""
Inserts a problem into the database. Does sane validation.
Args:
Problem dict.
score: points awarded for completing the problem.
category: problem's category
description: description of the problem.
grader: path relative to grader_base_path
threshold: Amount of points necessary for a team to unlock this problem.
Optional:
disabled: True or False. Defaults to False.
hint: hint for completing the problem.
tags: list of problem tags.
related_problems: list of related problems.
weightmap: problem's unlock weightmap
autogen: Whether or not the problem will be auto generated.
Returns:
The newly created problem id.
"""
db = api.common.get_conn()
validate(problem_schema, problem)
problem["disabled"] = problem.get("disabled", False)
problem["pid"] = api.common.hash(problem["name"])
weightmap = {}
if problem.get("weightmap"):
for name, weight in problem["weightmap"].items():
name_hash = api.common.hash(name)
weightmap[name_hash] = weight
problem["weightmap"] = weightmap
if safe_fail(get_problem, pid=problem["pid"]) is not None:
raise WebException("Problem with identical pid already exists.")
if safe_fail(get_problem, name=problem["name"]) is not None:
raise WebException("Problem with identical name already exists.")
db.problems.insert(problem)
api.cache.fast_cache.clear()
return problem["pid"]
def remove_problem(pid):
"""
Removes a problem from the given database.
Args:
pid: the pid of the problem to remove.
Returns:
The removed problem object.
"""
db = api.common.get_conn()
problem = get_problem(pid=pid)
db.problems.remove({"pid": pid})
api.cache.fast_cache.clear()
return problem
def update_problem(pid, updated_problem):
"""
Updates a problem with new properties.
Args:
pid: the pid of the problem to update.
updated_problem: an updated problem object.
Returns:
The updated problem object.
"""
db = api.common.get_conn()
if updated_problem.get("name", None) is not None:
if safe_fail(get_problem, name=updated_problem["name"]) is not None:
raise WebException("Problem with identical name already exists.")
problem = get_problem(pid=pid, show_disabled=True).copy()
problem.update(updated_problem)
# pass validation by removing/re-adding pid
problem.pop("pid", None)
validate(problem_schema, problem)
problem["pid"] = pid
db.problems.update({"pid": pid}, problem)
api.cache.fast_cache.clear()
return problem
def search_problems(*conditions):
"""
Aggregates all problems that match at least one of the given conditions (combined with a mongo $or).
Args:
conditions: multiple mongo queries to search.
Returns:
The list of matching problems.
"""
db = api.common.get_conn()
return list(db.problems.find({"$or": list(conditions)}, {"_id":0}))
def insert_problem_from_json(blob):
"""
Converts json blob of problem(s) into dicts. Runs insert_problem on each one.
See insert_problem for more information.
Returns:
A list of the created problem pids if an array of problems is specified.
"""
result = json_util.loads(blob)
if type(result) == list:
return [insert_problem(problem) for problem in result]
elif type(result) == dict:
return insert_problem(result)
else:
raise InternalException("JSON blob does not appear to be a list of problems or a single problem.")
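# Hedged example blob for insert_problem_from_json (field values are invented,
# but the keys follow problem_schema above):
#     '[{"name": "Caesar", "score": 50, "category": "Crypto",
#        "grader": "crypto/caesar.py", "description": "Decrypt the message.",
#        "threshold": 0}]'
# which would insert one problem and return a list with its pid.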
@api.cache.memoize(timeout=60, fast=True)
def get_grader(pid):
"""
Returns the grader module for a given problem.
Args:
pid: the problem id
Returns:
The grader module
"""
try:
path = get_problem(pid=pid, show_disabled=True)["grader"]
return imp.load_source(path[:-3], join(grader_base_path, path))
except FileNotFoundError:
raise InternalException("Problem grader for {} is offline.".format(get_problem(pid=pid)['name']))
def grade_problem(pid, key, tid=None):
"""
Grades the problem with its associated grader script.
Args:
tid: tid if provided
pid: problem's pid
key: user's submission
Returns:
A dict.
correct: boolean
points: number of points the problem is worth.
message: message returned from the grader.
"""
if tid is None:
tid = api.user.get_user()["tid"]
#If the problem is autogenerated, let
#api.autogen deal with it.
if api.autogen.is_autogen_problem(pid):
return api.autogen.grade_problem_instance(pid, tid, key)
problem = get_problem(pid=pid, show_disabled=True)
grader = get_grader(pid)
(correct, message) = grader.grade(tid, key)
return {
"correct": correct,
"points": problem["score"],
"message": message
}
@log_action
def submit_key(tid, pid, key, uid=None, ip=None):
"""
User problem submission. Problem submission is inserted into the database.
Args:
tid: user's team id
pid: problem's pid
key: answer text
uid: user's uid
Returns:
A dict.
correct: boolean
points: number of points the problem is worth.
message: message returned from the grader.
"""
db = api.common.get_conn()
validate(submission_schema, {"tid": tid, "pid": pid, "key": key})
if pid not in get_unlocked_pids(tid):
raise InternalException("You can't submit flags to problems you haven't unlocked.")
if pid in get_solved_pids(tid=tid):
exp = WebException("You have already solved this problem.")
exp.data = {'code': 'solved'}
raise exp
user = api.user.get_user(uid=uid)
if user is None:
raise InternalException("User submitting flag does not exist.")
uid = user["uid"]
result = grade_problem(pid, key, tid)
problem = get_problem(pid=pid)
eligibility = api.team.get_team(tid=tid)['eligible']
submission = {
'uid': uid,
'tid': tid,
'timestamp': datetime.utcnow(),
'pid': pid,
'ip': ip,
'key': key,
'eligible': eligibility,
'category': problem['category'],
'correct': result['correct']
}
if (key, pid) in [(submission["key"], submission["pid"]) for submission in get_submissions(tid=tid)]:
exp = WebException("You or one of your teammates has already tried this solution.")
exp.data = {'code': 'repeat'}
raise exp
db.submissions.insert(submission)
if submission["correct"]:
api.cache.invalidate_memoization(api.stats.get_score, {"kwargs.tid":tid}, {"kwargs.uid":uid})
api.cache.invalidate_memoization(get_unlocked_pids, {"args":tid})
api.cache.invalidate_memoization(get_solved_pids, {"kwargs.tid":tid}, {"kwargs.uid":uid})
api.cache.invalidate_memoization(get_all_problem_solves, {})
api.cache.invalidate_memoization(api.stats.get_score_progression, {"kwargs.tid":tid}, {"kwargs.uid":uid})
return result
def count_submissions(pid=None, uid=None, tid=None, category=None, correctness=None, eligibility=None):
db = api.common.get_conn()
match = {}
if uid is not None:
match.update({"uid": uid})
elif tid is not None:
match.update({"tid": tid})
if pid is not None:
match.update({"pid": pid})
if category is not None:
match.update({"category": category})
if correctness is not None:
match.update({"correct": correctness})
if eligibility is not None:
match.update({"eligible": eligibility})
return db.submissions.find(match, {"_id": 0}).count()
def get_submissions(pid=None, uid=None, tid=None, category=None, correctness=None, eligibility=None):
"""
Gets the submissions from a team or user.
Optional filters of pid or category.
Args:
uid: the user id
tid: the team id
category: category filter.
pid: problem filter.
correctness: correct filter
Returns:
A list of submissions from the given entity.
"""
db = api.common.get_conn()
match = {}
if uid is not None:
match.update({"uid": uid})
elif tid is not None:
match.update({"tid": tid})
if pid is not None:
match.update({"pid": pid})
if category is not None:
match.update({"category": category})
if correctness is not None:
match.update({"correct": correctness})
if eligibility is not None:
match.update({"eligible": eligibility})
return list(db.submissions.find(match, {"_id":0}))
def clear_all_submissions():
"""
Removes all submissions from the database.
"""
db = api.common.get_conn()
db.submissions.remove()
def clear_submissions(uid=None, tid=None, pid=None):
"""
Clear submissions for a given team, user, or problems.
Args:
uid: the user's uid to clear from.
tid: the team's tid to clear from.
pid: the pid to clear from.
"""
db = api.common.get_conn()
match = {}
if pid is not None:
match.update({"pid": pid})
elif uid is not None:
match.update({"uid": uid})
elif tid is not None:
match.update({"tid": tid})
else:
raise InternalException("You must supply either a tid, uid, or pid")
return db.submissions.remove(match)
def invalidate_submissions(pid=None, uid=None, tid=None):
"""
Invalidates the submissions for a given problem. Can be filtered by uid or tid.
Passing no arguments will invalidate all submissions.
Args:
pid: the pid of the problem.
uid: the user's uid that will his submissions invalidated.
tid: the team's tid that will have their submissions invalidated.
"""
db = api.common.get_conn()
match = {}
if pid is not None:
match.update({"pid": pid})
if uid is not None:
match.update({"uid": uid})
elif tid is not None:
match.update({"tid": tid})
db.submissions.update(match, {"$set": {"correct": False}}, multi=True)
def reevaluate_submissions_for_problem(pid):
"""
In the case of the problem or grader being updated, this will reevaluate submissions for a problem.
Args:
pid: the pid of the problem to be reevaluated.
"""
db = api.common.get_conn()
get_problem(pid=pid, show_disabled=True)
keys = {}
for submission in get_submissions(pid=pid):
key = submission["key"]
if key not in keys:
result = grade_problem(pid, key, submission["tid"])
if result["correct"] != submission["correct"]:
keys[key] = result["correct"]
else:
keys[key] = None
for key, change in keys.items():
if change is not None:
db.submissions.update({"key": key}, {"$set": {"correct": change}}, multi=True)
def reevaluate_all_submissions():
"""
In the case of the problem or grader being updated, this will reevaluate all submissions.
"""
api.cache.clear_all()
for problem in get_all_problems(show_disabled=True):
reevaluate_submissions_for_problem(problem["pid"])
@api.cache.memoize(timeout=60, fast=True)
def get_problem(pid=None, name=None, tid=None, show_disabled=False):
"""
Gets a single problem.
Args:
pid: The problem id
name: The name of the problem
show_disabled: Boolean indicating whether or not to show disabled problems.
Returns:
The problem dictionary from the database
"""
db = api.common.get_conn()
match = {}
if pid is not None:
match.update({'pid': pid})
elif name is not None:
match.update({'name': name})
else:
raise InternalException("Must supply pid or display name")
if tid is not None and pid not in get_unlocked_pids(tid):
raise InternalException("You cannot get this problem")
if not show_disabled:
match.update({"disabled": False})
db = api.common.get_conn()
problem = db.problems.find_one(match, {"_id":0})
if problem is None:
raise SevereInternalException("Could not find problem! You gave " + str(match))
return problem
def get_all_problems(category=None, show_disabled=False):
"""
Gets all of the problems in the database.
Args:
category: Optional parameter to restrict which problems are returned
show_disabled: Boolean indicating whether or not to show disabled problems.
Returns:
List of problems from the database
"""
db = api.common.get_conn()
match = {}
if category is not None:
match.update({'category': category})
if not show_disabled:
match.update({'disabled': False})
return list(db.problems.find(match, {"_id":0}).sort('score', pymongo.ASCENDING))
@api.cache.memoize()
def get_all_problem_solves():
"""
Gets the number of solves for all problems
"""
db = api.common.get_conn()
match = {'correct': True}
correct_submissions = db.submissions.find(match, {"_id":0})
solves = {}
for submission in correct_submissions:
if submission["pid"] in solves:
if submission["tid"] not in solves[submission["pid"]]:
solves[submission["pid"]].append(submission["tid"])
else:
solves[submission["pid"]] = [submission["tid"]]
solves = {problem: len(solves[problem]) for problem in solves}
return solves
@api.cache.memoize()
def get_solved_pids(tid=None, uid=None, category=None):
"""
Gets the solved pids for a given team or user.
Args:
tid: The team id
category: Optional parameter to restrict which problems are returned
Returns:
List of solved problem ids
"""
return list(set([sub['pid'] for sub in get_submissions(tid=tid, uid=uid, category=category, correctness=True)]))
def get_solved_problems(tid=None, uid=None, category=None):
"""
Gets the solved problems for a given team or user.
Args:
tid: The team id
category: Optional parameter to restrict which problems are returned
Returns:
List of solved problem dictionaries
"""
return [get_problem(pid=pid) for pid in get_solved_pids(tid=tid, uid=uid, category=category)]
@api.cache.memoize()
def get_unlocked_pids(tid, category=None):
"""
Gets the unlocked pids for a given team.
Args:
tid: The team id
category: Optional parameter to restrict which problems are returned
Returns:
List of unlocked problem ids
"""
solved = get_solved_problems(tid=tid, category=category)
unlocked = []
for problem in get_all_problems():
if 'weightmap' not in problem or 'threshold' not in problem:
unlocked.append(problem['pid'])
else:
weightsum = sum(problem['weightmap'].get(p['pid'], 0) for p in solved)
if weightsum >= problem['threshold']:
unlocked.append(problem['pid'])
return unlocked
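# Hedged illustration of the weightmap/threshold rule implemented above (problem
# names are invented): a problem inserted with
#     {"weightmap": {"Caesar": 1, "Vigenere": 1}, "threshold": 2, ...}
# only shows up in get_unlocked_pids once both named problems are solved,
# because the solved weights must sum to at least the threshold.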
def get_unlocked_problems(tid, category=None):
"""
Gets the unlocked problems for a given team.
Args:
tid: The team id
category: Optional parameter to restrict which problems are returned
Returns:
List of unlocked problem dictionaries
"""
solved = get_solved_problems(tid=tid)
problems = get_all_problems()
unlocked = []
for problem in problems:
if api.autogen.is_autogen_problem(problem["pid"]):
problem.update(api.autogen.get_problem_instance(problem["pid"], tid))
problem['solved'] = problem in solved
problem['solves'] = len(api.stats.get_problem_solves(pid=problem["pid"]))
unlocked.append(problem)
return unlocked
| 29.284431 | 116 | 0.638176 | 2,520 | 19,562 | 4.87619 | 0.125 | 0.022786 | 0.019775 | 0.023926 | 0.423503 | 0.360189 | 0.293132 | 0.275065 | 0.223958 | 0.193604 | 0 | 0.001565 | 0.248799 | 19,562 | 667 | 117 | 29.328336 | 0.834638 | 0.251406 | 0 | 0.311728 | 0 | 0 | 0.14931 | 0 | 0 | 0 | 0 | 0.001499 | 0 | 1 | 0.074074 | false | 0 | 0.030864 | 0 | 0.17284 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8aa79c9a980e1c8dcedff46c09d50cb167d5e66 | 763 | py | Python | Bio-StrongHold/src/Identifying_Reversing_Substitutions.py | crf1111/Bio-Informatics-Learning | 2ccc02d7a23584c12aee44c5620160cdcaf70bd4 | [
"MIT"
] | 1 | 2018-10-10T19:03:52.000Z | 2018-10-10T19:03:52.000Z | Bio-StrongHold/src/Identifying_Reversing_Substitutions.py | crf1111/Bio-Informatics-Learning | 2ccc02d7a23584c12aee44c5620160cdcaf70bd4 | [
"MIT"
] | null | null | null | Bio-StrongHold/src/Identifying_Reversing_Substitutions.py | crf1111/Bio-Informatics-Learning | 2ccc02d7a23584c12aee44c5620160cdcaf70bd4 | [
"MIT"
] | null | null | null | import newick
from Bio import SeqIO
from StringIO import StringIO
def find_rev(t,dnas):
r = []
for i in range(len(dnas[t.u])):
r += [(p[0],p[-1],i,dnas[p[0].u][i]) for p in t.find_rev(dnas,i)]
return r
if __name__ == '__main__':
with open('data/data.dat') as f:
nw = f.readline()
nw.split()
tree = newick.read(StringIO(nw))
fst = f.read()
fst = StringIO(fst)
dnas = SeqIO.to_dict(SeqIO.parse(fst, 'fasta'))  # map record id -> SeqRecord, keyed by node name
nodes = tree.nodes()
for node in nodes:
revs = find_rev(node,dnas)
for fc, dest, pos, mid in revs:
print("%s %s %d %s->%s->%s" % (fc.u, dest.u, pos + 1, dnas[node.u][pos], mid, dnas[dest.u][pos]))
assert(dnas[node.u][pos] == dnas[dest.u][pos]) | 26.310345 | 109 | 0.537353 | 126 | 763 | 3.15873 | 0.396825 | 0.050251 | 0.060302 | 0.060302 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007246 | 0.27654 | 763 | 29 | 110 | 26.310345 | 0.713768 | 0 | 0 | 0 | 0 | 0 | 0.058901 | 0 | 0 | 0 | 0 | 0 | 0.045455 | 1 | 0.045455 | false | 0 | 0.136364 | 0 | 0.227273 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8ab7a3fa213ce4e42e666cc2f252f5696e60995 | 531 | py | Python | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/2_features/numtrees_30/rule_29.py | apcarrik/kaggle | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | [
"MIT"
] | null | null | null | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/2_features/numtrees_30/rule_29.py | apcarrik/kaggle | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | [
"MIT"
] | null | null | null | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/2_features/numtrees_30/rule_29.py | apcarrik/kaggle | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | [
"MIT"
] | null | null | null | def findDecision(obj): #obj[0]: Coupon, obj[1]: Education
# {"feature": "Coupon", "instances": 34, "metric_value": 0.9597, "depth": 1}
if obj[0]>1:
# {"feature": "Education", "instances": 24, "metric_value": 0.8709, "depth": 2}
if obj[1]>1:
return 'True'
elif obj[1]<=1:
return 'True'
else: return 'True'
elif obj[0]<=1:
# {"feature": "Education", "instances": 10, "metric_value": 0.971, "depth": 2}
if obj[1]<=1:
return 'False'
elif obj[1]>1:
return 'True'
else: return 'True'
else: return 'False'
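# Hedged call examples for the generated rule above (inputs invented):
#     findDecision([2, 2])  # Coupon > 1, Education > 1 -> 'True'
#     findDecision([1, 1])  # Coupon <= 1, Education <= 1 -> 'False'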
| 29.5 | 81 | 0.600753 | 80 | 531 | 3.95 | 0.3125 | 0.063291 | 0.063291 | 0.139241 | 0.531646 | 0.518987 | 0.329114 | 0.208861 | 0.208861 | 0 | 0 | 0.084668 | 0.177024 | 531 | 17 | 82 | 31.235294 | 0.638444 | 0.495292 | 0 | 0.357143 | 0 | 0 | 0.113636 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8aca41ccaf83212d24fd60dba794a9d3fe4792b | 4,152 | py | Python | src/env_wrapper/dummy_vec_env.py | sagerpascal/rl-bootcamp-hackathon | 71c4ab1ecab13058a74bc66027b79de4afc35c73 | [
"MIT"
] | null | null | null | src/env_wrapper/dummy_vec_env.py | sagerpascal/rl-bootcamp-hackathon | 71c4ab1ecab13058a74bc66027b79de4afc35c73 | [
"MIT"
] | null | null | null | src/env_wrapper/dummy_vec_env.py | sagerpascal/rl-bootcamp-hackathon | 71c4ab1ecab13058a74bc66027b79de4afc35c73 | [
"MIT"
] | null | null | null | """
All the env wrappers were copied from OpenAI Baselines and slightly adjusted (for MPI support, see results -> ppo2)
Source: https://github.com/openai/baselines
"""
from collections import OrderedDict
import gym
import numpy as np
from .vec_env import VecEnv
def copy_obs_dict(obs):
"""
Deep-copy an observation dict.
"""
return {k: np.copy(v) for k, v in obs.items()}
def dict_to_obs(obs_dict):
"""
Convert an observation dict into a raw array if the
original observation space was not a Dict space.
"""
if set(obs_dict.keys()) == {None}:
return obs_dict[None]
return obs_dict
def obs_space_info(obs_space):
"""
Get dict-structured information about a gym.Space.
Returns:
A tuple (keys, shapes, dtypes):
keys: a list of dict keys.
shapes: a dict mapping keys to shapes.
dtypes: a dict mapping keys to dtypes.
"""
if isinstance(obs_space, gym.spaces.Dict):
assert isinstance(obs_space.spaces, OrderedDict)
subspaces = obs_space.spaces
else:
subspaces = {None: obs_space}
keys = []
shapes = {}
dtypes = {}
for key, box in subspaces.items():
keys.append(key)
shapes[key] = box.shape
dtypes[key] = box.dtype
return keys, shapes, dtypes
class DummyVecEnv(VecEnv):
"""
VecEnv that runs multiple environments sequentially, that is,
the step and reset commands are sent to one environment at a time.
Useful when debugging and when num_env == 1 (in the latter case,
avoids communication overhead)
"""
def __init__(self, env_fns):
"""
Arguments:
env_fns: iterable of callables functions that build environments
"""
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)
obs_space = env.observation_space
self.keys, shapes, dtypes = obs_space_info(obs_space)
self.buf_obs = {k: np.zeros((self.num_envs,) + tuple(shapes[k]), dtype=dtypes[k]) for k in self.keys}
self.buf_dones = np.zeros((self.num_envs,), dtype=np.bool)
self.buf_rews = np.zeros((self.num_envs,), dtype=np.float32)
self.buf_infos = [{} for _ in range(self.num_envs)]
self.actions = None
self.spec = self.envs[0].spec
def step_async(self, actions):
listify = True
try:
if len(actions) == self.num_envs:
listify = False
except TypeError:
pass
if not listify:
self.actions = actions
else:
assert self.num_envs == 1, "actions {} is either not a list or has a wrong size - cannot match to {} environments".format(
actions, self.num_envs)
self.actions = [actions]
def step_wait(self):
for e in range(self.num_envs):
action = self.actions[e]
# if isinstance(self.envs[e].action_space, spaces.Discrete):
# action = int(action)
obs, self.buf_rews[e], self.buf_dones[e], self.buf_infos[e] = self.envs[e].step(action)
if self.buf_dones[e]:
obs = self.envs[e].reset()
self._save_obs(e, obs)
return (self._obs_from_buf(), np.copy(self.buf_rews), np.copy(self.buf_dones),
self.buf_infos.copy())
def reset(self):
for e in range(self.num_envs):
obs = self.envs[e].reset()
self._save_obs(e, obs)
return self._obs_from_buf()
def _save_obs(self, e, obs):
for k in self.keys:
if k is None:
self.buf_obs[k][e] = obs
else:
self.buf_obs[k][e] = obs[k]
def _obs_from_buf(self):
return dict_to_obs(copy_obs_dict(self.buf_obs))
def get_images(self):
return [env.render(mode='rgb_array') for env in self.envs]
def render(self, mode='human'):
if self.num_envs == 1:
return self.envs[0].render(mode=mode)
else:
return super().render(mode=mode)
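# Hedged usage sketch (assumes a locally registered gym environment id):
#
#     venv = DummyVecEnv([lambda: gym.make("CartPole-v1") for _ in range(4)])
#     obs = venv.reset()
#     obs, rews, dones, infos = venv.step([venv.action_space.sample() for _ in range(4)])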
| 30.086957 | 134 | 0.600434 | 577 | 4,152 | 4.17331 | 0.27383 | 0.040698 | 0.045681 | 0.013704 | 0.169851 | 0.098007 | 0.085548 | 0.064784 | 0.043189 | 0.043189 | 0 | 0.00305 | 0.289258 | 4,152 | 137 | 135 | 30.306569 | 0.812945 | 0.217245 | 0 | 0.126582 | 0 | 0 | 0.031812 | 0 | 0 | 0 | 0 | 0 | 0.025316 | 1 | 0.139241 | false | 0.012658 | 0.050633 | 0.025316 | 0.329114 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8ade1bdad16fb30d06e3d0e88319100dd4c9bf7 | 7,861 | py | Python | csr_utils/csr_utils.py | narges-rzv/csr_utils | 9a643b4a7ab2ccc889664ea63e9bff764f8cbc8b | [
"MIT"
] | null | null | null | csr_utils/csr_utils.py | narges-rzv/csr_utils | 9a643b4a7ab2ccc889664ea63e9bff764f8cbc8b | [
"MIT"
] | null | null | null | csr_utils/csr_utils.py | narges-rzv/csr_utils | 9a643b4a7ab2ccc889664ea63e9bff764f8cbc8b | [
"MIT"
] | null | null | null | import numpy as np
from scipy.sparse import csr_matrix
__version__ = '0.1.1'
def normalize_csr_matrix(x, meanarr=None, stdarr=None, ixnormed=None, threshold_to_clip=2000, verbose=True):
""" Normalizes a CSR matrix only based on non-zero values, without turning it into dense array.
In the CSR matrix, rows correspond to samples, and columns correspond to features.
Normalization will be such that each column (feature)'s non-zero values will have mean of 0.0 and standard deviation of 1.0.
Tips: Useful for machine learning - most algorithms need normalized input
Will return the scalable equivalent of x = x.toarray(); x[(x==0)] = np.nan; (x - np.nanmean(x, axis=0)) / np.nanstd(x, axis=0)
We compute a faster and equivalent definition of standard deviation:
sigma = SquareRoot(ExpectedValue(|X - mean|^2)) # slow
sigma = SquareRoot(ExpectedValue(X^2) - ExpectedValue(X)^2) # fast
(https://en.wikipedia.org/wiki/Standard_deviation#Definition_of_population_values)
Assumptions:
- If we don't have any observations in a column i, mean_array[i] be set to 0.0, and std_array[i] will be set to 1.0.
- If we have a single observation, or if standard deviation is 0.0 for a column, we only subtract the mean for that column and effectively eliminate that column.
The function allows the normalization to be based on pre-specified mean and standard deviation arrays.
The function also allows only a given subset of features to be normalized.
Parameters
----------
x : csr_matrix (scipy.sparse.csr_matrix)
The CSR matrix to normalize. x should have shape N x D where N is number of samples and D is number of features.
meanarr : array_like (np.array), or None
If not None, meanarr needs to be one dimensional and of shape D
Default is None.
- If None, the mean array will be computed according to the non-zero values of the CSR input x, and will be returned.
(Useful when normalizing 'training set'. Pickle this mean array for future use.)
- If not None, normalization will be done according to this mean array rather than a computed mean array.
(Useful when normalizing 'test set')
stdarr : array_like (np.array), or None
If not None, stdarr needs to be one dimensional and of shape D
Default is None.
- If None, the std array will be computed according to the non-zero values of the CSR input x, and will be returned.
(Useful when normalizing 'training set'. Pickle this std array for future use.)
- If not None, normalization will be done according to this std array rather than a computed mean array.
(Useful when normalizing 'test set').
ixnormed : array_like (np.array), or None
Indicates which columns of the CSR should be normalized at all.
Only relevant if stdarr and meanarr are both not None.
If None, all columns will be normalized.
If not None, only the ixnormed subset of the columns will be normalized.
Note: this is not a boolean mask; rather, it contains the indices of the columns to be normalized.
threshold_to_clip : scalar number, either float or int.
If the standard deviation of a column is above this value, we won't normalize that column.
Set to np.inf if you don't desire this functionality.
verbose: bool (Default: True)
If True, print status while normalizing.
Returns
-------
xnorm : csr_matrix (scipy.sparse.csr_matrix)
Normalized csr array of nonzero values of x.
xnorm has shape N x D where N is number of samples and D is number of features.
Normalization will be such that each column (feature)'s non-zero values will have mean of 0.0 and standard deviation of 1.0.
xnorm is scalable equivalent of x = x.todense(); x[(x==0)] = np.nan; (x - np.nanmean(x, axis=0)) / np.nanstd(x, axis=0)
Only ix_done_normalized columns are normalized (i.e. columns where standard deviation is not zero.)
mean_array : array_like (np.array)
mean_array is one dimensional and of shape D
The mean_array[i] is the mean value of nonzero values of column i in input x.
std_array : array_like (np.array)
std_array is one dimensional and of shape D
The std_array[i] is the standard deviation value of nonzero values of column i in input x.
ix_done_normalized : array_like (np.array)
ix_done_normalized is one dimensional array of size K <= D,
and contains the index of columns that were normalized afterall.
(If a column has standard deviation of zero or standard deviation above threshold_to_clip, it is NOT normalized.)
Example
-------
>>> import numpy as np
>>> from scipy.sparse import csr_matrix
>>> x = csr_matrix(np.array([[1, 0, 0], [3, 0, 4], [2, 5, 2]], dtype=float))
>>> print(x.toarray())
[[ 1. 0. 0.]
[ 3. 0. 4.]
[ 2. 5. 2.]]
>>> xnorm, xmean, xstd, xixnormed = csr_utils.normalize_csr_matrix(x)
>>> print(xnorm.todense())
[[-1.22474487 0. 0. ]
[ 1.22474487 0. 1. ]
[ 0. 0. -1. ]]
>>> xmean
array([2., 5., 3.])
>>> xstd
array([0.81649658, 1. , 1. ])
>>> xixnormed
array([0, 2])
"""
xnorm = x.copy()
if meanarr is None and stdarr is None:
nnz_cnt_denominator = np.array((x != 0).sum(axis=0)).ravel()
nnz_columns_ix_cnt_zro = (nnz_cnt_denominator == 0).ravel() # these are columns that have no non-zero values.
nnz_cnt_denominator[nnz_columns_ix_cnt_zro] = 1.0 # setting these columns to 1.0 to turn 0/0 into 0/1 (and get 0)
nnz_mean_sum = np.array(x.sum(axis=0)).ravel()
mean_array = nnz_mean_sum/nnz_cnt_denominator
nnz_std_sum_sqr = np.array((x.multiply(x)).sum(axis=0)).ravel() # sum of squares of nonzero values of csr X[:,i]
std_array = np.sqrt((nnz_std_sum_sqr/nnz_cnt_denominator) - (mean_array ** 2))
ix_to_normalize_std = (std_array != 0) & (std_array < threshold_to_clip)
ix_to_not_normalize_std = (ix_to_normalize_std == False ).nonzero()[0].ravel()
std_array[ix_to_not_normalize_std] = 1.0
ix_to_normalize = ix_to_normalize_std
if ixnormed is not None:
ixnormed_inverse = (ixnormed == False)
mean_array[ixnormed_inverse] = 0.0
std_array[ixnormed_inverse] = 1.0
ix_to_normalize[ixnormed_inverse] = False
else:
if verbose:
print('mean and sd already specified. normalizing with given mean/sd.')
if meanarr is None or stdarr is None:
raise ValueError('Both meanarr and stdarr are required, if you want to normalized based on pre-computed values. Aborting.')
mean_array = meanarr
std_array = stdarr
if ixnormed is not None:
ix_to_normalize = np.zeros((std_array.shape), dtype=bool)
ix_to_normalize[ixnormed] = True
else:
ix_to_normalize = np.ones((std_array.shape), dtype=bool)
# normalize the nonzero values: Note that we won't eliminate_zeros() here to keep the values that were equal to the mean available for next steps.
xnorm.data = (xnorm.data - mean_array[xnorm.indices])/std_array[xnorm.indices]
ix_done_normalized = ix_to_normalize.nonzero()[0].ravel()
return xnorm, mean_array, std_array, ix_done_normalized
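# Hedged train/test sketch (variable names are illustrative), following the
# docstring's advice to reuse the training statistics on a test matrix:
#
#     x_train_n, mu, sd, ix = normalize_csr_matrix(x_train)
#     x_test_n, _, _, _ = normalize_csr_matrix(x_test, meanarr=mu, stdarr=sd, ixnormed=ix)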
| 51.045455 | 169 | 0.641394 | 1,164 | 7,861 | 4.219931 | 0.20189 | 0.027687 | 0.023819 | 0.019544 | 0.34202 | 0.280945 | 0.264658 | 0.264658 | 0.264658 | 0.234121 | 0 | 0.019894 | 0.277446 | 7,861 | 153 | 170 | 51.379085 | 0.844894 | 0.660094 | 0 | 0.108108 | 0 | 0.027027 | 0.084034 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027027 | false | 0 | 0.054054 | 0 | 0.108108 | 0.027027 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8ae8ccd11908d6e104566a91b77c660f3e7171b | 1,124 | py | Python | mnist_fully_connected/utils.py | Loricanal/DeepLearning | e3ae7cce2c1beed3fac5152b0a1fe14c21e9b180 | [
"MIT"
] | null | null | null | mnist_fully_connected/utils.py | Loricanal/DeepLearning | e3ae7cce2c1beed3fac5152b0a1fe14c21e9b180 | [
"MIT"
] | null | null | null | mnist_fully_connected/utils.py | Loricanal/DeepLearning | e3ae7cce2c1beed3fac5152b0a1fe14c21e9b180 | [
"MIT"
] | null | null | null | import numpy as np
import gzip
import pickle
import matplotlib.pyplot as plt
def load_data():
np.random.seed(1990)
print("Loading MNIST data .....")
# Load the MNIST dataset
with gzip.open('Data/mnist.pkl.gz', 'r') as f:
train_set, valid_set, test_set = pickle.load(f)
learn_data = [(train_set[0][i], [1 if j == train_set[1][i] else 0 for j in range(10)]) \
for i in np.arange(len(train_set[0]))]
test_data = [(test_set[0][i], [1 if j == test_set[1][i] else 0 for j in range(10)]) \
for i in np.arange(len(test_set[0]))]
validation_data = [(valid_set[0][i], [1 if j == valid_set[1][i] else 0 for j in range(10)]) \
for i in np.arange(len(valid_set[0]))]
print("Done.")
return learn_data , test_data, validation_data
def plot_curve(t,s,metric):
plt.plot(t, s)
plt.ylabel(metric) # or ERROR
plt.xlabel('Epoch')
plt.title('Learning Curve_'+str(metric))
#curve_name=str(metric)+"LC.png"
#plt.savefig(Figures/curve_name)
plt.show()
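# Hedged usage sketch (assumes Data/mnist.pkl.gz exists relative to the cwd):
#
#     learn, test, valid = load_data()
#     plot_curve(range(1, 11), [0.5] * 10, "Accuracy")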
| 30.378378 | 102 | 0.575623 | 180 | 1,124 | 3.466667 | 0.361111 | 0.038462 | 0.024038 | 0.028846 | 0.235577 | 0.235577 | 0.192308 | 0.192308 | 0.192308 | 0.192308 | 0 | 0.030637 | 0.274021 | 1,124 | 36 | 103 | 31.222222 | 0.734069 | 0.08274 | 0 | 0 | 0 | 0 | 0.065366 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.173913 | 0 | 0.304348 | 0.086957 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8b14f85c5015de9b74568b3ca3675b07d561c35 | 1,009 | py | Python | setup.py | textioHQ/textio-pyInflect | 1c311b6e008af6566643b74eddfb1ad96f841cb3 | [
"MIT"
] | null | null | null | setup.py | textioHQ/textio-pyInflect | 1c311b6e008af6566643b74eddfb1ad96f841cb3 | [
"MIT"
] | 1 | 2021-06-11T21:16:17.000Z | 2021-07-08T01:02:43.000Z | setup.py | textioHQ/textio-pyinflect | 1c311b6e008af6566643b74eddfb1ad96f841cb3 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import setuptools
# There are currently no dependencies so this is fine but note that if
# dependencies are added, this is a bad technique because setup will
# fail if those aren't installed first.
from textio_pyinflect import __version__
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="textio-pyinflect",
version=__version__,
author="Brad Jascob",
author_email="bjascob@msn.com",
description="A python module for word inflections designed for use with Spacy.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/bjascob/pyinflect",
include_package_data=True,
package_data={"": ["*.csv"]},
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| 32.548387 | 84 | 0.696729 | 124 | 1,009 | 5.508065 | 0.709677 | 0.087848 | 0.055637 | 0.087848 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003663 | 0.188305 | 1,009 | 30 | 85 | 33.633333 | 0.830281 | 0.189296 | 0 | 0 | 0 | 0 | 0.384521 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.086957 | 0 | 0.086957 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8b3be6e8b6e108e0807640877dfa10491654884 | 4,858 | py | Python | kb.py | jean-philippe-martin/semantic-notes | 5228ce3173681095647d31532cd790d1b8327cfe | [
"Apache-2.0"
] | null | null | null | kb.py | jean-philippe-martin/semantic-notes | 5228ce3173681095647d31532cd790d1b8327cfe | [
"Apache-2.0"
] | null | null | null | kb.py | jean-philippe-martin/semantic-notes | 5228ce3173681095647d31532cd790d1b8327cfe | [
"Apache-2.0"
] | null | null | null | # "Knowledge Base"
from typing import List, Iterable, Dict, Set, Union, Any
from collections import defaultdict
# Check the type annotations like this:
# mypy --py2 graph.py
# The type that goes into a KB, and what a KB
# still mostly feels like (modulo the few added features).
KBDict = Dict[str, Dict[str, List[Any]]]
class KB(dict):
"""A very simple "knowledge base".
It's organized in pages. Each page has a name and a set of key/value pairs.
The keys are called "attributes" of the page.
Each value is a list (possibly with a single element).
The KB can be used like a normal dict, but it offers a few convenience
features and includes the idea that pages may have multiple names.
To set an alternate name for a page, set its 'aka' property (see tests).
Page lookup is also case-insensitive:
if you have pages 'aaa' and 'AAA' then a lookup for 'AAA' will return
the latter, but a lookup for 'aAA' will return the former.
Two names refer to the same page if and only if normalize_page returns
the same value for both.
Examples:
>>> ppl={'John':{'eye_color': ['blue']}}
>>> k=KB(ppl)
>>> k['john']['eye_color']
['blue']
>>> ppl['Bob']={'aka': ['Bobby tables'], 'eye_color': ['brown']}
>>> k=KB(ppl)
>>> k['bobby tables']['eye_color']
['brown']
>>> k.get_attribute('bobby tables', 'eye_color')
['brown']
"""
def __init__(self, dict_of_dict):
# type: (KBDict) -> None
self.aka = {} # type: Dict[str, str]
self.update(dict_of_dict)
self._fill_aka()
def __getitem__(self, key):
# type: (str) -> Dict[str, List[Any]]
return dict.__getitem__(self,self.normalize_page(key))
def get(self, key, default=None):
# type: (str, Any) -> Dict[str, List[Any]]
return dict.get(self,self.normalize_page(key), default)
def has_key(self, page):
# type: (str) -> bool
return dict.has_key(self, self.normalize_page(page))
def normalize_page(self, key):
# type: (str) -> str
"""page name or alias -> page name"""
if self.aka.has_key(key):
return self.aka[key]
if dict.has_key(self, key):
return key
# If the page name has capitals, and we do too but in different places,
# should still find the page name.
# If the page doesn't exist, return the name unmodified.
return self.aka.get(key.lower(), key)
def is_same_page(self, a, b):
# type: (str, str) -> bool
"""true if a,b are names of the same page, even if aliases."""
return self.normalize_page(a) == self.normalize_page(b)
def get_attribute(self, key, attribute, default=None):
# type: (str, str, List[Any]) -> List[Any]
"""kb[key][attribute], or None if either's missing."""
page = self.get(key, None)
if not page: return default
return page.get(attribute, default)
def get_unique_attribute(self, key, attribute, default=None):
# type: (str, str, List[Any]) -> Any
"""kb[key][attribute][0], or None if either's missing."""
return unique(self.get_attribute(key,attribute,default))
def _fill_aka(self):
# type: () -> None
for k,v in dict.items(self):
a = v.get("aka", [])
for b in a:
if self.aka.get(b)!=None:
print(str(b)+" is defined twice: as "+self.aka[b] + " and "+k)
continue
self.aka[b]=k
# put in 'baby' as an aka for the 'Baby' page, *unless* 'baby' is already
# a registered aka for something else.
for k in dict.keys(self):
if not self.aka.has_key(k.lower()):
self.aka[k.lower()] = k
a = self.get(k).get("aka", [])
for b in a:
if not self.aka.has_key(b.lower()):
self.aka[b.lower()] = k
KB_or_Dict = Union[KB, KBDict]
def merge(kblist):
# type: (List[KBDict]) -> KBDict
"""Merges the dicts together into a single one by appending all the keys.
Example:
>>> a = {'mars': {'isa': ['planet']}}
>>> b = {'mars': {'color': ['red']}}
>>> sorted(merge([a,b])['mars'].keys())
['color', 'isa']
"""
ret = {} # type: KBDict
for kb in kblist:
for k, v in kb.items():
if k not in ret:
ret[k] = defaultdict(list)
for attrib, values in v.items():
ret[k][attrib] += values
return ret
def unique(value):
"""Check that there is only one value in the list, and return it.
>>> kb = KB({'John':{'eye_color': ['blue']}})
>>> unique(kb.get_attribute('John', 'eye_color'))
'blue'
This is handy in the context of KB, where everything's a list but
it's common to expect that there's only one value.
"""
if not value: return None
if len(value)!=1:
raise ValueError('Expected a single value, got multiple (%s)' % len(value))
return value[0]
def unlist(value_or_list):
    """Return the single element of a one-element list; otherwise return the
    input unchanged. Strings are never unpacked, even though they have a length.
    """
    x = value_or_list
    if isinstance(x, str) or isinstance(x, unicode):
        return x
    try:
        if len(x) == 1:
            return x[0]
    except TypeError:
        pass
    return x
b8b56ef2b83328469be60616a305d6373e4bf866 | 23,489 | py | Python | openpype/hosts/nuke/api/plugin.py | Tilix4/OpenPype | 8909bd890170880aa7ec8b673abaa25a9bdf40f2 | ["MIT"] | 1 | 2022-03-23T06:24:24.000Z | 2022-03-23T06:24:24.000Z | openpype/hosts/nuke/api/plugin.py | Tilix4/OpenPype | 8909bd890170880aa7ec8b673abaa25a9bdf40f2 | ["MIT"] | null | null | null | openpype/hosts/nuke/api/plugin.py | Tilix4/OpenPype | 8909bd890170880aa7ec8b673abaa25a9bdf40f2 | ["MIT"] | null | null | null
import os
import random
import string
from collections import OrderedDict
from abc import abstractmethod
import nuke
from openpype.api import get_current_project_settings
from openpype.pipeline import (
LegacyCreator,
LoaderPlugin,
)
from .lib import (
Knobby,
check_subsetname_exists,
reset_selection,
maintained_selection,
set_avalon_knob_data,
add_publish_knob,
get_nuke_imageio_settings,
set_node_knobs_from_settings
)
class OpenPypeCreator(LegacyCreator):
"""Pype Nuke Creator class wrapper"""
node_color = "0xdfea5dff"
def __init__(self, *args, **kwargs):
super(OpenPypeCreator, self).__init__(*args, **kwargs)
if check_subsetname_exists(
nuke.allNodes(),
self.data["subset"]):
msg = ("The subset name `{0}` is already used on a node in"
"this workfile.".format(self.data["subset"]))
self.log.error(msg + "\n\nPlease use other subset name!")
raise NameError("`{0}: {1}".format(__name__, msg))
return
def process(self):
from nukescripts import autoBackdrop
instance = None
if (self.options or {}).get("useSelection"):
nodes = nuke.selectedNodes()
if not nodes:
nuke.message("Please select nodes that you "
"wish to add to a container")
return
elif len(nodes) == 1:
# only one node is selected
instance = nodes[0]
if not instance:
# Not using selection or multiple nodes selected
bckd_node = autoBackdrop()
bckd_node["tile_color"].setValue(int(self.node_color, 16))
bckd_node["note_font_size"].setValue(24)
bckd_node["label"].setValue("[{}]".format(self.name))
instance = bckd_node
# add avalon knobs
set_avalon_knob_data(instance, self.data)
add_publish_knob(instance)
return instance
def get_review_presets_config():
settings = get_current_project_settings()
review_profiles = (
settings["global"]
["publish"]
["ExtractReview"]
["profiles"]
)
outputs = {}
for profile in review_profiles:
outputs.update(profile.get("outputs", {}))
return [str(name) for name, _prop in outputs.items()]
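# Editor's sketch (illustration only): the nested settings shape that
# get_review_presets_config() walks; the profile and output names here are
# made up.
def _review_presets_example():
    settings = {"global": {"publish": {"ExtractReview": {"profiles": [
        {"outputs": {"h264": {}, "png": {}}},
        {"outputs": {"prores": {}}},
    ]}}}}
    outputs = {}
    for profile in settings["global"]["publish"]["ExtractReview"]["profiles"]:
        outputs.update(profile.get("outputs", {}))
    return sorted(outputs)  # -> ['h264', 'png', 'prores']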
class NukeLoader(LoaderPlugin):
container_id_knob = "containerId"
container_id = None
def reset_container_id(self):
self.container_id = "".join(random.choice(
string.ascii_uppercase + string.digits) for _ in range(10))
def get_container_id(self, node):
id_knob = node.knobs().get(self.container_id_knob)
return id_knob.value() if id_knob else None
def get_members(self, source):
"""Return nodes that has same "containerId" as `source`"""
source_id = self.get_container_id(source)
return [node for node in nuke.allNodes(recurseGroups=True)
if self.get_container_id(node) == source_id
and node is not source] if source_id else []
def set_as_member(self, node):
source_id = self.get_container_id(node)
if source_id:
node[self.container_id_knob].setValue(source_id)
else:
            HIDDEN_FLAG = 0x00040000  # undocumented nuke knob flag that hides the knob
            _knob = Knobby(
                "String_Knob",
                self.container_id,
                flags=[
                    nuke.READ_ONLY,
                    HIDDEN_FLAG
])
knob = _knob.create(self.container_id_knob)
node.addKnob(knob)
def clear_members(self, parent_node):
members = self.get_members(parent_node)
dependent_nodes = None
for node in members:
_depndc = [n for n in node.dependent() if n not in members]
if not _depndc:
continue
dependent_nodes = _depndc
break
for member in members:
self.log.info("removing node: `{}".format(member.name()))
nuke.delete(member)
return dependent_nodes
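# Editor's note: NukeLoader tags every member node with one shared, random
# 10-character id in a hidden "containerId" knob, so get_members() can find
# siblings without relying on node names. A minimal, standalone sketch of the
# id generation itself:
def _container_id_sketch():
    import random
    import string
    return "".join(random.choice(string.ascii_uppercase + string.digits)
                   for _ in range(10))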
class ExporterReview(object):
"""
Base class object for generating review data from Nuke
Args:
klass (pyblish.plugin): pyblish plugin parent
instance (pyblish.instance): instance of pyblish context
"""
data = None
publish_on_farm = False
def __init__(self,
klass,
instance,
multiple_presets=True
):
self.log = klass.log
self.instance = instance
self.multiple_presets = multiple_presets
self.path_in = self.instance.data.get("path", None)
self.staging_dir = self.instance.data["stagingDir"]
self.collection = self.instance.data.get("collection", None)
self.data = dict({
"representations": list()
})
def get_file_info(self):
if self.collection:
self.log.debug("Collection: `{}`".format(self.collection))
# get path
self.fname = os.path.basename(self.collection.format(
"{head}{padding}{tail}"))
self.fhead = self.collection.format("{head}")
# get first and last frame
self.first_frame = min(self.collection.indexes)
self.last_frame = max(self.collection.indexes)
if "slate" in self.instance.data["families"]:
self.first_frame += 1
else:
self.fname = os.path.basename(self.path_in)
self.fhead = os.path.splitext(self.fname)[0] + "."
self.first_frame = self.instance.data.get("frameStartHandle", None)
self.last_frame = self.instance.data.get("frameEndHandle", None)
if "#" in self.fhead:
self.fhead = self.fhead.replace("#", "")[:-1]
def get_representation_data(self, tags=None, range=False):
add_tags = tags or []
repre = {
"name": self.name,
"ext": self.ext,
"files": self.file,
"stagingDir": self.staging_dir,
"tags": [self.name.replace("_", "-")] + add_tags
}
if range:
repre.update({
"frameStart": self.first_frame,
"frameEnd": self.last_frame,
})
if self.multiple_presets:
repre["outputName"] = self.name
if self.publish_on_farm:
repre["tags"].append("publish_on_farm")
self.data["representations"].append(repre)
def get_view_input_process_node(self):
"""
Will get any active view process.
Arguments:
self (class): in object definition
Returns:
nuke.Node: copy node of Input Process node
"""
reset_selection()
ipn_orig = None
for v in nuke.allNodes(filter="Viewer"):
ip = v["input_process"].getValue()
ipn = v["input_process_node"].getValue()
if "VIEWER_INPUT" not in ipn and ip:
ipn_orig = nuke.toNode(ipn)
ipn_orig.setSelected(True)
if ipn_orig:
# copy selected to clipboard
nuke.nodeCopy("%clipboard%")
# reset selection
reset_selection()
# paste node and selection is on it only
nuke.nodePaste("%clipboard%")
# assign to variable
ipn = nuke.selectedNode()
return ipn
def get_imageio_baking_profile(self):
from . import lib as opnlib
nuke_imageio = opnlib.get_nuke_imageio_settings()
        # TODO: this only secures backward compatibility; remove it
        # once all projects' anatomies are updated to the newer config
if "baking" in nuke_imageio.keys():
return nuke_imageio["baking"]["viewerProcess"]
else:
return nuke_imageio["viewer"]["viewerProcess"]
class ExporterReviewLut(ExporterReview):
"""
Generator object for review lut from Nuke
Args:
klass (pyblish.plugin): pyblish plugin parent
instance (pyblish.instance): instance of pyblish context
"""
_temp_nodes = []
def __init__(self,
klass,
instance,
name=None,
ext=None,
cube_size=None,
lut_size=None,
lut_style=None,
multiple_presets=True):
# initialize parent class
super(ExporterReviewLut, self).__init__(
klass, instance, multiple_presets)
        # handle the case where no LUT is defined on the viewer (raw output)
if hasattr(klass, "viewer_lut_raw"):
self.viewer_lut_raw = klass.viewer_lut_raw
else:
self.viewer_lut_raw = False
self.name = name or "baked_lut"
self.ext = ext or "cube"
self.cube_size = cube_size or 32
self.lut_size = lut_size or 1024
self.lut_style = lut_style or "linear"
# set frame start / end and file name to self
self.get_file_info()
self.log.info("File info was set...")
self.file = self.fhead + self.name + ".{}".format(self.ext)
self.path = os.path.join(
self.staging_dir, self.file).replace("\\", "/")
def clean_nodes(self):
for node in self._temp_nodes:
nuke.delete(node)
self._temp_nodes = []
self.log.info("Deleted nodes...")
    def generate_lut(self, **kwargs):
        bake_viewer_process = kwargs["bake_viewer_process"]
        bake_viewer_input_process_node = kwargs[
            "bake_viewer_input_process"]
# ---------- start nodes creation
# CMSTestPattern
cms_node = nuke.createNode("CMSTestPattern")
cms_node["cube_size"].setValue(self.cube_size)
# connect
self._temp_nodes.append(cms_node)
self.previous_node = cms_node
self.log.debug("CMSTestPattern... `{}`".format(self._temp_nodes))
if bake_viewer_process:
# Node View Process
if bake_viewer_input_process_node:
ipn = self.get_view_input_process_node()
if ipn is not None:
# connect
ipn.setInput(0, self.previous_node)
self._temp_nodes.append(ipn)
self.previous_node = ipn
self.log.debug(
"ViewProcess... `{}`".format(self._temp_nodes))
if not self.viewer_lut_raw:
# OCIODisplay
dag_node = nuke.createNode("OCIODisplay")
# connect
dag_node.setInput(0, self.previous_node)
self._temp_nodes.append(dag_node)
self.previous_node = dag_node
self.log.debug("OCIODisplay... `{}`".format(self._temp_nodes))
# GenerateLUT
gen_lut_node = nuke.createNode("GenerateLUT")
gen_lut_node["file"].setValue(self.path)
gen_lut_node["file_type"].setValue(".{}".format(self.ext))
gen_lut_node["lut1d"].setValue(self.lut_size)
gen_lut_node["style1d"].setValue(self.lut_style)
# connect
gen_lut_node.setInput(0, self.previous_node)
self._temp_nodes.append(gen_lut_node)
self.log.debug("GenerateLUT... `{}`".format(self._temp_nodes))
# ---------- end nodes creation
# Export lut file
nuke.execute(
gen_lut_node.name(),
int(self.first_frame),
int(self.first_frame))
self.log.info("Exported...")
# ---------- generate representation data
self.get_representation_data()
self.log.debug("Representation... `{}`".format(self.data))
# ---------- Clean up
self.clean_nodes()
return self.data
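# Editor's note (summary, not original code): generate_lut() builds a temporary
# chain CMSTestPattern -> [input process] -> [OCIODisplay] -> GenerateLUT,
# executes a single frame so the chain is baked into the LUT file at self.path,
# then deletes the temporary nodes and returns the representation data.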
class ExporterReviewMov(ExporterReview):
"""
    Class for generating review mov files
Args:
klass (pyblish.plugin): pyblish plugin parent
instance (pyblish.instance): instance of pyblish context
"""
_temp_nodes = {}
def __init__(self,
klass,
instance,
name=None,
ext=None,
multiple_presets=True
):
# initialize parent class
super(ExporterReviewMov, self).__init__(
klass, instance, multiple_presets)
# passing presets for nodes to self
self.nodes = klass.nodes if hasattr(klass, "nodes") else {}
        # handle the case where no LUT is defined on the viewer (raw output)
self.viewer_lut_raw = klass.viewer_lut_raw
self.write_colorspace = instance.data["colorspace"]
self.name = name or "baked"
self.ext = ext or "mov"
# set frame start / end and file name to self
self.get_file_info()
self.log.info("File info was set...")
self.file = self.fhead + self.name + ".{}".format(self.ext)
self.path = os.path.join(
self.staging_dir, self.file).replace("\\", "/")
def clean_nodes(self, node_name):
for node in self._temp_nodes[node_name]:
nuke.delete(node)
self._temp_nodes[node_name] = []
self.log.info("Deleted nodes...")
def render(self, render_node_name):
self.log.info("Rendering... ")
# Render Write node
nuke.execute(
render_node_name,
int(self.first_frame),
int(self.last_frame))
self.log.info("Rendered...")
def save_file(self):
import shutil
with maintained_selection():
self.log.info("Saving nodes as file... ")
# create nk path
path = os.path.splitext(self.path)[0] + ".nk"
# save file to the path
shutil.copyfile(self.instance.context.data["currentFile"], path)
self.log.info("Nodes exported...")
return path
def generate_mov(self, farm=False, **kwargs):
self.publish_on_farm = farm
read_raw = kwargs["read_raw"]
reformat_node_add = kwargs["reformat_node_add"]
reformat_node_config = kwargs["reformat_node_config"]
bake_viewer_process = kwargs["bake_viewer_process"]
bake_viewer_input_process_node = kwargs[
"bake_viewer_input_process"]
viewer_process_override = kwargs[
"viewer_process_override"]
baking_view_profile = (
viewer_process_override or self.get_imageio_baking_profile())
fps = self.instance.context.data["fps"]
self.log.debug(">> baking_view_profile `{}`".format(
baking_view_profile))
add_tags = kwargs.get("add_tags", [])
self.log.info(
"__ add_tags: `{0}`".format(add_tags))
subset = self.instance.data["subset"]
self._temp_nodes[subset] = []
# ---------- start nodes creation
# Read node
r_node = nuke.createNode("Read")
r_node["file"].setValue(self.path_in)
r_node["first"].setValue(self.first_frame)
r_node["origfirst"].setValue(self.first_frame)
r_node["last"].setValue(self.last_frame)
r_node["origlast"].setValue(self.last_frame)
r_node["colorspace"].setValue(self.write_colorspace)
if read_raw:
r_node["raw"].setValue(1)
# connect
self._temp_nodes[subset].append(r_node)
self.previous_node = r_node
self.log.debug("Read... `{}`".format(self._temp_nodes[subset]))
# add reformat node
if reformat_node_add:
# append reformated tag
add_tags.append("reformated")
rf_node = nuke.createNode("Reformat")
set_node_knobs_from_settings(rf_node, reformat_node_config)
# connect
rf_node.setInput(0, self.previous_node)
self._temp_nodes[subset].append(rf_node)
self.previous_node = rf_node
self.log.debug(
"Reformat... `{}`".format(self._temp_nodes[subset]))
# only create colorspace baking if toggled on
if bake_viewer_process:
if bake_viewer_input_process_node:
# View Process node
ipn = self.get_view_input_process_node()
if ipn is not None:
# connect
ipn.setInput(0, self.previous_node)
self._temp_nodes[subset].append(ipn)
self.previous_node = ipn
self.log.debug(
"ViewProcess... `{}`".format(
self._temp_nodes[subset]))
if not self.viewer_lut_raw:
# OCIODisplay
dag_node = nuke.createNode("OCIODisplay")
dag_node["view"].setValue(str(baking_view_profile))
# connect
dag_node.setInput(0, self.previous_node)
self._temp_nodes[subset].append(dag_node)
self.previous_node = dag_node
self.log.debug("OCIODisplay... `{}`".format(
self._temp_nodes[subset]))
# Write node
write_node = nuke.createNode("Write")
self.log.debug("Path: {}".format(self.path))
write_node["file"].setValue(str(self.path))
write_node["file_type"].setValue(str(self.ext))
# Knobs `meta_codec` and `mov64_codec` are not available on centos.
# TODO shouldn't this come from settings on outputs?
try:
write_node["meta_codec"].setValue("ap4h")
except Exception:
self.log.info("`meta_codec` knob was not found")
try:
write_node["mov64_codec"].setValue("ap4h")
write_node["mov64_fps"].setValue(float(fps))
except Exception:
self.log.info("`mov64_codec` knob was not found")
write_node["mov64_write_timecode"].setValue(1)
write_node["raw"].setValue(1)
# connect
write_node.setInput(0, self.previous_node)
self._temp_nodes[subset].append(write_node)
self.log.debug("Write... `{}`".format(self._temp_nodes[subset]))
# ---------- end nodes creation
# ---------- render or save to nk
if self.publish_on_farm:
nuke.scriptSave()
path_nk = self.save_file()
self.data.update({
"bakeScriptPath": path_nk,
"bakeWriteNodeName": write_node.name(),
"bakeRenderPath": self.path
})
else:
self.render(write_node.name())
# ---------- generate representation data
self.get_representation_data(
tags=["review", "delete"] + add_tags,
range=True
)
self.log.debug("Representation... `{}`".format(self.data))
self.clean_nodes(subset)
nuke.scriptSave()
return self.data
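# Editor's note (summary, not original code): generate_mov() mirrors the LUT
# exporter with a Read -> [Reformat] -> [input process] -> [OCIODisplay] ->
# Write chain; farm publishes save the baked script and defer rendering, while
# local runs execute the Write node immediately.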
class AbstractWriteRender(OpenPypeCreator):
"""Abstract creator to gather similar implementation for Write creators"""
name = ""
label = ""
hosts = ["nuke"]
n_class = "Write"
family = "render"
icon = "sign-out"
defaults = ["Main", "Mask"]
knobs = []
prenodes = {}
def __init__(self, *args, **kwargs):
super(AbstractWriteRender, self).__init__(*args, **kwargs)
data = OrderedDict()
data["family"] = self.family
data["families"] = self.n_class
for k, v in self.data.items():
if k not in data.keys():
data.update({k: v})
self.data = data
self.nodes = nuke.selectedNodes()
self.log.debug("_ self.data: '{}'".format(self.data))
def process(self):
inputs = []
outputs = []
instance = nuke.toNode(self.data["subset"])
selected_node = None
# use selection
if (self.options or {}).get("useSelection"):
nodes = self.nodes
            if len(nodes) > 1:
                msg = ("Select only one node. "
                       "The node you want to connect to, "
                       "or untick `Use selection`.")
self.log.error(msg)
nuke.message(msg)
return
            if len(nodes) == 0:
                msg = (
                    "No nodes selected. Please select a single node to connect"
                    " to, or untick `Use selection`."
                )
self.log.error(msg)
nuke.message(msg)
return
selected_node = nodes[0]
inputs = [selected_node]
outputs = selected_node.dependent()
if instance:
if (instance.name() in selected_node.name()):
selected_node = instance.dependencies()[0]
# if node already exist
if instance:
# collect input / outputs
inputs = instance.dependencies()
outputs = instance.dependent()
selected_node = inputs[0]
# remove old one
nuke.delete(instance)
# recreate new
write_data = {
"nodeclass": self.n_class,
"families": [self.family],
"avalon": self.data,
"subset": self.data["subset"],
"knobs": self.knobs
}
# add creator data
creator_data = {"creator": self.__class__.__name__}
self.data.update(creator_data)
write_data.update(creator_data)
write_node = self._create_write_node(
selected_node,
inputs,
outputs,
write_data
)
# relinking to collected connections
for i, input in enumerate(inputs):
write_node.setInput(i, input)
write_node.autoplace()
for output in outputs:
output.setInput(0, write_node)
write_node = self._modify_write_node(write_node)
return write_node
def is_legacy(self):
"""Check if it needs to run legacy code
In case where `type` key is missing in singe
knob it is legacy project anatomy.
Returns:
bool: True if legacy
"""
imageio_nodes = get_nuke_imageio_settings()["nodes"]
node = imageio_nodes["requiredNodes"][0]
if "type" not in node["knobs"][0]:
# if type is not yet in project anatomy
return True
        elif next(iter(
            _k for _k in node["knobs"]
            if _k.get("type") == "__legacy__"
        ), None):
            # in case someone re-saved anatomy
            # with old configuration
            return True
        return False
@abstractmethod
def _create_write_node(self, selected_node, inputs, outputs, write_data):
"""Family dependent implementation of Write node creation
Args:
selected_node (nuke.Node)
inputs (list of nuke.Node) - input dependencies (what is connected)
outputs (list of nuke.Node) - output dependencies
write_data (dict) - values used to fill Knobs
Returns:
node (nuke.Node): group node with data as Knobs
"""
pass
@abstractmethod
def _modify_write_node(self, write_node):
"""Family dependent modification of created 'write_node'
Returns:
node (nuke.Node): group node with data as Knobs
"""
pass
| 31.741892 | 80 | 0.565626 | 2,591 | 23,489 | 4.91702 | 0.15631 | 0.017582 | 0.023469 | 0.016405 | 0.306986 | 0.250942 | 0.207457 | 0.183046 | 0.16044 | 0.16044 | 0 | 0.004119 | 0.328111 | 23,489 | 739 | 81 | 31.784844 | 0.80313 | 0.124697 | 0 | 0.235537 | 0 | 0 | 0.100064 | 0.004654 | 0 | 0 | 0.00099 | 0.002706 | 0 | 1 | 0.053719 | false | 0.004132 | 0.024793 | 0 | 0.161157 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8b595bcc38bd813e948fee2fa101e859322a65b | 2,132 | py | Python | liferay/settings.py | caihaoyu/scrapy-liferay | 5b83404a7eeb8976f4e0e786b8120c7febf03e86 | ["MIT"] | null | null | null | liferay/settings.py | caihaoyu/scrapy-liferay | 5b83404a7eeb8976f4e0e786b8120c7febf03e86 | ["MIT"] | 1 | 2017-04-10T08:23:54.000Z | 2017-04-10T09:18:27.000Z | liferay/settings.py | caihaoyu/scrapy-liferay | 5b83404a7eeb8976f4e0e786b8120c7febf03e86 | ["MIT"] | null | null | null
# -*- coding: utf-8 -*-
# Scrapy settings for liferay project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'liferay'
SPIDER_MODULES = ['liferay.spiders']
NEWSPIDER_MODULE = 'liferay.spiders'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# COOKIES
COOKIES_ENABLED = False
COOKIES_DEBUG = False
# The maximum number of concurrent (ie. simultaneous) requests that will
# be performed to any single domain.
# CONCURRENT_REQUESTS_PER_DOMAIN = 100
# CONCURRENT_REQUESTS_PER_IP = 0
# CONCURRENT_REQUESTS_PER_SPIDER = 100
DNSCACHE_ENABLED = True
# DOWNLOAD_DELAY = 2
DOWNLOAD_TIMEOUT = 20
# DEFAULT_REQUEST_HEADERS = {
# 'Referer': 'http://Google.com'
# }
# Retry many times since proxies often fail
RETRY_TIMES = 20
# Retry on most error codes since proxies fail for different reasons
RETRY_HTTP_CODES = [500, 503, 504, 400, 403, 404, 408, 302, 304]
ITEM_PIPELINES = {
'liferay.pipelines.MongoPipeline': 300,
'scrapy_redis.pipelines.RedisPipeline': 400
}
ROBOTSTXT_OBEY = False  # overrides the value set above; this crawler ignores robots.txt
# scrapy-redis scheduler and queue let multiple spider processes share work
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.SpiderQueue'
DOWNLOADER_MIDDLEWARES = {
'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': None,
'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': 110,
'scrapy.contrib.downloadermiddleware.retry.RetryMiddleware': 300,
'liferay.rotate_useragent.RotateUserAgentMiddleware': 400,
# 'magic_mirror.spiders.rotate_useragent.RotateUserAgentMiddleware': 400,
'scrapy.contrib.downloadermiddleware.cookies.CookiesMiddleware': 700
}
# custom settings
REDIS_HOST = '0.0.0.0'
REDIS_PORT = 6379
LOG_PATH = ''
MONGO_HOST = 'localhost'
MONGO_PORT = 27017
MONGO_DATABASE_NAME = {'default': 'liferay'}
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
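# Editor's sketch (not part of this settings file): a minimal shape for the
# `liferay.pipelines.MongoPipeline` wired into ITEM_PIPELINES above. Only the
# dotted path comes from this file; the class body below is hypothetical.
#
#   import pymongo
#
#   class MongoPipeline(object):
#       def open_spider(self, spider):
#           client = pymongo.MongoClient(MONGO_HOST, MONGO_PORT)
#           self.db = client[MONGO_DATABASE_NAME['default']]
#
#       def process_item(self, item, spider):
#           self.db[spider.name].insert_one(dict(item))
#           return item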
b8b66ad25381df89f4946aae36d26b51123c308e | 586 | py | Python | graphs/find_path_between_two_vertices_single_parent.py | hariharanragothaman/pymaster | b3d033b4d5c75c69f587c94d9d12cd4a349a6a69 | ["Apache-2.0"] | 10 | 2020-09-21T22:23:09.000Z | 2022-01-25T16:58:44.000Z | graphs/find_path_between_two_vertices_single_parent.py | hariharanragothaman/pymaster | b3d033b4d5c75c69f587c94d9d12cd4a349a6a69 | ["Apache-2.0"] | null | null | null | graphs/find_path_between_two_vertices_single_parent.py | hariharanragothaman/pymaster | b3d033b4d5c75c69f587c94d9d12cd4a349a6a69 | ["Apache-2.0"] | null | null | null
from collections import defaultdict
def find_path(start, end, parents):
"""Constructs a path from given starting index to end index"""
path, parent = [], end
while parent != parents[start]:
path.append(parent)
parent = parents[parent]
return path[::-1]
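# Editor's note: `parents` encodes a tree with one parent per node, so
# find_path() just walks parent pointers from `end` back towards `start` and
# reverses the result — O(length of the path). It assumes `end` lies in the
# subtree of `start`; otherwise the walk runs past the root and raises KeyError.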
if __name__ == "__main__":
    # Creating a graph: each key is a node, each value is its parent
g = defaultdict()
g[1] = 0
g[2] = 1
g[3] = 1
g[4] = 2
g[5] = 2
g[6] = 3
g[7] = 2
g[8] = 5
print(g)
result = find_path(1, 8, g)
print("The result is:", result)
b8b7d43cf8be8618590ec5263271e0232b2f062c | 764 | py | Python | tests/__init__.py | vrcmarcos/flask-filtered-response | ccd1f7294824b4e3284c6c975252b4e9d0d06b87 | ["MIT"] | null | null | null | tests/__init__.py | vrcmarcos/flask-filtered-response | ccd1f7294824b4e3284c6c975252b4e9d0d06b87 | ["MIT"] | 1 | 2019-11-04T00:29:39.000Z | 2019-11-04T00:29:39.000Z | tests/__init__.py | vrcmarcos/flask-filtered-response | ccd1f7294824b4e3284c6c975252b4e9d0d06b87 | ["MIT"] | 1 | 2016-08-30T17:04:43.000Z | 2016-08-30T17:04:43.000Z
# -*- coding: utf-8 -*-
import json
from flask import Flask, Response
from flask_filtered_response import filtered
app = Flask('flask-test-response')
@app.route('/filtered/single_node')
@filtered
def single_node():
response_dict = {
'test_int': 123,
'test_string': 'Hey!',
}
return Response(json.dumps(response_dict))
@app.route('/filtered/multiple_nodes')
@filtered
def multiple_nodes():
response_list = [
{
'test_int': 123,
'test_string': 'Hey!',
},
{
'test_int': 456,
'test_string': 'Yo!',
},
]
return Response(json.dumps(response_list))
@app.route('/filtered/empty_response')
@filtered
def empty_response():
return Response()
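# Editor's sketch (illustration only): exercising the routes above with
# Flask's test client. It assumes @filtered passes a response through
# unchanged when no filter is requested.
def _demo_requests():
    client = app.test_client()
    for route in ('/filtered/single_node',
                  '/filtered/multiple_nodes',
                  '/filtered/empty_response'):
        rv = client.get(route)
        print(route, rv.status_code)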
b8b8db443ff81fc3adbb4c052dcadf5873228cd1 | 849 | py | Python | myGym/train_parallel.py | gabinsane/myGym | a41c6b11a47eaf19d0c69e67aeb48cf7a999d45a | ["MIT"] | 1 | 2021-04-23T20:52:39.000Z | 2021-04-23T20:52:39.000Z | myGym/train_parallel.py | gabinsane/myGym | a41c6b11a47eaf19d0c69e67aeb48cf7a999d45a | ["MIT"] | null | null | null | myGym/train_parallel.py | gabinsane/myGym | a41c6b11a47eaf19d0c69e67aeb48cf7a999d45a | ["MIT"] | 1 | 2021-01-22T16:46:48.000Z | 2021-01-22T16:46:48.000Z
import os
import threading
algos = ['ppo', 'ppo2', 'sac', 'trpo']
api = '/home/michal/code/myGym/myGym'
configfile = 'configs/train.conf'
script_path = api + '/train.py'
def train(algo):
os.system('cd {api};python {script_path} --config {configfile} --algo {algos}'.format(script_path=script_path, api=api, configfile=configfile, algos=algo))
if __name__ == '__main__':
threads = []
for i, algo in enumerate(algos):
thread = threading.Thread(target=train, args=(algo,))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
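    # Editor's note: plain threads are enough here — each one just blocks in
    # os.system() on a separate training process, and the GIL is released
    # while waiting, so the runs genuinely execute in parallel.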
# os.system("./train.py --task_type reach --algo ppo2 --reward_type gt --robot kuka -ba step --env_name CrowWorkspaceEnv-v0 --steps 300000 -to hammer -w tensorflow -g 0 -p pybullet -r 0 -c 6 -dt euclidian -i 0 -t 1 -f 0 -d opengl") | 30.321429 | 232 | 0.651355 | 121 | 849 | 4.446281 | 0.603306 | 0.074349 | 0.048327 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021962 | 0.195524 | 849 | 28 | 232 | 30.321429 | 0.765739 | 0.270907 | 0 | 0 | 0 | 0 | 0.239482 | 0.046926 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.125 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8b8e703d2444d0c0cb5713971cc450d75191bd7 | 8,045 | py | Python | utils/dataprocessor.py | NavePnow/CP3106-NUS | a84b319940d65e98d8014c9c8972cb42c86d5b06 | ["MIT"] | 1 | 2020-02-19T11:05:47.000Z | 2020-02-19T11:05:47.000Z | utils/dataprocessor.py | NavePnow/CP3106-Independent-Project-NUS | a84b319940d65e98d8014c9c8972cb42c86d5b06 | ["MIT"] | null | null | null | utils/dataprocessor.py | NavePnow/CP3106-Independent-Project-NUS | a84b319940d65e98d8014c9c8972cb42c86d5b06 | ["MIT"] | null | null | null
import pandas as pd
import matplotlib.pyplot as plt
import os
import numpy as np
import pytz as tz # better alternatives -> Apache arrow or pendulum
from scipy.spatial import Voronoi
from datetime import datetime
from PIL import Image
import urllib
import urllib.request
# import wget
class DataProcessor:
lat_min, lat_max, lon_min, lon_max = 59.1510, 59.6238, 17.5449, 18.6245
key = ""
def __init__(self, datadir, filename):
self.datadir = datadir
self.filename = filename
def processData(self, sample):
df = self._loadDate()
df = self._filterData(df)
# df = self._parseData(df)
df = df[:sample]
df_kmeans = df.copy()
return df_kmeans[['lat', 'lon']]
def _parseDatetime(self, s):
tzone = tz.timezone("Europe/Stockholm")
utc = datetime.strptime(s, '%Y-%m-%dT%H:%M:%SZ')
return tz.utc.localize(utc).astimezone(tzone)
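        # Editor's worked example: '2010-10-19T23:55:27Z' (UTC) parses to
        # 2010-10-20 01:55:27+02:00 in Europe/Stockholm (CEST).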
def _loadDate(self):
print("loading data...")
filename = os.path.join(
self.datadir, self.filename)
df = pd.read_csv(filename, sep='\t', header=None)
df.columns = ['uid', 'timestamp', 'lat', 'lon', 'venue_id']
return df
def _parseData(self, df):
print("parsing data...")
df['ts'] = df['timestamp'].apply(lambda x: self._parseDatetime(x))
df = df.drop('timestamp', axis=1, errors='ignore')
df['date'] = df['ts'].astype(object).apply(lambda x: x.date())
df['time'] = df['ts'].astype(object).apply(lambda x: x.time())
return df
def _filterData(self, df):
print("filtering data...")
df = df[(df['lon'] > DataProcessor.lon_min) & (df['lon'] < DataProcessor.lon_max) &
(df['lat'] > DataProcessor.lat_min) & (df['lat'] < DataProcessor.lat_max)].reset_index(drop=True)
return df
def saveData(self, df, output_file):
print("saving data...")
output_name = './data/' + output_file
with open(output_name, 'w+') as f:
for line in df.values:
f.write((str(line[0]) + '\t'+str(line[1]) + '\n'))
def _get_map(self, x, y, z, size, filename):
static_map = "https://maps.googleapis.com/maps/api/staticmap?center=" + str(x) + "," + str(y) + "&zoom=" + str(z) + \
"&size=" + str(size) + "x" + str(size) +\
"&markers=color:red%7Clabel:C%7C" + str(x) + "," + str(y) + "&maptype=roadmap&key=" + \
DataProcessor.key
print(static_map)
static_map = static_map.format(x, y, z, size)
static_map_filename, headers = urllib.request.urlretrieve(
static_map, filename)
return static_map_filename
def geomap(self, data, df, zoom=13, point_size=3, point_color='r', point_alpha=1):
#corrections to match geo with static map
z = zoom
picsize = 1000
wx = 1.0*360*(picsize/256)/(2**z)
wy = 0.76*360*(picsize/256)/(2**z)
        # center of Stockholm
y = 18.0649 # lon 18.0847
x = 59.33258 # lat 59.3874
x_min, x_max = x-wx/2, x+wx/2
y_min, y_max = y-wy/2, y+wy/2
static_map_filename = os.path.join(
self.datadir, 'Stockholm_staticmap_{}_{}.png'.format(x, y, z, picsize))
if os.path.isfile(static_map_filename) == False:
self._get_map(x, y, z, picsize, static_map_filename)
img = Image.open(static_map_filename)
#add the static map
plt.imshow(img, zorder=0, extent=[
x_min, x_max, y_min, y_max], interpolation='none', aspect='auto')
#add the scatter plot of events
plt.plot(
data['lat'],
data['lon'],
'.',
markerfacecolor=point_color,
markeredgecolor='k',
markersize=point_size,
alpha=point_alpha)
#limit the plot to the given box
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
def voronoi_polygons_2d(self, vor, radius=None):
"""
Reconstruct infinite voronoi regions in a 2D diagram to finite
regions.
Input_args:
vor : Voronoi
Input diagram
radius : float, optional
Distance to 'points at infinity'.
:returns:
regions : list of tuples
Indices of vertices in each revised Voronoi regions.
vertices : list of tuples
Coordinates for revised Voronoi vertices. Same as coordinates
of input vertices, with 'points at infinity' appended to the
end.
"""
if vor.points.shape[1] != 2:
raise ValueError("Requires 2D input")
new_regions = []
new_vertices = vor.vertices.tolist()
center = vor.points.mean(axis=0)
if radius is None:
radius = vor.points.ptp().max()*2
# Construct a map containing all ridges for a given point
all_ridges = {}
for (p1, p2), (v1, v2) in zip(vor.ridge_points, vor.ridge_vertices):
all_ridges.setdefault(p1, []).append((p2, v1, v2))
all_ridges.setdefault(p2, []).append((p1, v1, v2))
# Reconstruct infinite regions
for p1, region in enumerate(vor.point_region):
vertices = vor.regions[region]
if all([v >= 0 for v in vertices]):
# finite region
new_regions.append(vertices)
continue
# reconstruct a non-finite region
ridges = all_ridges[p1]
new_region = [v for v in vertices if v >= 0]
for p2, v1, v2 in ridges:
if v2 < 0:
v1, v2 = v2, v1
if v1 >= 0:
# finite ridge: already in the region
continue
# Compute the missing endpoint of an infinite ridge
t = vor.points[p2] - vor.points[p1] # tangent
t /= np.linalg.norm(t)
n = np.array([-t[1], t[0]]) # normal
midpoint = vor.points[[p1, p2]].mean(axis=0)
direction = np.sign(np.dot(midpoint - center, n)) * n
far_point = vor.vertices[v2] + direction * radius
new_region.append(len(new_vertices))
new_vertices.append(far_point.tolist())
# sort region counterclockwise
vs = np.asarray([new_vertices[v] for v in new_region])
c = vs.mean(axis=0)
angles = np.arctan2(vs[:, 1] - c[1], vs[:, 0] - c[0])
new_region = np.array(new_region)[np.argsort(angles)]
# finish
new_regions.append(new_region.tolist())
return new_regions, np.asarray(new_vertices)
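    # Editor's note: the method above uses the standard recipe for closing a
    # Voronoi diagram — each infinite ridge is replaced by a far point placed
    # `radius` away along the outward normal, so every region becomes a finite
    # polygon that presentData() can fill.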
def presentData(self, center, df):
cluster = center
points = center
# compute Voronoi tesselation
vor = Voronoi(points)
# compute regions
regions, vertices = self.voronoi_polygons_2d(vor)
# prepare figure
plt.style.use('seaborn-white')
fig = plt.figure()
fig.set_size_inches(20, 20)
#geomap
self.geomap(df, df, 13, 2, 'k', 0.1)
# centroids
plt.plot(points[:, 0], points[:, 1], 'wo', markersize=10)
# colorize
for region in regions:
polygon = vertices[region]
plt.fill(*zip(*polygon), alpha=0.4)
plt.show()
if __name__ == '__main__':
dataprocessor = DataProcessor(
'/Users/wangyifan/Google Drive/checkin', 'loc-gowalla_totalCheckins.txt')
#indexer.build_index('../../reuters/training')
# start = time.time()
df = dataprocessor._loadDate()
df = dataprocessor._filterData(df)
df = dataprocessor._parseData(df)
df = df[['lat', 'lon']]
dataprocessor.saveData(df)
# plt.style.use('seaborn-white')
# fig = plt.figure()
# fig.set_size_inches(20, 20)
# dataprocessor.geomap(df[['lat', 'lon']], df=df)
# plt.show()
| 32.971311 | 125 | 0.561715 | 1,015 | 8,045 | 4.336946 | 0.305419 | 0.026579 | 0.027033 | 0.005452 | 0.071331 | 0.05134 | 0.038164 | 0.038164 | 0.024989 | 0.024989 | 0 | 0.027703 | 0.309012 | 8,045 | 243 | 126 | 33.106996 | 0.764166 | 0.154133 | 0 | 0.047297 | 0 | 0 | 0.071042 | 0.020018 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074324 | false | 0 | 0.074324 | 0 | 0.209459 | 0.033784 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8b957c63bcdeb165a905475803eeee1e1f5da46 | 1,871 | py | Python | hypergan/train_hooks/initialize_as_autoencoder.py | stone3311/HyperGAN | eed4869688ac4273a661cbbd03db0ec1dad89049 | ["MIT"] | null | null | null | hypergan/train_hooks/initialize_as_autoencoder.py | stone3311/HyperGAN | eed4869688ac4273a661cbbd03db0ec1dad89049 | ["MIT"] | null | null | null | hypergan/train_hooks/initialize_as_autoencoder.py | stone3311/HyperGAN | eed4869688ac4273a661cbbd03db0ec1dad89049 | ["MIT"] | null | null | null
import hyperchamber as hc
import numpy as np
import inspect
from hypergan.gan_component import ValidationException, GANComponent
from torch.autograd import grad as torch_grad
from operator import itemgetter
from hypergan.train_hooks.base_train_hook import BaseTrainHook
class InitializeAsAutoencoder(BaseTrainHook):
"""
    G is pre-trained as an autoencoder on the first training step.
"""
def __init__(self, gan=None, config=None, trainer=None):
super().__init__(config=config, gan=gan, trainer=trainer)
def before_step(self, step, feed_dict, depth=0):
if self.gan.steps != 1:
return
defn = self.config.encoder.copy()
klass = GANComponent.lookup_function(None, defn['class'])
encode = klass(self.gan, defn).cuda()
defn = self.config.optimizer.copy()
klass = GANComponent.lookup_function(None, defn['class'])
del defn["class"]
self.optimizer = klass(list(encode.parameters()) + list(self.gan.generator.parameters()), **defn)
for i in range(self.config.steps or 1000):
self.optimizer.zero_grad()
inp = self.gan.inputs.next()
e = encode(inp)
fake = self.gan.generator(e)
loss = ((inp - fake)**2).mean()
loss.backward()
for p in (list(self.gan.g_parameters())+list(encode.parameters())):
p.requires_grad = True
#move = torch_grad(outputs=loss, inputs=list(self.gan.g_parameters())+list(encode.parameters()), retain_graph=True, create_graph=True)
#print(list(self.gan.g_parameters())+list(encode.parameters()))
self.optimizer.step()
if self.config.verbose:
print("[autoencode]", i, "loss", loss.item())
if self.config.info and (i % 100) == 0:
print("[autoencode]", i, "loss", loss.item())
| 41.577778 | 146 | 0.628541 | 229 | 1,871 | 5.026201 | 0.39738 | 0.054735 | 0.069505 | 0.031277 | 0.241529 | 0.241529 | 0.192876 | 0.192876 | 0 | 0 | 0 | 0.007752 | 0.241582 | 1,871 | 44 | 147 | 42.522727 | 0.803383 | 0.125067 | 0 | 0.117647 | 0 | 0 | 0.029084 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.205882 | 0 | 0.323529 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8ba04efa5fe331b32b0cf1d3ce220c04594f5e7 | 969 | py | Python | court_scraper/platforms/oscn/runner.py | gitter-badger/court-scraper | f89b86d07d39d99c8c123717227fbc773d4933c3 | ["0BSD"] | null | null | null | court_scraper/platforms/oscn/runner.py | gitter-badger/court-scraper | f89b86d07d39d99c8c123717227fbc773d4933c3 | ["0BSD"] | null | null | null | court_scraper/platforms/oscn/runner.py | gitter-badger/court-scraper | f89b86d07d39d99c8c123717227fbc773d4933c3 | ["0BSD"] | null | null | null
import logging
from court_scraper.base.runner import BaseRunner
from .site import Site
logger = logging.getLogger(__name__)
class Runner(BaseRunner):
"""
Facade class to simplify invocation and usage of scrapers.
Arguments:
    - cache_dir -- Path to cache directory for scraped file artifacts
- config_path -- Path to location of config file
- place_id -- Scraper ID made up of state and county (e.g. ga_dekalb)
"""
    def search(self, case_numbers=[], **kwargs):
"""
For a given scraper, executes the search, acquisition
and processing of case info.
Keyword arguments:
- case_numbers - List of case numbers
Returns: List of dicts containing case metadata
"""
site = Site(self.place_id)
logger.info(
"Executing search for {}".format(self.place_id)
)
data = site.search(case_numbers=case_numbers)
return data
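# Editor's sketch (hypothetical place and case ids, for illustration only;
# constructor arguments follow the docstring above):
#
#   runner = Runner(place_id='ga_dekalb')
#   results = runner.search(case_numbers=['20A12345'])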
b8baa63ff92cd7eb9e74c05643e666d168858048 | 5,834 | py | Python | cups-backend/quotator.py | acristoffers/Quotator | 321aec83b0b455872033f5936502a1d7895d641a | ["MIT"] | 1 | 2019-04-24T21:12:32.000Z | 2019-04-24T21:12:32.000Z | cups-backend/quotator.py | acristoffers/Quotator | 321aec83b0b455872033f5936502a1d7895d641a | ["MIT"] | null | null | null | cups-backend/quotator.py | acristoffers/Quotator | 321aec83b0b455872033f5936502a1d7895d641a | ["MIT"] | null | null | null
#!/usr/bin/env python3
# -*- coding: utf-8; -*-
#
# Copyright (c) 2019 Álan Crístoffer
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import re
import subprocess
import sys
import time
from pymongo import MongoClient
client = MongoClient()
db = client.quotator
def log(msg, severity='error'):
db.log.insert_one({'message': msg, 'severity': severity})
def list_exec_backends():
cwd = os.path.dirname(__file__)
cwf = os.path.basename(__file__)
bs = (b for b in os.listdir(cwd) if os.access(b, os.X_OK))
blacklist = [cwf, 'ibquota', 'ibquota2', 'ibquota3']
return [os.path.join(cwd, b) for b in bs if b not in blacklist]
def wrap_backend(raw_output):
cwf = os.path.basename(__file__)
r = '(\\S+)\\s(\\S+)\\s"([^"]+)"\\s"([^"]+)"\\s?("[^"]+")?\\s?("[^"]+")?'
r = re.compile(r)
m = r.match(raw_output)
a = [g for g in m.groups() if g]
a.insert(1, cwf)
a = tuple(a)
if len(a) == 5:
device = '%s %s:%s "%s" "%s (Quotator)"' % a
elif len(a) == 6:
device = '%s %s:%s "%s" "%s (Quotator)" %s' % a
elif len(a) == 7:
device = '%s %s:%s "%s" "%s (Quotator)" %s %s' % a
else:
return None
log('Adding: %s' % device, severity='info')
return device
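# Editor's note: a CUPS backend run with no arguments prints discovery lines,
#   device-class scheme:uri "make-and-model" "info" ["device-id" ["location"]]
# wrap_backend() re-emits such a line with this backend's filename prefixed to
# the URI (e.g. socket://1.2.3.4 -> quotator.py:socket://1.2.3.4) so jobs are
# routed through Quotator before reaching the real backend.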
def run(process, args=()):
p = subprocess.PIPE
subp = subprocess.Popen([process, *args], stdin=p, stderr=p, stdout=p)
out, _ = subp.communicate()
return out.decode('utf8')
def list_wrapped():
bs = list_exec_backends()
rs = (o for b in bs for o in run(b).split('\n') if o)
return (w for w in map(wrap_backend, rs) if w)
def auth_mongodb(username, page_count, printer):
user = db.users.find_one(
{'username': {'$regex': username, '$options': 'i'}
})
if not user:
return False
ps = list(
db.polices.find({
'groups': {
'$in': user['groups']
},
'printers': printer
}))
if any([p['ifty_quota'] for p in ps]):
return True
qs = list(
db.quotas.find({
'user': str(user['_id']),
'policy': {
'$in': [str(p['_id']) for p in ps]
}
}))
for q in qs:
if q['quantity'] >= page_count:
db.quotas.update_one(
{'_id': q['_id']},
{'$set': {
'quantity': q['quantity'] - page_count
}})
return True
return False
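# Editor's note: authorization above is two-tiered — any matching policy with
# `ifty_quota` set grants unlimited printing; otherwise the first per-user
# quota document with enough pages left is decremented and the job is allowed.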
def count_pages(filename):
ret = run('/usr/bin/pkpgcounter', [filename])
return int(ret.strip())
def main(argv):
cwd = os.path.dirname(__file__)
cwf = os.path.basename(__file__)
if len(argv) == 1:
# discovery mode
out = open(1, 'w', encoding='utf-8', closefd=False) # fd 1 is stdout
for p in db.printers.find():
print(p['str'], file=out, flush=True)
elif '--gen' in argv:
# find devices and populate database
db.printers.drop()
db.printers.insert_many([{'str': p} for p in list_wrapped()])
elif len(argv) in [6, 7]:
# check user quota and print if ok
next_backend = os.environ['DEVICE_URI'].replace(cwf + ':', '')
os.environ['DEVICE_URI'] = next_backend
_, jobid, cupsuser, jobtitle, jobcopies, joboptions, *jobfile = argv
if jobfile:
jobfile = jobfile[0]
else:
jobfile = os.environ['TMPDIR'] + '/job' + jobid
data = sys.stdin.buffer.read()
if not data:
return 0
with open(jobfile, 'wb+') as f:
f.write(data)
f.flush()
pages = int(count_pages(jobfile) / int(jobcopies))
printer = os.environ['PRINTER']
if auth_mongodb(cupsuser, pages * int(jobcopies), printer):
cmd = os.path.join(cwd, next_backend.split(':')[0])
args = jobid, cupsuser, jobtitle, '1', joboptions, jobfile
db.jobs.insert_one({
                'status': 'success',
'user': cupsuser,
'job': jobid,
'title': jobtitle,
'copies': jobcopies,
'pages': pages,
'env': os.environ,
'time': time.time()
})
run(cmd, args)
return 0
else:
db.jobs.insert_one({
'status': 'fail',
'user': cupsuser,
'job': jobid,
'title': jobtitle,
'copies': jobcopies,
'pages': pages,
'env': os.environ,
'time': time.time()
})
return 5
else:
log('Wrong number of arguments: ' + len(argv))
return 1
return 0
if __name__ == '__main__':
exit(main(sys.argv))
b8bb6a4a3c792f17da077a9c4491f855ddf2a5a0 | 1,470 | py | Python | projects/gnomad/data/export_aggregate_quality_metrics_to_es.py | monkollek/mito_browser | 01bd0326bcb17357232cf47098f63f6ce549dbeb | ["MIT"] | 2 | 2019-07-18T18:12:16.000Z | 2019-07-25T15:23:09.000Z | projects/gnomad/data/export_aggregate_quality_metrics_to_es.py | monkollek/mito_browser | 01bd0326bcb17357232cf47098f63f6ce549dbeb | ["MIT"] | 25 | 2020-10-21T21:16:24.000Z | 2022-02-28T04:09:52.000Z | projects/gnomad/data/export_aggregate_quality_metrics_to_es.py | monkollek/mito_browser | 01bd0326bcb17357232cf47098f63f6ce549dbeb | ["MIT"] | 2 | 2019-08-04T14:13:32.000Z | 2020-01-22T13:39:02.000Z
import argparse
import json
import elasticsearch
p = argparse.ArgumentParser()
p.add_argument("--metrics-file", help="Path to metrics JSON file", required=True, type=argparse.FileType("r"))
p.add_argument("--tag", help="Tag metrics with the dataset they belong to", required=True)
p.add_argument("--host", help="Elasticsearch host or IP", required=True)
p.add_argument("--port", help="Elasticsearch port", default=9200, type=int)
p.add_argument("--index-name", help="Elasticsearch index name", required=True)
args = p.parse_args()
es = elasticsearch.Elasticsearch(args.host, port=args.port)
if not es.indices.exists(index=args.index_name):
mapping = {
"mappings": {
"metric": {
"_all": {"enabled": "false"},
"properties": {
"bin_edges": {"type": "double"},
"bin_freq": {"type": "double"},
"n_smaller": {"type": "integer"},
"n_larger": {"type": "integer"},
"metric": {"type": "keyword"},
"tag": {"type": "keyword"},
},
}
}
}
es.indices.create(index=args.index_name, body=mapping)
metrics = json.loads(args.metrics_file.read())
for metric in metrics:
metric["tag"] = args.tag
bulk_request = "\n".join([f"{{\"index\": {{}}}}\n{json.dumps(metric)}" for metric in metrics])
es.bulk(body=bulk_request, index=args.index_name, doc_type="metric")
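# Editor's sketch: one entry of the expected --metrics-file JSON, with field
# names taken from the mapping above (the numbers are made up):
#   [{"metric": "site_quality", "bin_edges": [0.0, 0.5, 1.0],
#     "bin_freq": [120.0, 80.0], "n_smaller": 0, "n_larger": 3}]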
b8bca3ca8ce9ce0b9466fae7ebad5691440fa319 | 7,188 | py | Python | src/means/approximation/mea/closure_scalar.py | nicktimko/means | fe164916a1d84ab2a4fa039871d38ccdf638b1db | ["MIT"] | 10 | 2016-05-25T08:28:39.000Z | 2020-06-04T03:19:50.000Z | src/means/approximation/mea/closure_scalar.py | nicktimko/means | fe164916a1d84ab2a4fa039871d38ccdf638b1db | ["MIT"] | 5 | 2015-12-08T14:01:15.000Z | 2020-01-10T22:42:18.000Z | src/means/approximation/mea/closure_scalar.py | nicktimko/means | fe164916a1d84ab2a4fa039871d38ccdf638b1db | ["MIT"] | 6 | 2015-12-10T17:24:11.000Z | 2021-03-22T16:12:17.000Z
"""
Scalar moment closure
---------------------
This part of the package provides the original
(and default) closure :class:`~means.approximation.mea.closure_scalar.ScalarClosure`
as well as the base class for all closers.
"""
import sympy as sp
from means.util.sympyhelpers import substitute_all
class ClosureBase(object):
"""
A virtual class for closure methods. An implementation of `_compute_raw_moments()`
must be provided in subclasses.
"""
_max_order = None
_min_order = 1
def __init__(self,max_order, multivariate=True):
"""
:param max_order: the maximal order of moments to be modelled.
:type max_order: `int`
:param multivariate: whether to consider covariances
:return:
"""
self._max_order = max_order
self.__is_multivariate = multivariate
if self._min_order > max_order:
raise ValueError("This closure method requires `max_order` to be >= {0}".format(self._min_order))
@property
def is_multivariate(self):
return self.__is_multivariate
@property
def max_order(self):
return self._max_order
def _compute_raw_moments(self, n_counter, k_counter):
raise NotImplementedError("ParametricCloser is an abstract class.\
`compute_closed_raw_moments()` is not implemented. ")
def _compute_closed_central_moments(self, central_from_raw_exprs, n_counter, k_counter):
r"""
Computes parametric expressions (e.g. in terms of mean, variance, covariances) for all central moments
up to max_order + 1 order.
:param central_from_raw_exprs: the expression of central moments in terms of raw moments
:param n_counter: a list of :class:`~means.core.descriptors.Moment`\s representing central moments
:type n_counter: list[:class:`~means.core.descriptors.Moment`]
:param k_counter: a list of :class:`~means.core.descriptors.Moment`\s representing raw moments
:type k_counter: list[:class:`~means.core.descriptors.Moment`]
:return: the central moments where raw moments have been replaced by parametric expressions
:rtype: `sympy.Matrix`
"""
closed_raw_moments = self._compute_raw_moments(n_counter, k_counter)
assert(len(central_from_raw_exprs) == len(closed_raw_moments))
# raw moment lef hand side symbol
raw_symbols = [raw.symbol for raw in k_counter if raw.order > 1]
# we want to replace raw moments symbols with closed raw moment expressions (in terms of variances/means)
substitution_pairs = zip(raw_symbols, closed_raw_moments)
# so we can obtain expression of central moments in terms of low order raw moments
closed_central_moments = substitute_all(central_from_raw_exprs, substitution_pairs)
return closed_central_moments
def close(self, mfk, central_from_raw_exprs, n_counter, k_counter):
"""
In MFK, replaces symbol for high order (order == max_order+1) by parametric expressions.
That is expressions depending on lower order moments such as means, variances, covariances and so on.
:param mfk: the right hand side equations containing symbols for high order central moments
:param central_from_raw_exprs: expressions of central moments in terms of raw moments
:param n_counter: a list of :class:`~means.core.descriptors.Moment`\s representing central moments
:type n_counter: list[:class:`~means.core.descriptors.Moment`]
:param k_counter: a list of :class:`~means.core.descriptors.Moment`\s representing raw moments
:type k_counter: list[:class:`~means.core.descriptors.Moment`]
:return: the modified MFK
:rtype: `sympy.Matrix`
"""
# we obtain expressions for central moments in terms of variances/covariances
closed_central_moments = self._compute_closed_central_moments(central_from_raw_exprs, n_counter, k_counter)
# set mixed central moment to zero iff univariate
closed_central_moments = self._set_mixed_moments_to_zero(closed_central_moments, n_counter)
        # the closed central moment symbols are typically :math:`[yx2, yx3, ..., yxN]`
# now we want to replace the new mfk (i.e. without highest order moment) any
# symbol for highest order central moment by the corresponding expression (computed above)
positive_n_counter = [n for n in n_counter if n.order > 0]
substitutions_pairs = [(n.symbol, ccm) for n,ccm in
zip(positive_n_counter, closed_central_moments) if n.order > self.max_order]
new_mfk = substitute_all(mfk, substitutions_pairs)
return new_mfk
def _set_mixed_moments_to_zero(self, closed_central_moments, n_counter):
r"""
In univariate case, set the cross-terms to 0.
:param closed_central_moments: matrix of closed central moment
:param n_counter: a list of :class:`~means.core.descriptors.Moment`\s representing central moments
:type n_counter: list[:class:`~means.core.descriptors.Moment`]
:return: a matrix of new closed central moments with cross-terms equal to 0
"""
positive_n_counter = [n for n in n_counter if n.order > 1]
if self.is_multivariate:
return closed_central_moments
else:
return [0 if n.is_mixed else ccm for n,ccm in zip(positive_n_counter, closed_central_moments)]
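# Editor's note: with max_order = 2 and the default value = 0, the scalar
# closure below is the classical truncation that replaces every third-order
# central moment by a constant, e.g. E[(x - mu)^3] = 0, so the mean/variance
# ODEs close without referencing higher moments.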
class ScalarClosure(ClosureBase):
"""
A class providing scalar closure to
:class:`~means.approximation.mea.moment_expansion_approximation.MomentExpansionApproximation`.
Expression for higher order (max_order + 1) central moments are set to a scalar.
Typically, higher order central moments are replaced by zero.
"""
def __init__(self,max_order,value=0):
"""
:param max_order: the maximal order of moments to be modelled.
:type max_order: `int`
:param value: a scalar value for higher order moments
"""
super(ScalarClosure, self).__init__(max_order, False)
self.__value = value
@property
def value(self):
return self.__value
def _compute_closed_central_moments(self, central_from_raw_exprs, n_counter, k_counter):
r"""
Replace raw moment terms in central moment expressions by parameters (e.g. mean, variance, covariances)
:param central_from_raw_exprs: the expression of central moments in terms of raw moments
:param n_counter: a list of :class:`~means.core.descriptors.Moment`\s representing central moments
:type n_counter: list[:class:`~means.core.descriptors.Moment`]
:param k_counter: a list of :class:`~means.core.descriptors.Moment`\s representing raw moments
:type k_counter: list[:class:`~means.core.descriptors.Moment`]
:return: the central moments where raw moments have been replaced by parametric expressions
:rtype: `sympy.Matrix`
"""
closed_central_moments = sp.Matrix([sp.Integer(self.__value)] * len(central_from_raw_exprs))
        return closed_central_moments
b8bf7be4d53448dcf5c8347f0f0550006c4ab666 | 1,597 | py | Python | script/markdown_gen.py | brandonganem/cocktail_guide | ebaab565683e927b8fbdaa9d7b89a19e964c08f8 | ["MIT"] | null | null | null | script/markdown_gen.py | brandonganem/cocktail_guide | ebaab565683e927b8fbdaa9d7b89a19e964c08f8 | ["MIT"] | null | null | null | script/markdown_gen.py | brandonganem/cocktail_guide | ebaab565683e927b8fbdaa9d7b89a19e964c08f8 | ["MIT"] | null | null | null
import yaml
import os
import re
import glob
from jinja2 import Environment, FileSystemLoader
def _short_desc(style):
    if re.match('(?i)^[aeiou]', style):
return f"_Style_: _{style}_"
return f"_Style_: {style}"
def _generate_ingredients(ingredients):
return_val = []
for ingredient in ingredients:
return_val.append({"name": ingredient, "pour": ingredients[ingredient]})
return return_val
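# Editor's note: {'Gin': '2 oz', 'Vermouth': '1 oz'} becomes
# [{'name': 'Gin', 'pour': '2 oz'}, {'name': 'Vermouth', 'pour': '1 oz'}],
# i.e. a list the Jinja template can iterate over; the ingredient names here
# are made up for illustration.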
def load_cocktails(path):
cocktail_yamls = glob.iglob(f'{path}/**/*.yaml', recursive=True)
cocktails = []
for cocktail in cocktail_yamls:
with open(cocktail, 'r') as f:
            try:
                cocktail_dict = yaml.safe_load(f)
                cocktail_dict['cocktail_desc_short'] = _short_desc(cocktail_dict['Style'])
                cocktail_dict['Ingredients'] = _generate_ingredients(cocktail_dict['Ingredients'])
                cocktails.append(cocktail_dict)
            except yaml.YAMLError as exc:
                print(exc)
                raise RuntimeError("Unable to load cocktail.")
return cocktails
def main():
j2_env = Environment(loader=FileSystemLoader('.'), trim_blocks=True)
template = j2_env.get_template('script/cocktail.jinja2')
if not os.path.exists('./pages'):
os.makedirs('./pages')
if not os.path.exists('./pages/cocktails'):
os.makedirs('./pages/cocktails')
for cocktail in load_cocktails("./cocktails"):
out = template.render(cocktail)
with open(f"./pages/cocktails/{cocktail['Name']}.md", "w") as f:
f.write(out)
if __name__ == "__main__":
main()
b8bff39681e8d7e98403641f4eb1ca435ca8fcb5 | 1,146 | py | Python | app/gws/gis/ows/formats/geobak.py | ewie/gbd-websuite | 6f2814c7bb64d11cb5a0deec712df751718fb3e1 | [
"Apache-2.0"
] | null | null | null | app/gws/gis/ows/formats/geobak.py | ewie/gbd-websuite | 6f2814c7bb64d11cb5a0deec712df751718fb3e1 | [
"Apache-2.0"
] | null | null | null | app/gws/gis/ows/formats/geobak.py | ewie/gbd-websuite | 6f2814c7bb64d11cb5a0deec712df751718fb3e1 | [
"Apache-2.0"
] | null | null | null | import re
import gws.tools.xml2
import gws.gis.shape
import gws.gis.feature
# GeoBAK (https://www.egovernment.sachsen.de/geodaten.html)
#
# <geobak_20:Sachdatenabfrage...
# <geobak_20:Kartenebene>....
# <geobak_20:Inhalt>
# <geobak_20:Datensatz>
# <geobak_20:Attribut>
# <geobak_20:Name>...
# <geobak_20:Wert>...
# <geobak_20:Inhalt>
# <geobak_20:Datensatz>
# ...
#
def parse(text, first_el, crs=None, invert_axis=None, **kwargs):
if 'geobak_20' not in first_el.namespaces:
return None
# some services have bare &'s in xml
text = re.sub(r'&(?![#\w])', '', text)
el = gws.tools.xml2.from_string(text)
fs = []
layer_name = el.get_text('Kartenebene')
for content in el.all('Inhalt'):
for ds in content.all('Datensatz'):
atts = {
a.get_text('Name').strip(): a.get_text('Wert').strip()
for a in ds.all('Attribut')
}
fs.append(gws.gis.feature.Feature(
category=layer_name,
attributes=atts
))
return fs
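
# Minimal usage sketch (hypothetical GeoBAK GetFeatureInfo response text in `response_text`;
# gws.tools.xml2.from_string is the same helper parse() itself uses):
#
#   first_el = gws.tools.xml2.from_string(response_text)
#   features = parse(response_text, first_el)  # list of gws.gis.feature.Feature, or None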
| 25.466667 | 70 | 0.552356 | 139 | 1,146 | 4.417266 | 0.453237 | 0.130293 | 0.039088 | 0.065147 | 0.100977 | 0.100977 | 0 | 0 | 0 | 0 | 0 | 0.027363 | 0.298429 | 1,146 | 44 | 71 | 26.045455 | 0.736318 | 0.331588 | 0 | 0 | 0 | 0 | 0.081225 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.181818 | 0 | 0.318182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8c265440493d2090b1e3ae798a8c1615380e129 | 1,752 | py | Python | imix/data/reader/feature_reader/pth_feature_reader.py | linxi1158/iMIX | af87a17275f02c94932bb2e29f132a84db812002 | [
"Apache-2.0"
] | 23 | 2021-06-26T08:45:19.000Z | 2022-03-02T02:13:33.000Z | imix/data/reader/feature_reader/pth_feature_reader.py | XChuanLee/iMIX | 99898de97ef8b45462ca1d6bf2542e423a73d769 | [
"Apache-2.0"
] | null | null | null | imix/data/reader/feature_reader/pth_feature_reader.py | XChuanLee/iMIX | 99898de97ef8b45462ca1d6bf2542e423a73d769 | [
"Apache-2.0"
] | 9 | 2021-06-10T02:36:20.000Z | 2021-11-09T02:18:16.000Z | from pathlib import Path
from .feature_reader import FEATURE_READERS, FeatureReader
import torch
from typing import Dict
@FEATURE_READERS.register_module()
class PthFeatureReader(FeatureReader):
def __init__(self, dataset_type, feat_path=None, max_features=None):
super().__init__(dataset_type, feat_path=feat_path, max_features=max_features)
self.img_features = self._load_img_features()
@staticmethod
def file_name_to_path(file_dir: str, file_format: str) -> Dict:
img_feats = {}
feat_files = Path(file_dir).glob(pattern=file_format)
for file in feat_files:
file_name = str(file.name).split('.')[0]
img_feats[file_name] = str(file)
return img_feats
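    # e.g. (hypothetical paths) file_name_to_path('/feats', '*.pt') -> {'img_001': '/feats/img_001.pt'}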
def _load_img_features(self):
assert Path(self.feat_path).is_dir()
img_features = self.file_name_to_path(file_dir=self.feat_path, file_format='*.pt')
if len(img_features) == 0:
img_features = self.file_name_to_path(file_dir=self.feat_path, file_format='*.pth')
self.logger.info(f'loading {self.feat_path} features {len(img_features)}')
return img_features
def read(self, img_annotation):
return self._get_img_feat(img_annotation)
def _get_img_feat(self, img_annotation):
if self.dataset_type == 'VQAReader':
key = Path(img_annotation['feature_path']).name.split('.')[0]
img_feat_path = self.img_features.get(key)
img_feat = torch.load(img_feat_path)
return {'features': img_feat[0]}
else:
self.logger.error('Please add the image feature reader of the corresponding dataset type!')
raise TypeError(f'{self.dataset_type} is an unsupported data type')
| 39.818182 | 103 | 0.683219 | 240 | 1,752 | 4.65 | 0.295833 | 0.064516 | 0.053763 | 0.037634 | 0.12276 | 0.12276 | 0.103943 | 0.103943 | 0.103943 | 0.103943 | 0 | 0.002899 | 0.212329 | 1,752 | 43 | 104 | 40.744186 | 0.805797 | 0 | 0 | 0 | 0 | 0 | 0.119863 | 0 | 0 | 0 | 0 | 0 | 0.028571 | 1 | 0.142857 | false | 0 | 0.114286 | 0.028571 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8c3e5aeb8f6649db0b0c023b5c46781dd5f8dcd | 1,705 | py | Python | buienradar/buienradar.py | corneyl/python-buienradar | 34ca10f00ef1e3096fbd8d6c1c278409248891e1 | [
"MIT"
] | 9 | 2019-01-17T15:52:31.000Z | 2022-03-03T16:27:09.000Z | buienradar/buienradar.py | corneyl/python-buienradar | 34ca10f00ef1e3096fbd8d6c1c278409248891e1 | [
"MIT"
] | 14 | 2017-06-24T09:11:55.000Z | 2021-12-12T16:50:28.000Z | buienradar/buienradar.py | corneyl/python-buienradar | 34ca10f00ef1e3096fbd8d6c1c278409248891e1 | [
"MIT"
] | 9 | 2017-06-24T09:09:34.000Z | 2021-11-21T18:17:08.000Z | """Buienradar library to get parsed weather data from buienradar.nl."""
import logging
from buienradar.buienradar_json import get_json_data, parse_json_data
from buienradar.buienradar_xml import get_xml_data, parse_xml_data
from buienradar.constants import (
__BRCONDITIONS,
CONDCODE,
CONDITION,
DETAILED,
EXACT,
EXACTNL
)
log = logging.getLogger(__name__)
def get_data(latitude=52.091579, longitude=5.119734, usexml=False):
"""Get buienradar xml data and return results."""
if usexml:
log.info("Getting buienradar XML data for latitude=%s, longitude=%s",
latitude, longitude)
return get_xml_data(latitude, longitude)
else:
log.info("Getting buienradar JSON data for latitude=%s, longitude=%s",
latitude, longitude)
return get_json_data(latitude, longitude)
def parse_data(content, raincontent, latitude=52.091579,
longitude=5.119734, timeframe=60, usexml=False):
"""Parse the raw data and return as data dictionary."""
if usexml:
return parse_xml_data(content, raincontent,
latitude, longitude, timeframe)
else:
return parse_json_data(content, raincontent,
latitude, longitude, timeframe)
def condition_from_code(condcode):
"""Get the condition name from the condition code."""
if condcode in __BRCONDITIONS:
cond_data = __BRCONDITIONS[condcode]
return {CONDCODE: condcode,
CONDITION: cond_data[0],
DETAILED: cond_data[1],
EXACT: cond_data[2],
EXACTNL: cond_data[3],
}
return None
| 32.169811 | 78 | 0.648094 | 194 | 1,705 | 5.505155 | 0.298969 | 0.039326 | 0.050562 | 0.08427 | 0.247191 | 0.247191 | 0.097378 | 0.097378 | 0.097378 | 0.097378 | 0 | 0.028869 | 0.268622 | 1,705 | 52 | 79 | 32.788462 | 0.827586 | 0.121408 | 0 | 0.205128 | 0 | 0 | 0.077861 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.102564 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2109612959b5c254810ddf6761b95d6a6cf5ffa | 1,384 | py | Python | stashbox.py | stg-annon/stashapi | 490c849accdc0751682d53ebc0882966e4845fd2 | [
"MIT"
] | null | null | null | stashbox.py | stg-annon/stashapi | 490c849accdc0751682d53ebc0882966e4845fd2 | [
"MIT"
] | null | null | null | stashbox.py | stg-annon/stashapi | 490c849accdc0751682d53ebc0882966e4845fd2 | [
"MIT"
] | null | null | null | import sys
from requests.structures import CaseInsensitiveDict
from .classes import GQLWrapper
from . import gql_fragments
from . import log
class StashBoxInterface(GQLWrapper):
port = None
url = None
headers = {
"Accept-Encoding": "gzip, deflate",
"Content-Type": "application/json",
"Accept": "application/json",
"Connection": "keep-alive",
"DNT": "1"
}
cookies = {}
	def __init__(self, conn={}, fragments=None):
global log
conn = CaseInsensitiveDict(conn)
log = conn.get("logger", log)
if not log:
raise Exception("No Logger Provided")
self.url = conn.get('endpoint', "https://stashdb.org/graphql")
api_key = conn.get('api_key', None)
if not api_key:
raise Exception("no api_key provided")
self.headers['ApiKey'] = api_key
try:
# test query to check connection
r = self._callGraphQL("query Me{me {name email}}")
log.info(f'Connected to "{self.url}" as {r["me"]["name"]} ({r["me"]["email"]})')
except Exception as e:
log.error(f"Could not connect to Stash-Box at {self.url}")
log.error(e)
sys.exit()
		# copy to avoid mutating a caller-supplied (or shared default) dict below
		self.fragments = dict(fragments) if fragments else {}
self.fragments.update(gql_fragments.STASHBOX)
def get_scene_last_updated(self, scene_id):
query = """query sceneLastUpdated($id: ID!) {
findScene(id: $id) {
updated
}
}"""
result = self._callGraphQL(query, {"id": scene_id})
return result["findScene"]["updated"]
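
# Minimal usage sketch (hypothetical scene_id; "endpoint" and "api_key" are the keys
# __init__() reads from `conn` above):
#
#   box = StashBoxInterface({"endpoint": "https://stashdb.org/graphql", "api_key": API_KEY})
#   updated = box.get_scene_last_updated(scene_id)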
| 23.457627 | 83 | 0.671965 | 183 | 1,384 | 4.983607 | 0.464481 | 0.032895 | 0.035088 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000871 | 0.17052 | 1,384 | 58 | 84 | 23.862069 | 0.793554 | 0.021676 | 0 | 0 | 0 | 0.022727 | 0.315556 | 0.015556 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.113636 | 0 | 0.295455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b21950752cfb38d46e531239bdd623acc9e9acdb | 7,158 | py | Python | main.py | Kazuhito00/FingerFrameLens | 7acfc263ead3af0b5aa4dae259e355a45769eeb2 | [
"MIT"
] | 6 | 2020-09-19T07:46:32.000Z | 2021-09-25T08:39:29.000Z | main.py | Kazuhito00/FingerFrameLens | 7acfc263ead3af0b5aa4dae259e355a45769eeb2 | [
"MIT"
] | 1 | 2021-04-27T23:28:07.000Z | 2021-04-30T02:28:20.000Z | main.py | Kazuhito00/FingerFrameLens | 7acfc263ead3af0b5aa4dae259e355a45769eeb2 | [
"MIT"
] | 2 | 2020-12-11T09:58:12.000Z | 2021-04-14T11:56:58.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
[summary]
FingerFrame Lens
[description]
-
"""
import argparse
import time
import copy
from collections import deque
import cv2 as cv
import numpy as np
import tensorflow as tf
from utils import CvFpsCalc
from gui.app_gui import AppGui
def get_args():
"""
[summary]
      Parse command-line arguments
Parameters
----------
None
"""
parser = argparse.ArgumentParser()
parser.add_argument("--device", type=int, default=0)
parser.add_argument("--fps", type=float, default=10.1)
parser.add_argument("--width", help='cap width', type=int, default=960)
parser.add_argument("--height", help='cap height', type=int, default=540)
parser.add_argument("--model", default='model/EfficientDetD0/saved_model')
parser.add_argument("--score_th", type=float, default=0.7)
parser.add_argument("--smaller_ratio", type=float, default=0.22)
args = parser.parse_args()
return args
def run_od_inference(inference_func, image):
"""
[summary]
      Run object detection inference on a single image
    Parameters
    ----------
    inference_func : func
        Inference function used for object detection
    image : image
        Image to run inference on
"""
image = image[:, :, [2, 1, 0]] # BGR2RGB
image = np.expand_dims(image, axis=0)
tensor = tf.convert_to_tensor(image)
output = inference_func(tensor)
output['num_detections'] = int(output['num_detections'][0])
output['detection_classes'] = output['detection_classes'][0].numpy()
output['detection_boxes'] = output['detection_boxes'][0].numpy()
output['detection_scores'] = output['detection_scores'][0].numpy()
return output
def calc_od_bbox(detection_result, score_th, smaller_ratio, frame_width,
frame_height):
"""
[summary]
      Compute a bounding box from the object detection result
    Parameters
    ----------
    detection_result : dict
        Object detection result
    score_th : float
        Object detection score threshold
    smaller_ratio : float
        Ratio by which the bounding box is shrunk
    frame_width : int
        Image width
    frame_height : int
        Image height
"""
x1, y1, x2, y2 = None, None, None, None
num_detections = detection_result['num_detections']
for i in range(num_detections):
score = detection_result['detection_scores'][i]
bbox = detection_result['detection_boxes'][i]
if score < score_th:
continue
        # Extract bounding box from detection result #########################
x1, y1 = int(bbox[1] * frame_width), int(bbox[0] * frame_height)
x2, y2 = int(bbox[3] * frame_width), int(bbox[2] * frame_height)
risize_ratio = smaller_ratio
bbox_width = x2 - x1
bbox_height = y2 - y1
x1 = x1 + int(bbox_width * risize_ratio)
y1 = y1 + int(bbox_height * risize_ratio)
x2 = x2 - int(bbox_width * risize_ratio)
y2 = y2 - int(bbox_height * risize_ratio)
        break  # use the first valid bounding box
return x1, y1, x2, y2
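# Example (hypothetical coordinates): with smaller_ratio=0.22 a detected box
# (100, 100)-(200, 200) has each edge moved inward by 22% of its width/height,
# giving (122, 122)-(178, 178) before the crop is classified.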
def run_classify(model, image):
"""
[summary]
      Classify the image
    Parameters
    ----------
    model : model
        Classification model
    image : image
        Image to run inference on
"""
inp = cv.resize(image, (224, 224))
inp = inp[:, :, [2, 1, 0]] # BGR2RGB
inp = np.expand_dims(inp, axis=0)
tensor = tf.convert_to_tensor(inp)
tensor = tf.keras.applications.efficientnet.preprocess_input(tensor)
classifications = model.predict(tensor)
classifications = tf.keras.applications.efficientnet.decode_predictions(
classifications,
top=5,
)
classifications = np.squeeze(classifications)
return classifications
def main():
"""
[summary]
main()
Parameters
----------
None
"""
    # Parse arguments ########################################################
args = get_args()
cap_device = args.device
cap_width = args.width
cap_height = args.height
fps = args.fps
model_path = args.model
score_th = args.score_th
smaller_ratio = args.smaller_ratio
    # Prepare GUI ############################################################
app_gui = AppGui(window_name='FingerFrameLens')
    # Initial settings
app_gui.set_score_threshold(score_th)
    # Prepare camera #########################################################
cap = cv.VideoCapture(cap_device)
cap.set(cv.CAP_PROP_FRAME_WIDTH, cap_width)
cap.set(cv.CAP_PROP_FRAME_HEIGHT, cap_height)
    # Load models ############################################################
# EfficientDet-D0
DEFAULT_FUNCTION_KEY = 'serving_default'
effdet_model = tf.saved_model.load(model_path)
inference_func = effdet_model.signatures[DEFAULT_FUNCTION_KEY]
# EfficientNet-B0
effnet_model = tf.keras.applications.EfficientNetB0(
include_top=True,
weights='imagenet',
input_shape=(224, 224, 3),
)
tensor = tf.convert_to_tensor(np.zeros((1, 224, 224, 3), np.uint8))
effnet_model.predict(tensor)
effnet_model.make_predict_function()
    # Prepare FPS measurement ################################################
cvFpsCalc = CvFpsCalc(buffer_len=3)
cropping_image = None
classifications = None
while True:
start_time = time.time()
        # Get GUI settings ###################################################
score_th = app_gui.get_score_threshold()
        # Capture camera frame ###############################################
ret, frame = cap.read()
if not ret:
continue
frame_width, frame_height = frame.shape[1], frame.shape[0]
debug_image = copy.deepcopy(frame)
        # Run object detection ###############################################
detections = run_od_inference(inference_func, frame)
x1, y1, x2, y2 = calc_od_bbox(
detections,
score_th,
smaller_ratio,
frame_width,
frame_height,
)
# cv.putText(debug_image, '{:.3f}'.format(score), (x1, y1 - 10),
# cv.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2,
# cv.LINE_AA)
# cv.rectangle(debug_image, (x1, y1), (x2, y2), (255, 255, 255), 2)
        # Run classification #################################################
if x1 is not None and y1 is not None and \
x2 is not None and y2 is not None:
cropping_image = copy.deepcopy(frame[y1:y2, x1:x2])
classifications = run_classify(effnet_model, cropping_image)
        # Update GUI #########################################################
fps_result = cvFpsCalc.get()
app_gui.update(
fps_result,
debug_image,
cropping_image,
classifications,
)
app_gui.show()
        # Key input (ESC: exit program) ######################################
key = cv.waitKey(1)
if key == 27: # ESC
break
        # FPS adjustment #####################################################
elapsed_time = time.time() - start_time
sleep_time = max(0, ((1.0 / fps) - elapsed_time))
time.sleep(sleep_time)
cap.release()
cv.destroyAllWindows()
if __name__ == '__main__':
main()
| 27.85214 | 78 | 0.540933 | 763 | 7,158 | 4.862385 | 0.272608 | 0.016981 | 0.032075 | 0.008625 | 0.093531 | 0.047439 | 0.036658 | 0.021563 | 0 | 0 | 0 | 0.026257 | 0.24979 | 7,158 | 256 | 79 | 27.960938 | 0.664618 | 0.142498 | 0 | 0.046512 | 0 | 0 | 0.063573 | 0.00624 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03876 | false | 0 | 0.069767 | 0 | 0.139535 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b219620b1c35183221c0f3dc744e3b4d55398cdf | 649 | py | Python | restful/lib/annodb.py | always-waiting/Django-Xromate | 1fb5b4bbdfac9549622c5714971095325f201a96 | [
"MIT"
] | null | null | null | restful/lib/annodb.py | always-waiting/Django-Xromate | 1fb5b4bbdfac9549622c5714971095325f201a96 | [
"MIT"
] | null | null | null | restful/lib/annodb.py | always-waiting/Django-Xromate | 1fb5b4bbdfac9549622c5714971095325f201a96 | [
"MIT"
] | null | null | null | # coding: utf-8
from django.shortcuts import render, redirect
from django.core.urlresolvers import reverse
import logging
logger = logging.getLogger('django')
# Create api/annodb views here
def table_locs(request, table, locs):
if request.method == 'GET':
redirect_url = reverse("annodb_table_locs", kwargs={'table':table, 'locs':locs})
query = []
for get in request.GET:
query.append("=".join([get,request.GET[get]]))
#logger.info(get)
if query:
querystr = "&".join(query)
redirect_url = "?".join([redirect_url, querystr])
return redirect(redirect_url)
| 30.904762 | 88 | 0.636364 | 78 | 649 | 5.205128 | 0.474359 | 0.08867 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002004 | 0.231125 | 649 | 20 | 89 | 32.45 | 0.811623 | 0.089368 | 0 | 0 | 0 | 0 | 0.064736 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.214286 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b219a24eb35d369e099909f0355bbbea0278ee23 | 5,331 | py | Python | xtreme_vision/Segmentation/cdcl/human_seg/flood_fill.py | Adeel-Intizar/Xtreme-Vision | 2e09e6972c6b2752bc37f8356fafda151acacd0d | [
"MIT"
] | 81 | 2020-11-21T07:21:38.000Z | 2022-02-14T18:31:55.000Z | xtreme_vision/Segmentation/cdcl/human_seg/flood_fill.py | Adeel-Intizar/Xtreme-Vision | 2e09e6972c6b2752bc37f8356fafda151acacd0d | [
"MIT"
] | 10 | 2020-12-01T13:00:48.000Z | 2021-07-18T10:40:01.000Z | xtreme_vision/Segmentation/cdcl/human_seg/flood_fill.py | Adeel-Intizar/Xtreme-Vision | 2e09e6972c6b2752bc37f8356fafda151acacd0d | [
"MIT"
] | 23 | 2020-11-24T06:30:23.000Z | 2021-07-05T01:37:58.000Z | import numpy as np
import sys
sys.setrecursionlimit(100000)
def parse_keypoints(keypoints_str):
keypoints = keypoints_str.rstrip().split('\n')
keypoints = list(map(lambda x: list(map(lambda y: list(map(lambda z: int(float(z)), y.rstrip().split(','))), x.rstrip().split(' '))), keypoints))
return keypoints
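# Example (assumed input format: one line per person, keypoints separated by
# spaces, each keypoint given as "x,y" floats):
#   parse_keypoints("12.0,34.5 56.0,78.9\n") -> [[[12, 34], [56, 78]]]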
def check_depth(coord, coord_prev, depth_map, depth_diff_thres):
if np.abs(float(depth_map[tuple(coord)]) - float(depth_map[tuple(coord_prev)])) < depth_diff_thres:
return True
return False
def check_keypoint_idx(coord, keypoint_idx, seg_map):
# no need to land on correct parts
if np.min(seg_map[tuple(coord)] == np.array([0, 0, 0])):
return False
return True
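    # NOTE: the early `return True` above makes the per-part colour checks below
    # unreachable; a keypoint only needs to land on any non-background pixel.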
if keypoint_idx in [0, 14, 15, 16, 17] and np.min(seg_map[tuple(coord)] == np.array([127, 127, 127])):
return True
if keypoint_idx in [1] and (np.min(seg_map[tuple(coord)] == np.array([127, 127, 127])) or np.min(seg_map[tuple(coord)] == np.array([0, 0, 255]))):
return True
if keypoint_idx in [2, 5] and (np.min(seg_map[tuple(coord)] == np.array([0, 255, 255])) or np.min(seg_map[tuple(coord)] == np.array([0, 0, 255])) or np.min(seg_map[tuple(coord)] == np.array([0, 127, 255]))):
return True
if keypoint_idx in [3, 6] and (np.min(seg_map[tuple(coord)] == np.array([0, 255, 255])) or np.min(seg_map[tuple(coord)] == np.array([255, 0, 255])) or np.min(seg_map[tuple(coord)] == np.array([0, 127, 255])) or np.min(seg_map[tuple(coord)] == np.array([255, 0, 127]))):
return True
if keypoint_idx in [4, 7] and (np.min(seg_map[tuple(coord)] == np.array([255, 255, 0])) or np.min(seg_map[tuple(coord)] == np.array([255, 0, 255])) or np.min(seg_map[tuple(coord)] == np.array([0, 127, 127])) or np.min(seg_map[tuple(coord)] == np.array([255, 0, 127]))):
return True
if keypoint_idx in [8, 11] and (np.min(seg_map[tuple(coord)] == np.array([0, 0, 255])) or np.min(seg_map[tuple(coord)] == np.array([255, 0, 0])) or np.min(seg_map[tuple(coord)] == np.array([0, 255, 0]))):
return True
if keypoint_idx in [9, 12] and (np.min(seg_map[tuple(coord)] == np.array([0, 255, 0])) or np.min(seg_map[tuple(coord)] == np.array([127, 255, 127])) or np.min(seg_map[tuple(coord)] == np.array([255, 0, 0])) or np.min(seg_map[tuple(coord)] == np.array([255, 127, 0]))):
return True
if keypoint_idx in [10, 13] and (np.min(seg_map[tuple(coord)] == np.array([127, 255, 127])) or np.min(seg_map[tuple(coord)] == np.array([127, 255, 255])) or np.min(seg_map[tuple(coord)] == np.array([255, 127, 0])) or np.min(seg_map[tuple(coord)] == np.array([127, 127, 255]))):
return True
return False
# coordinate should be transposed
def flood_fill(target_map, check_map, coord, coord_prev, keypoint_idx, target_map_instance_id, seg_map, depth_map, depth_diff_thres):
if coord[0] < 0 or coord[1] < 0 or coord[0] >= check_map.shape[0] or coord[1] >= check_map.shape[1]:
return
if not check_map[tuple(coord)]:
#if target_map[tuple(coord)] == 0 or target_map[tuple(coord)] == target_map_instance_id:
if target_map[tuple(coord)] == 0:
if check_keypoint_idx(coord, keypoint_idx, seg_map) and check_depth(coord, coord_prev, depth_map, depth_diff_thres):
check_map[tuple(coord)] = 1
target_map[tuple(coord)] = target_map_instance_id
flood_fill(target_map, check_map, coord + np.array([0, 1]), coord, keypoint_idx, target_map_instance_id, seg_map, depth_map, depth_diff_thres)
flood_fill(target_map, check_map, coord + np.array([1, 0]), coord, keypoint_idx, target_map_instance_id, seg_map, depth_map, depth_diff_thres)
flood_fill(target_map, check_map, coord + np.array([0, -1]), coord, keypoint_idx, target_map_instance_id, seg_map, depth_map, depth_diff_thres)
flood_fill(target_map, check_map, coord + np.array([-1, 0]), coord, keypoint_idx, target_map_instance_id, seg_map, depth_map, depth_diff_thres)
check_map[tuple(coord)] = 2
def skeleton_depth(depth):
def skeleton_depth_wrapper(keypoints):
final_depth = 0
keypoints_cnt = 0
for coord in keypoints:
coord = tuple(map(int, coord))
coord = (coord[1], coord[0])
if coord != (-1, -1):
if depth[coord] >0:
final_depth += depth[coord]
keypoints_cnt += 1
return float(final_depth) / keypoints_cnt
return skeleton_depth_wrapper
def human_instance_seg(seg_map, depth_map, keypoints, depth_diff_thres=50):
tar = np.zeros((seg_map.shape[0], seg_map.shape[1])).astype(np.uint8)
check = np.zeros((seg_map.shape[0], seg_map.shape[1])).astype(np.uint8)
keypoints = sorted(keypoints, key=skeleton_depth(depth_map))
for idx, i in enumerate(keypoints):
for j, coord in enumerate(i):
coord = list(map(int, coord))
#flood_fill(tar, check, np.array([coord[1], coord[0]]), np.array([coord[1], coord[0]]), j, idx + 1, seg_map, depth_map, depth_diff_thres)
flood_fill(tar, np.zeros((seg_map.shape[0], seg_map.shape[1])).astype(np.uint8), np.array([coord[1], coord[0]]), np.array([coord[1], coord[0]]), j, idx + 1, seg_map, depth_map, depth_diff_thres)
return tar | 63.464286 | 281 | 0.645095 | 878 | 5,331 | 3.72779 | 0.109339 | 0.076994 | 0.139016 | 0.087382 | 0.709441 | 0.695387 | 0.673083 | 0.62542 | 0.57898 | 0.554843 | 0 | 0.059367 | 0.187957 | 5,331 | 84 | 282 | 63.464286 | 0.696697 | 0.053836 | 0 | 0.19403 | 0 | 0 | 0.000794 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.104478 | false | 0 | 0.029851 | 0 | 0.402985 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b219c4726e1118fc8565e1222884ac531480a689 | 2,153 | py | Python | tests/unit/test_clock_fps.py | whitestone8214/pyglet-1.4.10-mod | d16e0471dd74309223f8c208b21a977a86fa231a | [
"BSD-3-Clause"
] | null | null | null | tests/unit/test_clock_fps.py | whitestone8214/pyglet-1.4.10-mod | d16e0471dd74309223f8c208b21a977a86fa231a | [
"BSD-3-Clause"
] | null | null | null | tests/unit/test_clock_fps.py | whitestone8214/pyglet-1.4.10-mod | d16e0471dd74309223f8c208b21a977a86fa231a | [
"BSD-3-Clause"
] | null | null | null | """Tests clock timing between frames and estimations
of frames per second.
"""
from __future__ import absolute_import
import time
import pytest
from pyglet import clock
from ..annotations import skip_if_continuous_integration
def sleep(seconds):
"""Busy sleep on the CPU which is very precise"""
pyclock = clock.get_default()
start = pyclock.time()
while pyclock.time() - start < seconds:
pass
# since clock is global, we initialize a new clock on every test
clock.set_default(clock.Clock())
def test_first_tick_is_delta_zero():
"""
Tests that the first tick is dt = 0.
"""
dt = clock.tick()
assert dt == 0
def test_start_at_zero_fps():
"""
Tests that the default clock starts
with zero fps.
"""
assert clock.get_fps() == 0
@skip_if_continuous_integration()
def test_elapsed_time_between_tick():
"""
Test that the tick function returns the correct elapsed
time between frames, in seconds.
Because we are measuring time differences, we
expect a small error (1%) from the expected value.
"""
sleep_time = 0.2
# initialize internal counter
clock.tick()
# test between initialization and first tick
sleep(sleep_time)
delta_time_1 = clock.tick()
# test between non-initialization tick and next tick
sleep(sleep_time)
delta_time_2 = clock.tick()
assert delta_time_1 == pytest.approx(sleep_time, rel=0.01*sleep_time)
assert delta_time_2 == pytest.approx(sleep_time, rel=0.01*sleep_time)
@skip_if_continuous_integration()
def test_compute_fps():
"""
Test that the clock computes a reasonable value of
frames per second when simulated for 120 ticks at 60 frames per second.
Because sleep is not very precise and fps are unbounded, we
expect a moderate error (10%) from the expected value.
"""
ticks = 120 # for averaging
expected_fps = 60
seconds_per_tick = 1./expected_fps
for i in range(ticks):
time.sleep(seconds_per_tick)
clock.tick()
computed_fps = clock.get_fps()
assert computed_fps == pytest.approx(expected_fps, rel=0.1*expected_fps)
| 25.034884 | 76 | 0.699489 | 311 | 2,153 | 4.655949 | 0.324759 | 0.043508 | 0.031077 | 0.055939 | 0.154696 | 0.133978 | 0.049724 | 0.049724 | 0.049724 | 0 | 0 | 0.01843 | 0.218765 | 2,153 | 85 | 77 | 25.329412 | 0.842449 | 0.385044 | 0 | 0.166667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.138889 | 1 | 0.138889 | false | 0.027778 | 0.138889 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b21ace313b7fab41162ff67a198a8fe08a669c29 | 8,866 | py | Python | config/wngrad/wngrad.py | lx10077/optimpy | 8d3a4faa1e7291297497446fc77df5409acd73b9 | [
"MIT"
] | null | null | null | config/wngrad/wngrad.py | lx10077/optimpy | 8d3a4faa1e7291297497446fc77df5409acd73b9 | [
"MIT"
] | null | null | null | config/wngrad/wngrad.py | lx10077/optimpy | 8d3a4faa1e7291297497446fc77df5409acd73b9 | [
"MIT"
] | null | null | null | import torch.nn as nn
import torch.backends.cudnn as cudnn
import os
import argparse
import csv
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
from utils.common import prepare_dataset
from config.wngrad.helper import scale_criterion, make_train_path, mkdir
from config.wngrad.model import *
from config.wngrad.optimizer import *
from tensorboardX import SummaryWriter
parser = argparse.ArgumentParser(description='WNGrad PyTorch tests',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--device', help='selected CUDA device', default=0, type=int)
parser.add_argument('--seed', help='random seed', default=1, type=int)
parser.add_argument('--model', help='model (logreg, mlp, vgg)', default='vgg', type=str)
parser.add_argument('--method', help='method (sgd, sgdn, adam)', default='sgdn', type=str)
parser.add_argument('--mu', help='momentum', default=0.9, type=float)
parser.add_argument('--weightDecay', help='regularization', default=1e-4, type=float)
parser.add_argument('--batchSize', help='minibatch size', default=128, type=int)
parser.add_argument('--alpha_0', help='initial learning rate', default=1, type=float)
parser.add_argument('--lambda_0', help='ratio to scale loss function', default=0.056, type=float)
parser.add_argument('--epochs', help='stop after this many epochs (0: disregard)', default=100, type=int)
parser.add_argument('--iterations', help='stop after this many iterations (0: disregard)', default=0, type=int)
parser.add_argument('--lossThreshold', help='stop after reaching this loss (0: disregard)', default=0, type=float)
parser.add_argument('--workers', help='number of data loading workers', default=4, type=int)
parser.add_argument('--parallel', help='parallelize', action='store_true')
parser.add_argument('--resume', '-r', action='store_true', help='resume from save_checkpoint')
parser.add_argument('--sess', default='wngrad', type=str, help='session id')
args = parser.parse_args()
args.task = 'mnist' if args.model == 'logreg' or args.model == 'mlp' else 'cifar10'
use_cuda = torch.cuda.is_available()
best_acc = 0 # best test accuracy
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
torch.manual_seed(args.seed)
if use_cuda:
torch.cuda.set_device(args.device)
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.enabled = True
# Data
trainloader, testloader, class_num = prepare_dataset(data_name=args.task,
batch_size=args.batchSize,
num_workers=args.workers)
train_path = make_train_path()
exp_name = '{}_{}_{}_{}_{}'.format(args.task, str(args.seed), args.model, args.method, args.alpha_0)
exp_folder = mkdir(os.path.join(train_path, exp_name))
checkpoint_folder = mkdir(os.path.join(exp_folder, 'checkpoint'))
save_name = exp_name
# Model
if args.resume:
# Load save_checkpoint.
print('==> Resuming from checkpoint..')
assert os.path.isdir(checkpoint_folder), 'Error: no checkpoint directory found!'
try:
checkpoint = torch.load(os.path.join(checkpoint_folder, save_name))
net = checkpoint['net']
best_acc = checkpoint['acc']
start_epoch = checkpoint['epoch'] + 1
torch.set_rng_state(checkpoint['rng_state'])
except Exception as e:
raise Exception(e)
else:
print('==> Building model..')
if args.model == 'logreg':
net = LogReg(28 * 28, 10)
elif args.model == 'mlp':
net = MLP(28 * 28, 1000, 10)
elif args.model == 'vgg':
net = vgg16_bn()
if args.parallel:
net.features = torch.nn.DataParallel(net.features)
else:
raise Exception('Unknown model: {}'.format(args.model))
if use_cuda:
net.cuda()
print('==> Using CUDA..')
print('==> Using {} th GPU in all {}..'.format(args.device, torch.cuda.device_count()))
else:
print("==> Don't use CUDA..")
criterion = scale_criterion(nn.CrossEntropyLoss(), args.lambda_0)
if args.method == 'sgd':
optimizer = SGDWN(net.parameters(), lr=args.alpha_0, weight_decay=args.weightDecay)
elif args.method == 'sgdn':
optimizer = SGDWN(net.parameters(), lr=args.alpha_0, weight_decay=args.weightDecay,
momentum=args.mu, nesterov=True)
else:
raise Exception('Unknown method: {}'.format(args.method))
print('==> Task: {}, Model: {}, Method: {}, Lambda: {}'.format(
args.task, args.model, args.method, args.lambda_0)
)
# Training
def train(epoch, threshold_iter=0, threshold_loss=-1):
print('\nEpoch: %d' % epoch)
net.train()
train_loss = train_acc = train_total = 0
train_alpha = iteration = 0
for batch_idx, (inputs, targets) in enumerate(trainloader):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
        # forward pass, loss computation and optimizer step
optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
_, predicted = torch.max(outputs, 1)
correct = predicted.eq(targets).sum().item()
alpha = optimizer.param_groups[0]['lr']
target_size = targets.size(0)
train_loss += loss.item()
train_acc += correct
train_alpha += alpha
train_total += target_size
iteration += 1
print('[Train] %d th [%d/%d] sLoss: %.3f | sAcc: %.4f%% | sAlpha: %.5f' % (
epoch, batch_idx, len(trainloader), train_loss/(batch_idx+1), 100.*train_acc/train_total,
train_alpha/(batch_idx+1)))
train_writer.add_scalar('train_loss', loss.item(), epoch * len(trainloader) + batch_idx)
train_writer.add_scalar('train_acc', correct/target_size, epoch * len(trainloader) + batch_idx)
train_writer.add_scalar('train_alpha', alpha/target_size, epoch * len(trainloader) + batch_idx)
if threshold_iter != 0 and iteration > threshold_iter:
print('==> Early stopping: iteration > {}'.format(args.iterations))
break
if threshold_loss >= 0 and loss <= threshold_loss:
print('==> Early stopping: loss <= {}'.format(args.lossThreshold))
break
return train_loss/len(trainloader), 100.*train_acc/train_total, train_alpha/train_total
# Testing
def test(epoch):
global best_acc
net.eval()
test_loss = test_acc = test_total = 0
for batch_idx, (inputs, targets) in enumerate(testloader):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
with torch.no_grad():
outputs = net(inputs)
loss = criterion(outputs, targets)
_, predicted = torch.max(outputs, 1)
correct = predicted.eq(targets).sum().item()
test_loss += loss.item()
test_acc += correct
test_total += targets.size(0)
print('[Val] [%d/%d] sLoss: %.3f | sAcc: %.4f%%' % (
batch_idx, len(testloader), test_loss / (batch_idx + 1), 100. * test_acc / test_total))
val_writer.add_scalar('val_loss', test_loss, epoch * len(testloader) + batch_idx)
val_writer.add_scalar('val_acc', correct/targets.size(0), epoch * len(testloader) + batch_idx)
# Save checkpoint.
acc = 100.*test_acc/test_total
if acc > best_acc:
best_acc = acc
save_checkpoint(acc, epoch)
return test_loss/len(testloader), 100.*test_acc/test_total
def save_checkpoint(acc, epoch):
# Save checkpoint.
print('==> Saving..')
state = {
'net': net,
'acc': acc,
'epoch': epoch,
'rng_state': torch.get_rng_state()
}
save_path = os.path.join(checkpoint_folder, save_name)
if not os.path.exists(checkpoint_folder):
os.makedirs(checkpoint_folder)
torch.save(state, save_path)
result_folder = os.path.join(exp_folder, 'results_')
train_event_folder = mkdir(os.path.join(exp_folder, 'train.event'))
val_event_folder = mkdir(os.path.join(exp_folder, 'val.event'))
logname = result_folder + exp_name + '.csv'
train_writer = SummaryWriter(train_event_folder)
val_writer = SummaryWriter(val_event_folder)
if not os.path.exists(logname):
with open(logname, 'w') as logfile:
logwriter = csv.writer(logfile, delimiter=',')
logwriter.writerow(['epoch', 'train loss', 'train acc', 'train alpha', 'test loss', 'test acc'])
for epoch in range(start_epoch, args.epochs):
train_loss, train_acc, train_alpha = train(epoch, args.iterations, args.lossThreshold)
test_loss, test_acc = test(epoch)
with open(logname, 'a') as logfile:
logwriter = csv.writer(logfile, delimiter=',')
logwriter.writerow([epoch, train_loss, train_acc, train_alpha, test_loss, test_acc])
| 40.117647 | 114 | 0.661516 | 1,156 | 8,866 | 4.903114 | 0.219723 | 0.025406 | 0.047989 | 0.016937 | 0.34192 | 0.213832 | 0.203776 | 0.155963 | 0.114326 | 0.099153 | 0 | 0.012786 | 0.19727 | 8,866 | 220 | 115 | 40.3 | 0.783617 | 0.024588 | 0 | 0.125714 | 0 | 0.005714 | 0.144495 | 0 | 0 | 0 | 0 | 0 | 0.005714 | 1 | 0.017143 | false | 0 | 0.062857 | 0 | 0.091429 | 0.068571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b21b1a6160a0946f7e58b472e078c7edf2abfb21 | 948 | py | Python | tests/datasource/test_marketdata.py | m3at/backlight | a80ccbe6193e1fc4c169df2ebc2bf49876d4d39d | [
"MIT"
] | 8 | 2018-11-06T16:48:45.000Z | 2021-02-14T18:02:27.000Z | tests/datasource/test_marketdata.py | m3at/backlight | a80ccbe6193e1fc4c169df2ebc2bf49876d4d39d | [
"MIT"
] | 36 | 2018-11-02T23:21:59.000Z | 2021-02-08T10:27:29.000Z | tests/datasource/test_marketdata.py | m3at/backlight | a80ccbe6193e1fc4c169df2ebc2bf49876d4d39d | [
"MIT"
] | 5 | 2018-11-07T06:05:24.000Z | 2021-11-20T08:57:39.000Z | from backlight.datasource import marketdata as module
import pandas as pd
def test_MarketData():
start_dt = pd.Timestamp("2018-06-06")
end_dt = pd.Timestamp("2018-06-08")
df = pd.DataFrame(
index=pd.date_range(start=start_dt, periods=3),
data=[[0, 2], [2, 4], [4, 6]],
columns=["ask", "bid"],
)
md = module.MarketData(df)
assert md.start_dt == start_dt
assert md.end_dt == end_dt
def test_AskBidMarketData():
df = pd.DataFrame(
index=pd.date_range(start="2018-06-06", periods=3),
data=[[0, 2], [2, 4], [4, 6]],
columns=["ask", "bid"],
)
md = module.AskBidMarketData(df)
assert all(md.mid.values == [1, 3, 5])
def test_MidMarketData():
df = pd.DataFrame(
index=pd.date_range(start="2018-06-06", periods=3),
data=[0, 2, 6],
columns=["mid"],
)
md = module.MidMarketData(df)
assert all(md.mid.values == [0, 2, 6])
| 26.333333 | 59 | 0.583333 | 138 | 948 | 3.913043 | 0.304348 | 0.051852 | 0.044444 | 0.1 | 0.540741 | 0.47037 | 0.388889 | 0.388889 | 0.325926 | 0.325926 | 0 | 0.07767 | 0.239451 | 948 | 35 | 60 | 27.085714 | 0.67129 | 0 | 0 | 0.310345 | 0 | 0 | 0.058017 | 0 | 0 | 0 | 0 | 0 | 0.137931 | 1 | 0.103448 | false | 0 | 0.068966 | 0 | 0.172414 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b21cec4d9d4bbc5245035ff346f8bffc202a267f | 9,665 | py | Python | a2ml/api/azure/model.py | arita37/a2ml | 3e92bede2c2ef6e63be74560cc6b904d3ec9d931 | [
"Apache-2.0"
] | 2 | 2020-04-09T16:59:22.000Z | 2020-04-09T17:01:10.000Z | a2ml/api/azure/model.py | arita37/a2ml | 3e92bede2c2ef6e63be74560cc6b904d3ec9d931 | [
"Apache-2.0"
] | null | null | null | a2ml/api/azure/model.py | arita37/a2ml | 3e92bede2c2ef6e63be74560cc6b904d3ec9d931 | [
"Apache-2.0"
] | null | null | null | import os
import json
from azureml.core import Experiment
from azureml.core.model import Model
from azureml.core.model import InferenceConfig
from azureml.core.webservice import Webservice
from azureml.core.webservice import AciWebservice
from azureml.exceptions import WebserviceException
from azureml.train.automl.run import AutoMLRun
from .project import AzureProject
from .exceptions import AzureException
from .decorators import error_handler
from auger.api.cloud.utils.dataframe import DataFrame
class AzureModel(object):
def __init__(self, ctx):
super(AzureModel, self).__init__()
self.ctx = ctx
@error_handler
def deploy(self, model_id, locally):
if locally:
self.ctx.log('Local deployment step is not required for Azure..')
return {'model_id': model_id}
ws = AzureProject(self.ctx)._get_ws()
experiment_name = self.ctx.config.get('experiment/name', None)
if experiment_name is None:
raise AzureException('Please specify Experiment name...')
iteration, run_id = self._get_iteration(model_id)
experiment = Experiment(ws, experiment_name)
experiment_run = AutoMLRun(experiment = experiment, run_id = run_id)
model_run = AutoMLRun(experiment = experiment, run_id = model_id)
model_name = model_run.properties['model_name']
        self.ctx.log('Registering model: %s' % model_name)
description = '%s-%s' % (model_name, iteration)
model = experiment_run.register_model(
model_name = model_name, iteration=iteration,
description = description, tags = None)
script_file_name = '.azureml/score_script.py'
model_run.download_file(
'outputs/scoring_file_v_1_0_0.py', script_file_name)
# Deploying ACI Service
aci_service_name = self._aci_service_name(model_name)
self.ctx.log('Deploying AciWebservice %s ...' % aci_service_name)
inference_config = InferenceConfig(
environment = model_run.get_environment(),
entry_script = script_file_name)
aciconfig = AciWebservice.deploy_configuration(
cpu_cores = 1,
memory_gb = 2,
tags = {'type': "inference-%s" % aci_service_name},
description = "inference-%s" % aci_service_name)
# Remove any existing service under the same name.
try:
Webservice(ws, aci_service_name).delete()
self.ctx.log('Remove any existing service under the same name...')
except WebserviceException:
pass
aci_service = Model.deploy(
ws, aci_service_name, [model], inference_config, aciconfig)
aci_service.wait_for_deployment(True)
self.ctx.log('%s state %s' % (aci_service_name, str(aci_service.state)))
return {'model_id': model_id, 'aci_service_name': aci_service_name}
@error_handler
def predict(self, filename, model_id, threshold, locally):
ws = AzureProject(self.ctx)._get_ws()
experiment_name = self.ctx.config.get('experiment/name', None)
if experiment_name is None:
raise AzureException('Please specify Experiment name...')
experiment = Experiment(ws, experiment_name)
target = self.ctx.config.get('target', None)
predict_data = DataFrame.load(filename, target)
y_pred = []
if locally:
y_pred, y_proba, proba_classes = self._predict_locally(
experiment, predict_data, model_id, threshold)
else:
y_pred, y_proba, proba_classes = self._predict_remotely(
ws, experiment, predict_data, model_id, threshold)
predict_data[target] = y_pred
if y_proba is not None:
for idx, name in enumerate(proba_classes):
predict_data['proba_'+str(name)] = list(y_proba[:,idx])
predicted = self._save_predictions(predict_data, filename)
return {'predicted': predicted}
@error_handler
def actual(self, filename, model_id):
pass
def _get_iteration(self, model_id):
iteration = None
run_id = model_id
parts = model_id.split('_')
if len(parts) > 2:
run_id = parts[0]+"_"+parts[1]
iteration = parts[2]
return iteration, run_id
def _aci_service_name(self, model_name):
# It must only consist of lowercase letters, numbers, or dashes, start
# with a letter, end with a letter or number, and be between 3 and 32
# characters long.
        # TODO - service_name + suffix must satisfy requirements
return (model_name+'-service').lower()
def _predict_remotely(
self, ws, experiment, predict_data, model_id, threshold):
input_payload = predict_data.to_json(orient='split', index = False)
remote_run = AutoMLRun(experiment = experiment, run_id = model_id)
model_name = remote_run.properties['model_name']
aci_service_name = self._aci_service_name(model_name)
aci_service = AciWebservice(ws, aci_service_name)
input_payload = json.loads(input_payload)
# If you have a classification model, you can get probabilities by changing this to 'predict_proba'.
method = 'predict'
if threshold is not None:
method = 'predict_proba'
input_payload = {
'method': method,
'data': input_payload['data']
}
input_payload = json.dumps(input_payload)
try:
response = aci_service.run(input_data = input_payload)
print(response)
except Exception as e:
print('err log', aci_service.get_logs())
raise e
results_proba = None
proba_classes = None
return json.loads(response)['result'], results_proba, proba_classes
def _predict_locally(self, experiment, predict_data, model_id, threshold):
run_id = model_id
iteration = None
parts = model_id.split('_')
if len(parts) > 2:
run_id = parts[0]+"_"+parts[1]
iteration = parts[2]
remote_run = AutoMLRun(experiment = experiment, run_id = run_id)
best_run, fitted_model = remote_run.get_output(iteration=iteration)
results_proba = None
proba_classes = None
if threshold is not None:
results_proba = fitted_model.predict_proba(predict_data)
proba_classes = list(fitted_model.classes_)
result = self._calculate_proba_target(results_proba,
proba_classes, None, threshold, None)
else:
result = fitted_model.predict(predict_data)
return result, results_proba, proba_classes
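    # Threshold handling example (hypothetical binary case): with proba_classes = [0, 1]
    # and threshold = 0.7, a probability row [0.35, 0.65] has its best class (index 1)
    # below the threshold, so _calculate_proba_target() below falls back to proba_classes[0].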
def _calculate_proba_target(self, results_proba, proba_classes, proba_classes_orig, threshold, minority_target_class=None):
import json
results = []
if type(threshold) == str:
try:
threshold = float(threshold)
except:
try:
threshold = json.loads(threshold)
except Exception as e:
raise Exception("Threshold '%s' should be float or hash with target classes. Error: %s"%(threshold, str(e)))
if type(threshold) != dict and minority_target_class is not None:
threshold = {minority_target_class:threshold}
print("Prediction threshold: %s, %s"%(threshold, proba_classes_orig))
#print(results_proba)
if type(threshold) == dict:
mapped_threshold = {}
if not proba_classes_orig:
proba_classes_orig = proba_classes
for name, value in threshold.items():
idx_class = None
for idx, item in enumerate(proba_classes_orig):
if item == name:
idx_class = idx
break
if idx_class is None:
raise Exception("Unknown target class in threshold: %s, %s"%(name, proba_classes_orig))
mapped_threshold[idx_class] = value
for item in results_proba:
proba_idx = None
for idx, value in mapped_threshold.items():
if item[idx] >= value:
proba_idx = idx
break
if proba_idx is None:
proba_idx = 0
for idx, value in enumerate(item):
if idx not in mapped_threshold:
proba_idx = idx
break
results.append(proba_classes[proba_idx])
else:
#TODO: support multiclass classification
for item in results_proba:
max_proba_idx = 0
for idx, prob in enumerate(item):
if prob > item[max_proba_idx]:
max_proba_idx = idx
if item[max_proba_idx] < threshold:
if max_proba_idx > 0:
max_proba_idx = 0
else:
max_proba_idx = 1
results.append(proba_classes[max_proba_idx])
return results
def _save_predictions(self, df_predictions, filename):
predicted_path = os.path.abspath(
os.path.splitext(filename)[0] + "_predicted.csv")
df_predictions.to_csv(predicted_path, index=False, encoding='utf-8')
self.ctx.log('Predictions are saved to %s' % predicted_path)
return predicted_path
| 37.607004 | 128 | 0.610864 | 1,106 | 9,665 | 5.083183 | 0.192586 | 0.037353 | 0.034863 | 0.022768 | 0.280505 | 0.198862 | 0.162576 | 0.146567 | 0.105301 | 0.090715 | 0 | 0.003434 | 0.307087 | 9,665 | 256 | 129 | 37.753906 | 0.836046 | 0.045835 | 0 | 0.294416 | 0 | 0 | 0.072297 | 0.00597 | 0 | 0 | 0 | 0.003906 | 0 | 1 | 0.050761 | false | 0.010152 | 0.071066 | 0.005076 | 0.172589 | 0.015228 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b222fa1e690762693a20f621cd41ae68965f2844 | 2,269 | py | Python | roubus/tb3/monitorNoc.py | vhnatyk/vlsistuff | 0981097bd19a0c482728dcc5048a3615ac9a9a90 | [
"MIT"
] | 26 | 2018-03-17T18:14:22.000Z | 2022-03-14T07:23:13.000Z | roubus/tb3/monitorNoc.py | psumesh/vlsistuff | 1fe64b093d0581d99c7d826b74c31b8655fa0b31 | [
"MIT"
] | 1 | 2019-10-16T10:31:11.000Z | 2019-10-17T04:14:53.000Z | roubus/tb3/monitorNoc.py | psumesh/vlsistuff | 1fe64b093d0581d99c7d826b74c31b8655fa0b31 | [
"MIT"
] | 7 | 2018-07-16T07:51:25.000Z | 2022-02-15T14:22:54.000Z |
import logs
import string
TWID = 5
AWID = 32
BWID = 4
DWID = 128
# mngmnt (management) message kinds
MSG_RESET = 0
MSG_ENUMERATE = 2
MSG_CONTROL = 4
MSG_REPORT = 6
TagMask = (1<<TWID)-1
BytesMask = (1<<BWID)-1
AddressMask = (1<<AWID)-1
class monitorNocClass:
def __init__(self,Path,Str,Monitors):
self.Path = Path
self.List=[]
self.Nick={}
self.more(Str)
Monitors.append(self)
def more(self,Str):
LL = string.split(Str)
for Item in LL:
if '@' in Item:
ww = string.split(Item,'@')
self.List.append(ww[0])
self.Nick[ww[0]]=ww[1]
else:
self.List.append(Item)
self.Nick[Item]=Item
def peek(self,Sig):
return logs.peek('%s.%s'%(self.Path,Sig))
def run(self):
for Sig in self.List:
Val = self.peek(Sig)
Nick = self.Nick[Sig]
Kind = Val & 3
if Val<0:
logs.log_error('%s is X (%s) '%(Sig,Nick))
elif Kind in [1,2,3]:
Str = self.parse(Val)
logs.log_info('>>>> %s %s %s'%(Sig,Nick,Str))
if (Sig=='net7'):
logs.log_info('>>>> %s 0x%x'%(Sig,Val))
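    # Message word layout, as decoded by parse() below (LSB first):
    #   bits [1:0]    Kind  : 0=idle, 1=read, 2=write, 3=mngmnt
    #   bits [6:2]    Tags  : TWID=5 bits (for mngmnt: reset/enumerate/control/report)
    #   bits [10:7]   Bytes : BWID=4 bits
    #   bits [42:11]  Addr  : AWID=32 bits
    #   bits [43:..]  Data  : remaining bits (for reads: return address + 16-bit byte count)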
def parse(self,Msg):
Kind = Msg & 3
Msg = Msg >> 2
Tags = Msg & TagMask
Msg = Msg >> TWID
Bytes = Msg & BytesMask
Msg = Msg >> BWID
Addr = Msg & AddressMask
Msg = Msg >> AWID
Data = Msg
if Kind==1: Kind='read'
elif Kind==2: Kind='write'
elif Kind==3: Kind='mngmnt'
else: Kind='idle'
if Kind=='mngmnt':
if Tags==MSG_RESET: Tags='reset'
elif Tags==MSG_ENUMERATE: Tags='enumerate'
elif Tags==MSG_CONTROL: Tags='control'
elif Tags==MSG_REPORT: Tags='report'
if Kind=='read':
Return = Data & AddressMask
DD = Data >> AWID
Rbytes = DD & 0xffff
Str = '%s %s bytes=%d addr=0x%x return=0x%x bytes=%d'%(Kind,Tags,Bytes,Addr,Return,Rbytes)
return Str
Str = '%s %s bytes=%d addr=0x%x data=0x%x'%(Kind,Tags,Bytes,Addr,Data)
return Str
| 24.397849 | 102 | 0.484354 | 297 | 2,269 | 3.649832 | 0.249158 | 0.009225 | 0.030443 | 0.02214 | 0.03321 | 0.03321 | 0.03321 | 0.03321 | 0 | 0 | 0 | 0.026148 | 0.376377 | 2,269 | 92 | 103 | 24.663043 | 0.739929 | 0.005729 | 0 | 0.028169 | 0 | 0 | 0.081741 | 0 | 0 | 0 | 0.002665 | 0 | 0 | 1 | 0.070423 | false | 0 | 0.028169 | 0.014085 | 0.15493 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2236b4518673c8bd54127f6f9d6172d741fee76 | 1,098 | py | Python | minato_namikaze/lib/database/badges.py | EitoZX/yondaime-hokage | c86285b385a60e3e47b9a7205ae36e7249b47eee | [
"Apache-2.0"
] | null | null | null | minato_namikaze/lib/database/badges.py | EitoZX/yondaime-hokage | c86285b385a60e3e47b9a7205ae36e7249b47eee | [
"Apache-2.0"
] | null | null | null | minato_namikaze/lib/database/badges.py | EitoZX/yondaime-hokage | c86285b385a60e3e47b9a7205ae36e7249b47eee | [
"Apache-2.0"
] | null | null | null | from discord.ext.commands import Context
from ..util import ChannelAndMessageId
class Badges:
'''A database handler for the Badges class'''
def __init__(self, ctx: Context):
self.ctx = ctx
self.channel = ctx.get_config_channel_by_name_or_id(
ChannelAndMessageId.badges_channel.value)
async def get_all_badges(self) -> list:
"""|coro|
Returns all the badges
:return: List of all badges data
:rtype: list
"""
return [
dict(
badge_name=i.content,
code=self.get_badge_code(i.content),
file_name=i.attachments[0],
is_inverted=False,
) async for i in self.channel.history(limit=None)
]
@staticmethod
def get_badge_code(badge_name: str) -> str:
"""Returns the badge code from its name
:param badge_name: The name of the badge image
:type badge_name: str
:return: Badge Code
:rtype: str
"""
return "".join(list(badge_name.split(" ")))
| 28.153846 | 61 | 0.576503 | 131 | 1,098 | 4.648855 | 0.450382 | 0.073892 | 0.039409 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001364 | 0.332423 | 1,098 | 38 | 62 | 28.894737 | 0.829468 | 0.162113 | 0 | 0 | 0 | 0 | 0.001328 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.105263 | 0 | 0.368421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b224bc002e3376f1163d98eaf921d2f1bcc29f03 | 9,288 | py | Python | vim/bundle/vim-pandoc/pythonx/vim_pandoc/bib/citeproc.py | jecxjo/dotfiles | a34884607fd145c9c89712e046095f772f57c206 | [
"CC0-1.0"
] | null | null | null | vim/bundle/vim-pandoc/pythonx/vim_pandoc/bib/citeproc.py | jecxjo/dotfiles | a34884607fd145c9c89712e046095f772f57c206 | [
"CC0-1.0"
] | null | null | null | vim/bundle/vim-pandoc/pythonx/vim_pandoc/bib/citeproc.py | jecxjo/dotfiles | a34884607fd145c9c89712e046095f772f57c206 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python2
# vim: set fdm=marker:
# imports {{{1
from subprocess import check_output
import json
import re
try:
from vim_pandoc.bib.collator import SourceCollator
from vim_pandoc.bib.util import flatten
except:
from collator import SourceCollator
from util import flatten
# _bib_extensions {{{1
# Filetypes that citeproc.py will attempt to parse.
_bib_extensions = ["bib",\
"bibtex",\
"ris",\
"mods",\
"json",\
"enl",\
"wos",\
"medline",\
"copac",\
"xml"]
# _significant_tags {{{1
# Tags that citeproc.py will search in, together with scaling
# factors for relative importance. These are currently non-functional.
_significant_tags = {"id": 0.5,
"author": 1.0,
"issued": 1.0,
"title": 1.0,
"publisher": 1.0,
"abstract": 0.1}
# _variable_type {{{1
# Map of tags -> types.
_variable_type = {
"abstract": "plain",
"annote": "plain",
"archive": "plain",
"archive_location": "plain",
"archive-place": "plain",
"authority": "plain",
"call-number": "plain",
"citation-label": "plain",
"citation-number": "plain",
"collection-title": "plain",
"container-title": "plain",
"container-title-short": "plain",
"dimensions": "plain",
"doi": "plain",
"event": "plain",
"event-place": "plain",
"first-reference-note-number": "plain",
"genre": "plain",
"isbn": "plain",
"issn": "plain",
"jurisdiction": "plain",
"keyword": "plain",
"locator": "plain",
"medium": "plain",
"note": "plain",
"original-publisher": "plain",
"original-publisher-place": "plain",
"original-title": "plain",
"page": "plain",
"page-first": "plain",
"pmcid": "plain",
"pmid": "plain",
"publisher": "plain",
"publisher-place": "plain",
"references": "plain",
"reviewed-title": "plain",
"scale": "plain",
"section": "plain",
"source": "plain",
"status": "plain",
"title": "plain",
"title-short": "plain",
"url": "plain",
"version": "plain",
"year-suffix": "plain",
"chapter-number": "number",
"collection-number": "number",
"edition": "number",
"issue": "number",
"number": "number",
"number-of-pages": "number",
"number-of-volumes": "number",
"volume": "number",
"accessed": "date",
"container": "date",
"event-date": "date",
"issued": "date",
"original-date": "date",
"submitted": "date",
"author": "name",
"collection-editor": "name",
"composer": "name",
"container-author": "name",
"director": "name",
"editor": "name",
"editorial-director": "name",
"illustrator": "name",
"interviewer": "name",
"original-author": "name",
"recipient": "name",
"reviewed-author": "name",
"translator": "name"
}
class CSLItem: #{{{1
# This class implements various helper methods for CSL-JSON formatted bibliography
# entries.
def __init__(self, entry): #{{{2
self.data = entry
def as_array(self, variable_name): #{{{2
def plain(variable_contents): #{{{3
# Takes the contents of a 'plain' variable and splits it into an array.
return unicode(variable_contents).split('\n')
def number(variable_contents): #{{{3
return [unicode(variable_contents)]
def name(variable_contents): #{{{3
# Parses "name" CSL Variables and returns an array of names.
def surname(author):
# Concat dropping particle and non-dropping particle with family name.
return [" ".join((author.get("dropping-particle", ""),
author.get("non-dropping-particle", ""),
author.get("family", ""))).strip()]
def given_names(author):
return [author.get("given", "").strip()]
def literal_name(author):
# It seems likely there is some particular reason for the author being
# a literal, so don't try and do clever stuff like splitting into tokens...
return [author.get("literal", "").strip()]
names = []
for author in variable_contents:
name = ""
if "literal" in author:
name = literal_name(author)
else:
name = surname(author) + given_names(author)
names.append(name)
return names
def date(variable_contents): #{{{3
# Currently a placeholder. Will parse 'date' CSL variables and return an array of
# strings for matches.
def date_parse(raw_date_array):
# Presently, this function returns the date in yyyy-mm-dd format. In future, it
# will provide a variety of alternative forms.
date = [unicode(x) for x in raw_date_array]
return ["-".join(date)]
def date_parts(date_parts_contents):
# Call date_parts for each element.
response = []
for date in date_parts_contents:
response.extend(date_parse(date))
return response
def season(season_type):
# Not actually clear from the spec what is meant to go in here. Zotero doesn't
# 'do' seasons, and I can't work it out from the pandoc-citeproc source. Will
# try and make this work when I have useful internet
season_lookup = {1: "spring",
2: "summer",
3: "autumn",
4: "winter"}
return []
def circa(circa_boolean):
return []
def literal(date_string):
return [date_string]
date_function_lookup = {"date-parts": date_parts,
"season": season,
"circa": circa,
"literal": literal,
"raw": literal}
response = []
for element in variable_contents:
response.extend(date_function_lookup[element](variable_contents[element]))
return response
# }}}3
variable_contents = self.data.get(variable_name, False)
if variable_contents:
return eval(_variable_type.get(variable_name, "plain"))(variable_contents)
else:
return []
def match(self, query): #{{{2
# Matching engine. Returns 1 if match found, 0 otherwise.
# Expects query to be a compiled regexp.
# Very simple, just searches for substrings. Could be updated
# to provide a 'matches' value for ranking? Using numbers here
# so as to permit this future application.
matched = False
for variable in _significant_tags:
for token in self.as_array(variable):
matched = matched or query.search(flatten(token))
if matched:
break
if matched:
return 1
else:
return 0
def matches(self, query): #{{{2
# Provides a boolean match response to query.
# Expects query to be a compiled regexp.
if self.match(query) == 0:
return False
else:
return True
def relevance(self, query): #{{{2
# Returns the relevance of an item for a query
query = re.compile(query, re.I)
relevance = float(0.0)
tags_matched = []
for tag in _significant_tags:
for token in self.as_array(tag):
if query.search(flatten(token)):
tags_matched.append(tag)
break
if tags_matched != []:
relevance = sum([_significant_tags[t] for t in tags_matched])
return relevance
class CiteprocSource: #{{{1
def __init__(self, bib): #{{{2
try:
raw_bib = json.loads(check_output(["pandoc-citeproc", "-j", bib]))
except:
raw_bib = []
self.data = [CSLItem(entry) for entry in raw_bib]
def __iter__(self): #{{{2
for a in self.data:
yield a
class CiteprocCollator(SourceCollator): #{{{1
def collate(self): #{{{2
data = []
for bib in self.find_bibfiles():
for item in CiteprocSource(bib):
if item.matches(re.compile(self.query, re.I)) and item not in data:
data.append(item)
data.sort(key=lambda i: i.relevance(self.query), reverse=True)
return [item.data for item in data]
| 33.530686 | 95 | 0.514643 | 950 | 9,288 | 4.937895 | 0.312632 | 0.040929 | 0.014496 | 0.006822 | 0.029418 | 0.029418 | 0.029418 | 0.016201 | 0.016201 | 0 | 0 | 0.007454 | 0.364449 | 9,288 | 276 | 96 | 33.652174 | 0.787227 | 0.179048 | 0 | 0.092683 | 0 | 0 | 0.172409 | 0.012277 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097561 | false | 0 | 0.034146 | 0.034146 | 0.243902 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2254eeaf2a6a6ceffea463827308958de9e9c53 | 606 | py | Python | tests/test_pagination.py | NeoLight1010/strawberry-graphql-django | 86d0dbb606a1dd0d96bb79a4cdd6902c6a515b2f | [
"MIT"
] | 1 | 2021-08-31T08:06:52.000Z | 2021-08-31T08:06:52.000Z | tests/test_pagination.py | NeoLight1010/strawberry-graphql-django | 86d0dbb606a1dd0d96bb79a4cdd6902c6a515b2f | [
"MIT"
] | null | null | null | tests/test_pagination.py | NeoLight1010/strawberry-graphql-django | 86d0dbb606a1dd0d96bb79a4cdd6902c6a515b2f | [
"MIT"
] | null | null | null | import pytest
import strawberry
import strawberry_django
from strawberry_django import auto
from typing import List
from tests import utils, models
@strawberry_django.type(models.Fruit, pagination=True)
class Fruit:
name: auto
@strawberry.type
class Query:
fruits: List[Fruit] = strawberry_django.field()
@pytest.fixture
def query():
return utils.generate_query(Query)
def test_pagination(query, fruits):
result = query('{ fruits(pagination: { offset: 1, limit:1 }) { name } }')
assert not result.errors
assert result.data['fruits'] == [
{ 'name': 'raspberry'},
]
| 21.642857 | 77 | 0.714521 | 75 | 606 | 5.693333 | 0.453333 | 0.149883 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004 | 0.174917 | 606 | 27 | 78 | 22.444444 | 0.85 | 0 | 0 | 0 | 0 | 0 | 0.122112 | 0 | 0 | 0 | 0 | 0 | 0.095238 | 1 | 0.095238 | false | 0 | 0.285714 | 0.047619 | 0.619048 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b226c1352a18aed2fe4879924d072b66da70fa22 | 2,226 | py | Python | pwnlib/flag/flag.py | cclauss/pwntools | 899baec7048559db65a2e9ad784be4cb60c181da | [
"MIT"
] | 7 | 2017-07-11T01:12:02.000Z | 2017-09-21T23:39:54.000Z | pwnlib/flag/flag.py | cclauss/pwntools | 899baec7048559db65a2e9ad784be4cb60c181da | [
"MIT"
] | null | null | null | pwnlib/flag/flag.py | cclauss/pwntools | 899baec7048559db65a2e9ad784be4cb60c181da | [
"MIT"
] | 2 | 2021-03-02T21:04:58.000Z | 2021-12-20T02:43:21.000Z | """Describes a way to submit a key to a key server.
"""
from __future__ import absolute_import
import os
from pwnlib.args import args
from pwnlib.log import getLogger
from pwnlib.tubes.remote import remote
env_server = args.get('FLAG_HOST', 'flag-submission-server').strip()
env_port = args.get('FLAG_PORT', '31337').strip()
env_proto = args.get('FLAG_PROTO', 'tcp').strip()
env_file = args.get('FLAG_FILE', '/does/not/exist').strip()
env_exploit_name = args.get('EXPLOIT_NAME', 'unnamed-exploit').strip()
env_target_host = args.get('TARGET_HOST', 'unknown-target').strip()
env_team_name = args.get('TEAM_NAME', 'unknown-team').strip()
log = getLogger(__name__)
def submit_flag(flag,
exploit=env_exploit_name,
target=env_target_host,
server=env_server,
port=env_port,
proto=env_proto,
team=env_team_name):
"""
Submits a flag to the game server
Arguments:
flag(str): The flag to submit.
exploit(str): Exploit identifier, optional
target(str): Target identifier, optional
server(str): Flag server host name, optional
port(int): Flag server port, optional
        proto(str): Flag server protocol, optional
Optional arguments are inferred from the environment,
or omitted if none is set.
Returns:
A string indicating the status of the key submission,
or an error code.
Doctest:
>>> l = listen()
>>> _ = submit_flag('flag', server='localhost', port=l.lport)
>>> c = l.wait_for_connection()
>>> c.recvall().split()
['flag', 'unnamed-exploit', 'unknown-target', 'unknown-team']
"""
flag = flag.strip()
log.success("Flag: %r" % flag)
data = "\n".join([flag,
exploit,
target,
team,
''])
if os.path.exists(env_file):
write(env_file, data)
return
try:
with remote(server, int(port)) as r:
r.send(data)
return r.recvall(timeout=1)
except Exception:
log.warn("Could not submit flag %r to %s:%s" % (flag, server, port))
| 30.081081 | 76 | 0.596137 | 281 | 2,226 | 4.572954 | 0.341637 | 0.038132 | 0.034241 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003745 | 0.280323 | 2,226 | 73 | 77 | 30.493151 | 0.798377 | 0.34097 | 0 | 0 | 0 | 0 | 0.14421 | 0.016023 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.138889 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b22809997900ab95e29bac8dee2e2d430092d432 | 2,552 | py | Python | gluon/packages/dal/tests/caching.py | GeorgesBrantley/ResistanceGame | 65ec925ec8399af355e176c4814a749fde5f907d | [
"BSD-3-Clause"
] | 408 | 2015-01-01T10:31:47.000Z | 2022-03-26T17:41:21.000Z | gluon/packages/dal/tests/caching.py | GeorgesBrantley/ResistanceGame | 65ec925ec8399af355e176c4814a749fde5f907d | [
"BSD-3-Clause"
] | 521 | 2015-01-08T14:45:54.000Z | 2022-03-24T11:15:22.000Z | gluon/packages/dal/tests/caching.py | GeorgesBrantley/ResistanceGame | 65ec925ec8399af355e176c4814a749fde5f907d | [
"BSD-3-Clause"
] | 158 | 2015-01-25T20:02:00.000Z | 2022-03-01T06:29:12.000Z | import time
import pickle
from pydal import DAL, Field
from ._compat import unittest
from ._adapt import DEFAULT_URI, IS_IMAP, IS_MSSQL
from ._helpers import DALtest
class SimpleCache(object):
storage = {}
def clear(self):
self.storage.clear()
def _encode(self, value):
return value
def _decode(self, value):
return value
def __call__(self, key, f, time_expire=300):
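        # Return the cached value for `key` if it is younger than `time_expire`
        # seconds; otherwise call f() and cache the result. Calling with f=None
        # removes the key instead of computing a value.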
dt = time_expire
now = time.time()
item = self.storage.get(key, None)
if item and f is None:
del self.storage[key]
if f is None:
return None
if item and (dt is None or item[0] > now - dt):
return self._decode(item[1])
value = f()
self.storage[key] = (now, self._encode(value))
return value
class PickleCache(SimpleCache):
def _encode(self, value):
return pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
def _decode(self, value):
return pickle.loads(value)
@unittest.skipIf(IS_IMAP, "TODO: IMAP test")
class TestCache(DALtest):
def testRun(self):
cache = SimpleCache()
db = self.connect()
db.define_table("tt", Field("aa"))
db.tt.insert(aa="1")
r0 = db().select(db.tt.ALL)
r1 = db().select(db.tt.ALL, cache=(cache, 1000))
self.assertEqual(len(r0), len(r1))
r2 = db().select(db.tt.ALL, cache=(cache, 1000))
self.assertEqual(len(r0), len(r2))
r3 = db().select(db.tt.ALL, cache=(cache, 1000), cacheable=True)
self.assertEqual(len(r0), len(r3))
r4 = db().select(db.tt.ALL, cache=(cache, 1000), cacheable=True)
self.assertEqual(len(r0), len(r4))
@unittest.skipIf(IS_MSSQL, "Class nesting in ODBC driver breaks pickle")
def testPickling(self):
db = self.connect()
cache = (PickleCache(), 1000)
db.define_table(
"tt",
Field("aa"),
Field("bb", type="integer"),
Field("cc", type="decimal(5,2)"),
)
db.tt.insert(aa="1", bb=2, cc=3)
r0 = db(db.tt).select(db.tt.ALL)
csv0 = str(r0)
r1 = db(db.tt).select(db.tt.ALL, cache=cache)
self.assertEqual(csv0, str(r1))
r2 = db(db.tt).select(db.tt.ALL, cache=cache)
self.assertEqual(csv0, str(r2))
r3 = db(db.tt).select(db.tt.ALL, cache=cache, cacheable=True)
self.assertEqual(csv0, str(r3))
r4 = db(db.tt).select(db.tt.ALL, cache=cache, cacheable=True)
self.assertEqual(csv0, str(r4))
| 30.023529 | 76 | 0.582288 | 352 | 2,552 | 4.15625 | 0.255682 | 0.04648 | 0.068353 | 0.088859 | 0.462748 | 0.360219 | 0.330144 | 0.317157 | 0.317157 | 0.317157 | 0 | 0.031703 | 0.270768 | 2,552 | 84 | 77 | 30.380952 | 0.754433 | 0 | 0 | 0.130435 | 0 | 0 | 0.035266 | 0 | 0 | 0 | 0 | 0 | 0.115942 | 1 | 0.115942 | false | 0 | 0.086957 | 0.057971 | 0.362319 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b22b5ffcd46036b86acd6eb916fe93660be83eaf | 3,370 | py | Python | app/views/drivers/driver_routes.py | tjdaley/publicdataws | 1aa4a98cf47fae10cc0f59a8d01168df806b4919 | [
"MIT"
] | null | null | null | app/views/drivers/driver_routes.py | tjdaley/publicdataws | 1aa4a98cf47fae10cc0f59a8d01168df806b4919 | [
"MIT"
] | null | null | null | app/views/drivers/driver_routes.py | tjdaley/publicdataws | 1aa4a98cf47fae10cc0f59a8d01168df806b4919 | [
"MIT"
] | null | null | null | """
driver_routes.py - Handle the routes for searching and displaying drivers.
This module provides the views for the following routes:
/driver/<string:db>/<string:ed>/<string:rec>/<string:state>/
/search/dl_address
/search/dl
Copyright (c) 2019 by Thomas J. Daley. All Rights Reserved.
"""
from flask import Blueprint, render_template, redirect, request, session, flash, url_for, jsonify
import random
from passlib.hash import sha256_crypt
from views.decorators import is_logged_in, is_case_set
from webservice import WebService
WEBSERVICE = WebService(None)
# Helper to create Public Data credentials from session variables
def pd_credentials(mysession) -> dict:
return {"username": session["pd_username"], "password": session["pd_password"]}
def search_drivers(search_type, search_terms, search_state):
(success, message, results) = WEBSERVICE.drivers_license(
pd_credentials(session),
search_terms=search_terms,
search_scope=search_type,
us_state=search_state)
if success:
if not results:
message = """
No drivers found that match ALL the search criteria. This can be for two reasons:
(1) There really aren't any drivers that match the combined search criteria; or
            (2) The search criteria were too broad, which caused the search results to be truncated, thus
reducing the number of drivers that matched all criteria. If you used a criterion in the "entire record"
field that would return more than 1000 results, the second explanation probably applies.
"""
flash(message, "warning")
return redirect(url_for('driver_routes.search_dl'))
flash("Found {} matching drivers.".format(len(results)), "success")
# if 'case' in session:
# filter_results(results, session['case']['_id'], "PERSON")
results = sorted(results, key=lambda i: (i.case_status, i.driver_name))
return render_template('drivers.html', drivers=results)
form = request.form
return render_template("search_error.html", formvariables=form, operation="Search: DL", message=message)
driver_routes = Blueprint("driver_routes", __name__, template_folder="templates")
@driver_routes.route('/search/dl_address', methods=['GET'])
@is_logged_in
def search_dl_address():
search_type = "main"
search_terms = request.args.get('a')
search_state = request.args.get('s').lower()
return search_drivers(search_type, search_terms, search_state)
@driver_routes.route('/search/dl', methods=['GET', 'POST'])
@is_logged_in
def search_dl():
if request.method == 'GET':
return render_template('search_dl.html')
form = request.form
search_type = form["search_type"]
search_terms = form["search_terms"]
search_state = form["state"]
return search_drivers(search_type, search_terms, search_state)
@driver_routes.route('/driver/<string:db>/<string:ed>/<string:rec>/<string:state>/', methods=['GET'])
@is_logged_in
def driver_details(db, ed, rec, state):
(success, message, result) = WEBSERVICE.driver_details(pd_credentials(session), db, ed, rec, state)
if success:
return render_template('driver.html', driver=result)
return render_template("search_error.html", formvariables=[], operation="Search: DL Details", message=message)
| 37.444444 | 116 | 0.710682 | 441 | 3,370 | 5.253968 | 0.356009 | 0.034527 | 0.044022 | 0.036254 | 0.202417 | 0.188174 | 0.155805 | 0.114372 | 0.09495 | 0.058697 | 0 | 0.004691 | 0.177745 | 3,370 | 89 | 117 | 37.865169 | 0.831469 | 0.128487 | 0 | 0.163636 | 0 | 0.018182 | 0.302699 | 0.028357 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0.036364 | 0.090909 | 0.018182 | 0.345455 | 0.036364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b22bae86b2a172039a1ba2e5e91e90a703e4bd98 | 2,416 | py | Python | kornia/augmentation/_2d/intensity/grayscale.py | Ishticode/kornia | 974abb43ec72d12dbd244a2fb247bbbab8498de0 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2022-03-15T02:24:30.000Z | 2022-03-15T02:24:30.000Z | kornia/augmentation/_2d/intensity/grayscale.py | Ishticode/kornia | 974abb43ec72d12dbd244a2fb247bbbab8498de0 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2022-03-12T03:14:18.000Z | 2022-03-12T03:14:18.000Z | kornia/augmentation/_2d/intensity/grayscale.py | Ishticode/kornia | 974abb43ec72d12dbd244a2fb247bbbab8498de0 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | from typing import Dict, Optional
import torch
from torch import Tensor
from kornia.augmentation._2d.intensity.base import IntensityAugmentationBase2D
from kornia.color import rgb_to_grayscale
class RandomGrayscale(IntensityAugmentationBase2D):
r"""Apply random transformation to Grayscale according to a probability p value.
.. image:: _static/img/RandomGrayscale.png
Args:
p: probability of the image to be transformed to grayscale.
same_on_batch: apply the same transformation across the batch.
keepdim: whether to keep the output shape the same as input (True) or broadcast it
to the batch form (False).
Shape:
- Input: :math:`(C, H, W)` or :math:`(B, C, H, W)`, Optional: :math:`(B, 3, 3)`
- Output: :math:`(B, C, H, W)`
.. note::
This function internally uses :func:`kornia.color.rgb_to_grayscale`.
Examples:
>>> rng = torch.manual_seed(0)
>>> inputs = torch.randn((1, 3, 3, 3))
>>> aug = RandomGrayscale(p=1.0)
>>> aug(inputs)
tensor([[[[-1.1344, -0.1330, 0.1517],
[-0.0791, 0.6711, -0.1413],
[-0.1717, -0.9023, 0.0819]],
<BLANKLINE>
[[-1.1344, -0.1330, 0.1517],
[-0.0791, 0.6711, -0.1413],
[-0.1717, -0.9023, 0.0819]],
<BLANKLINE>
[[-1.1344, -0.1330, 0.1517],
[-0.0791, 0.6711, -0.1413],
[-0.1717, -0.9023, 0.0819]]]])
    To apply the exact augmentation again, you may take advantage of the previous parameter state:
>>> input = torch.randn(1, 3, 32, 32)
>>> aug = RandomGrayscale(p=1.0)
>>> (aug(input) == aug(input, params=aug._params)).all()
tensor(True)
"""
def __init__(
self,
same_on_batch: bool = False,
p: float = 0.1,
keepdim: bool = False,
return_transform: Optional[bool] = None,
) -> None:
super().__init__(p=p, return_transform=return_transform, same_on_batch=same_on_batch, keepdim=keepdim)
def apply_transform(
self, input: Tensor, params: Dict[str, Tensor], transform: Optional[Tensor] = None
) -> Tensor:
# Make sure it returns (*, 3, H, W)
grayscale = torch.ones_like(input)
grayscale[:] = rgb_to_grayscale(input)
return grayscale
| 35.529412 | 110 | 0.573675 | 307 | 2,416 | 4.413681 | 0.368078 | 0.04059 | 0.032472 | 0.02214 | 0.160148 | 0.148339 | 0.112915 | 0.112915 | 0.112915 | 0.112915 | 0 | 0.092075 | 0.289735 | 2,416 | 67 | 111 | 36.059701 | 0.697552 | 0.60596 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.238095 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b22bce67a19e43295e841d3e6517a911e44726b0 | 2,045 | py | Python | rumor/domain/report.py | SudoQ/rumor | a02a0b8b4ee929d9cd41c33816b1533c24b9cdb3 | [
"MIT"
] | 1 | 2019-09-16T13:57:27.000Z | 2019-09-16T13:57:27.000Z | rumor/domain/report.py | SudoQ/rumor | a02a0b8b4ee929d9cd41c33816b1533c24b9cdb3 | [
"MIT"
] | null | null | null | rumor/domain/report.py | SudoQ/rumor | a02a0b8b4ee929d9cd41c33816b1533c24b9cdb3 | [
"MIT"
] | null | null | null | from datetime import datetime, timedelta
from typing import Any, Dict, List
from logzero import logger
from rumor.upstreams.aws import get_reports, send_notification
def send_reports(report_period_hours: int, evaluation_report_table_name: str,
topic_arn_hint: str) -> None:
created_at_to = datetime.now()
created_at_from = created_at_to - timedelta(hours=report_period_hours)
reports = get_reports(evaluation_report_table_name, created_at_from,
created_at_to)
send(reports, topic_arn_hint)
def send(reports: List[Dict[str, Any]], topic_arn_hint: str) -> None:
if len(reports) == 0:
logger.info('No reports to send')
return
for report in reports:
formated_report = format_report(report)
send_notification(formated_report, topic_arn_hint, 'Rumor Report')
logger.info('Sent {} report(s)'.format(len(reports)))
def format_report(report: Dict[str, Any]) -> str:
attributes = get_attributes(report)
head_template = 'Created {created_at_pretty}\n\n'
body_template = (
'[{score} + {score_bonus}] {title}\n'
'{url}\n'
'\n'
)
head = head_template.format(**attributes)
body = ''
for news_item in attributes['news_items']:
body += body_template.format(**news_item)
formated_report = head + body
return formated_report
def get_attributes(report: Dict[str, Any]) -> Dict[str, Any]:
created_at = report['created_at']
created_at_pretty = datetime.utcfromtimestamp(created_at).strftime(
'%Y-%m-%d %H:%M:%S+00:00 (UTC)'
)
for ni in report['news_items']:
modified_score = (ni.get('modified_score')
if 'modified_score' in ni else ni['score'])
score_bonus = modified_score - ni['score']
ni['score_bonus'] = score_bonus
ni['modified_score'] = modified_score
return {
'created_at': created_at,
'created_at_pretty': created_at_pretty,
'news_items': report['news_items']
}
| 32.983871 | 77 | 0.656724 | 264 | 2,045 | 4.806818 | 0.265152 | 0.099291 | 0.037825 | 0.042553 | 0.105595 | 0.037825 | 0 | 0 | 0 | 0 | 0 | 0.003165 | 0.227384 | 2,045 | 61 | 78 | 33.52459 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0.142298 | 0.011247 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081633 | false | 0 | 0.081633 | 0 | 0.22449 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b23017b8e70fe7088c23cf8a067dce1e0311a845 | 1,165 | py | Python | Python/Zelle/Chapter10_DefiningClasses/ProgrammingExercises/9_SphereClass/sphereProperties3.py | jeffvswanson/CodingPractice | 9ea8e0dd504230cea0e8684b31ef22c3ed90d2fb | [
"MIT"
] | null | null | null | Python/Zelle/Chapter10_DefiningClasses/ProgrammingExercises/9_SphereClass/sphereProperties3.py | jeffvswanson/CodingPractice | 9ea8e0dd504230cea0e8684b31ef22c3ed90d2fb | [
"MIT"
] | null | null | null | Python/Zelle/Chapter10_DefiningClasses/ProgrammingExercises/9_SphereClass/sphereProperties3.py | jeffvswanson/CodingPractice | 9ea8e0dd504230cea0e8684b31ef22c3ed90d2fb | [
"MIT"
] | null | null | null | # sphereProperties3.py
# A program to calculate the surface area and volume of a sphere.
"""Write a class to represent spheres. Your class should implement the
following methods:
__init__(self, radius) Creates a sphere having the given radius.
getRadius(self) Returns the radius of this sphere.
surfaceArea(self) Returns the surface area of the sphere.
volume(self) Returns the volume of the sphere.
Use your new class to solve Programming Exercise 1 from Chapter 3"""
from sphereClass import Sphere
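# The Sphere class lives in the companion sphereClass module (not shown here).
# A minimal sketch consistent with the interface described above, assuming the
# standard surface-area and volume formulas, would be:
#
#     import math
#
#     class Sphere:
#         def __init__(self, radius):
#             self.radius = radius
#         def getRadius(self):
#             return self.radius
#         def surfaceArea(self):
#             return 4.0 * math.pi * self.radius ** 2
#         def volume(self):
#             return (4.0 / 3.0) * math.pi * self.radius ** 3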
def main():
radius = 0
while radius <= 0:
try:
radius = float(input("Please enter the radius of the sphere: "))
if radius <= 0:
print("You have to enter a number greater than zero.")
except(SyntaxError, NameError, TypeError, ValueError):
print("You have to enter a number greater than zero.")
continue
sphere = Sphere(radius)
volume = sphere.volume()
surfaceArea = sphere.surfaceArea()
print("\nThe volume of the sphere is {0:.2f} units.".format(volume))
print("\nThe surface area of the sphere is {0:.2f} units."
.format(surfaceArea))
main()
| 31.486486 | 76 | 0.681545 | 160 | 1,165 | 4.9375 | 0.44375 | 0.031646 | 0.06962 | 0.040506 | 0.213924 | 0.172152 | 0.172152 | 0.172152 | 0.103797 | 0.103797 | 0 | 0.011198 | 0.233476 | 1,165 | 36 | 77 | 32.361111 | 0.87346 | 0.395708 | 0 | 0.111111 | 0 | 0 | 0.319943 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.055556 | 0 | 0.111111 | 0.222222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b232535896085f71f0ffdded787b734b26375bac | 7,814 | py | Python | templates/angewandte.py | cat-lemonade/PDFDataExtractor | ff6bef8d5a41c9f92c4601981ae1d5eb078f4c53 | [
"MIT"
] | 4 | 2021-06-30T11:38:25.000Z | 2021-06-30T12:29:24.000Z | templates/angewandte.py | cat-lemonade/PDFDataExtractor | ff6bef8d5a41c9f92c4601981ae1d5eb078f4c53 | [
"MIT"
] | null | null | null | templates/angewandte.py | cat-lemonade/PDFDataExtractor | ff6bef8d5a41c9f92c4601981ae1d5eb078f4c53 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from .methods import Methods
import re
class AngewandteTemplate(Methods):
def __init__(self, pdf):
"""
:param pdf: PDF extracted in ordered textblocks with features added
:param self.extraction_pattern: Unique regex pattern for section title extraction of Angewandte
:param self.metadata: Metadata extracted by default get_metadata() method, containing abstract, keywords, doi, figure caption and title
:param self.footnotes: A list contains two sub lists, used by footnotes_detect to store journal name and publisher
"""
Methods.__init__(self)
self.pdf = pdf
self.metadata = self.get_metadata(pdf)
self.extraction_pattern = '^[A-Z][a-z]+( [A-Za-z]+)*\n*[A-Za-z]+( [A-Za-z]+)*$|^[A-Z][a-z]+$|^(?i)Reference(s)*(\s)*.+|^Acknowledg(e)*ment(s)*|Keywords|Conflict of Interest|Data Availability Statement|Supporting Information|Author Contributions'
self.footnotes = [[], []]
def author(self):
'''Temporarily taken down'''
pass
def reference(self):
'''
Reference extraction for PDFs from Angewandte
Such extraction is conducted in 5 steps.
        1. ref_text building: Arrange the whole reference text into a list where each element starts with a sequence number, filtering unwanted noise.
           Because Angewandte doesn't have an actual 'Reference' title, the 'Keywords' title is used as the anchor point
2. ref_text sorting: Naturally sort the list from previous step
3. ref_text concatenating: Join the the previous list into a single string
4. location pairs building: Use regex pattern to locate the span of each reference
        5. indexing ref_text with location pairs: Use the span from step-4 as an index to slice reference text from step-3, and store results
:param reference: A dictionary used to store final extracted results where key is the sequence number and value is the corresponding entry
:param ref_text: A list to store plain reference text, each element starts with a sequence number
:param location: A list contains two sub lists, and the span of each reference entry is stored accordingly
:param pattern: footnotes on pages where references are.
'''
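        # Illustration: if ref_text == '[1] A. Author ... [2] B. Author ...', the spans
        # of '[1]' and '[2]' give index pairs so that reference['0'] becomes the text
        # between the end of '[1]' and the start of '[2]'.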
reference = {}
ref_text = []
location = [[], []]
pattern = self.footnotes_detect()
ref_status = False
# ref_text building
for key, value in self.pdf.items():
text = value['text']
if 'keyword' in text.lower():
ref_status = True
continue
if ref_status == True:
if text not in pattern:
# text = text.replace('\xa0', ' ')
if re.search('^\[\d', text):
ref_text.append(text)
else:
if len(ref_text) != 0:
ref_text.append(ref_text[-1] + ' ' + text)
ref_text.remove(ref_text[-2])
# ref_text sorting
def tryint(s):
try:
return int(s)
except ValueError:
return s
def alphanum_key(s):
return [tryint(x) for x in re.split('([0-9]+)', s)]
def natural_sort(l):
l.sort(key=alphanum_key)
natural_sort(ref_text)
# ref_text concatenating
ref_text = ' '.join(ref_text)
# location pairs building
for match in re.finditer('\[\d+\]', ref_text):
if match.group():
location[0].append(match.span()[1])
location[1].append(match.span()[0])
# indexing ref_text with location pairs
for ref in range(len(location[0])):
try:
reference[str(ref)] = ''.join(ref_text[location[0][ref]:location[1][ref + 1]].replace('\n', ''))
except IndexError:
reference[str(ref)] = ''.join(ref_text[location[1][-1]:].replace('\n', ''))
return reference
def keywords(self):
'''
:param result: A list to store results
:param identifier: coordinates of 'Keywords' textblock, used to find actual keywords
'''
result = []
identifier = 0
for key, value in self.pdf.items():
if 'keyword' in value['text'].lower():
identifier = value['position_y']
for key, value in self.pdf.items():
            if identifier[0] <= value['position_y'][1] <= identifier[1]:
result.append(value['text'].replace('\n', ' '))
return result
def footnotes_detect(self):
'''
Get footnotes from pages where Keywords and References are.
:param pages: A list to store page numbers
:param footnotes: A list contains two sub lists, used by footnotes_detect to store journal name and publisher
'''
footnotes_status = False
pages = []
for key, value in self.pdf.items():
if 'Keyword' in value['text']:
footnotes_status = True
# Get publisher
if footnotes_status == True:
text = value['text']
if 'wiley-vch ' in text.lower():
self.footnotes[0].append(text)
# Get page number
page_number = re.search('^\d+$', text)
if page_number:
pages.append(page_number.group())
# Get journal name
publisher = re.search('^Angew\. Chem\. Int.+', text)
if publisher:
self.footnotes[1].append(publisher.group())
self.footnotes[0] = self.most_frequent(self.footnotes[0])
self.footnotes[1] = self.most_frequent(self.footnotes[1])
return self.footnotes + pages
def section(self):
'''Extract section title and corresponding text'''
return self.get_section(self.pdf, self.extraction_pattern, pub='angewandte')
def test(self):
print('PDF returned successfully')
def plaintext(self):
return self.get_puretext(self.pdf)
def journal(self, info_type=None):
'''
Extract journal information, info_type including jounal name, year, volume and page
:param info_type: user-defined argument used to select jounal name, year, volume or page
'''
journal = {'name': '',
'year': '',
'volume': '',
'page': ''
}
for key, value in self.pdf.items():
if key[0] == 0:
text = re.search('^Angew\. Chem\. Int.+', value['text'])
if text:
text = text.group().split(',')
journal['name'] = 'Angewandte Chemie International Edition'
journal['year'] = re.search('\d+', text[0]).group()
journal['volume'] = text[1]
journal['page'] = text[2]
if info_type == None:
return journal
else:
return journal[info_type]
def doi(self):
return self.metadata['doi']
def title(self):
return self.metadata['title']
def abstract(self):
return self.metadata['abstract']
def caption(self, nicely=False):
if nicely == True:
for seq, caption in self.metadata['figure'].items():
print(seq)
print(caption)
print('\n')
return self.metadata['figure']
| 38.303922 | 254 | 0.547607 | 893 | 7,814 | 4.721165 | 0.246361 | 0.036528 | 0.013046 | 0.015417 | 0.167695 | 0.134013 | 0.11314 | 0.074004 | 0.061195 | 0.061195 | 0 | 0.007816 | 0.345022 | 7,814 | 203 | 255 | 38.492611 | 0.815944 | 0.294343 | 0 | 0.09322 | 0 | 0.008475 | 0.101749 | 0.02345 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135593 | false | 0.008475 | 0.016949 | 0.042373 | 0.279661 | 0.033898 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2326d4a38358f57aa91b87608eb79c925afd6ed | 9,968 | py | Python | greenfinger/umqttsimple.py | blaa/smartenough | dc94a53bc27e324bf263f71007320003d59475b0 | [
"MIT"
] | 1 | 2020-08-31T21:13:00.000Z | 2020-08-31T21:13:00.000Z | greenfinger/umqttsimple.py | blaa/smartenough | dc94a53bc27e324bf263f71007320003d59475b0 | [
"MIT"
] | null | null | null | greenfinger/umqttsimple.py | blaa/smartenough | dc94a53bc27e324bf263f71007320003d59475b0 | [
"MIT"
] | null | null | null | # Source: https://github.com/fizista/micropython-umqtt.simple2
# License: MIT
import usocket as socket
import uselect
from utime import ticks_add, ticks_ms, ticks_diff
class MQTTException(Exception):
pass
def pid_gen(pid=0):
while True:
pid = pid + 1 if pid < 65535 else 1
yield pid
class MQTTClient:
def __init__(self, client_id, server, port=0, user=None, password=None, keepalive=0,
ssl=False, ssl_params=None, socket_timeout=5, message_timeout=10):
if port == 0:
port = 8883 if ssl else 1883
self.client_id = client_id
self.sock = None
self.poller = None
self.server = server
self.port = port
self.ssl = ssl
self.ssl_params = ssl_params if ssl_params else {}
self.newpid = pid_gen()
if not getattr(self, 'cb', None):
self.cb = None
if not getattr(self, 'cbstat', None):
self.cbstat = lambda p, s: None
self.user = user
self.pswd = password
self.keepalive = keepalive
self.lw_topic = None
self.lw_msg = None
self.lw_qos = 0
self.lw_retain = False
self.rcv_pids = {} # PUBACK and SUBACK pids awaiting ACK response
self.last_ping = ticks_ms() # Time of the last PING sent
self.last_cpacket = ticks_ms() # Time of last Control Packet
self.socket_timeout = socket_timeout
self.message_timeout = message_timeout
def _read(self, n):
# in non-blocking mode, may not download enough data
try:
msg = b''
for i in range(n):
self._sock_timeout(self.poller_r, self.socket_timeout)
msg += self.sock.read(1)
except AttributeError:
raise MQTTException(8)
if msg == b'': # Connection closed by host (?)
raise MQTTException(1)
if len(msg) != n:
raise MQTTException(2)
return msg
def _write(self, bytes_wr, length=-1):
# In non-blocking socket mode, the entire block of data may not be sent.
try:
self._sock_timeout(self.poller_w, self.socket_timeout)
out = self.sock.write(bytes_wr, length)
except AttributeError:
raise MQTTException(8)
if length < 0:
if out != len(bytes_wr):
raise MQTTException(3)
else:
if out != length:
raise MQTTException(3)
return out
def _send_str(self, s):
assert len(s) < 65536
self._write(len(s).to_bytes(2, 'big'))
self._write(s)
def _recv_len(self):
n = 0
sh = 0
while 1:
b = self._read(1)[0]
n |= (b & 0x7f) << sh
if not b & 0x80:
return n
sh += 7
def _varlen_encode(self, value, buf, offset=0):
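        # MQTT "Remaining Length" encoding: 7 payload bits per byte, with the high
        # bit set on every byte except the last (values below 2**28 fit in 4 bytes).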
assert value < 268435456
while value > 0x7f:
buf[offset] = (value & 0x7f) | 0x80
value >>= 7
offset += 1
buf[offset] = value
return offset + 1
def _sock_timeout(self, poller, socket_timeout):
if self.sock:
res = poller.poll(-1 if socket_timeout is None else int(socket_timeout * 1000))
if not res:
raise MQTTException(30)
else:
raise MQTTException(28)
def set_callback(self, f):
self.cb = f
def set_callback_status(self, f):
self.cbstat = f
def connect(self, clean_session=True):
self.sock = socket.socket()
self.poller_r = uselect.poll()
self.poller_r.register(self.sock, uselect.POLLIN)
self.poller_w = uselect.poll()
self.poller_w.register(self.sock, uselect.POLLOUT)
addr = socket.getaddrinfo(self.server, self.port)[0][-1]
self.sock.connect(addr)
if self.ssl:
import ussl
self.sock = ussl.wrap_socket(self.sock, **self.ssl_params)
premsg = bytearray(b"\x10\0\0\0\0\0")
msg = bytearray(b"\0\x04MQTT\x04\0\0\0")
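        # CONNECT packet: premsg is the fixed header (0x10) plus space for the
        # variable-length Remaining Length field; msg is the variable header
        # ("MQTT", protocol level 4) whose flag and keepalive bytes are set below.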
sz = 10 + 2 + len(self.client_id)
msg[7] = bool(clean_session) << 1
# Clean session = True, remove current session
if bool(clean_session):
self.rcv_pids.clear()
if self.user is not None:
sz += 2 + len(self.user)
msg[7] |= 1 << 7 # User Name Flag
if self.pswd is not None:
sz += 2 + len(self.pswd)
            msg[7] |= 1 << 6  # Password Flag
if self.keepalive:
assert self.keepalive < 65536
msg[8] |= self.keepalive >> 8
msg[9] |= self.keepalive & 0x00FF
if self.lw_topic:
sz += 2 + len(self.lw_topic) + 2 + len(self.lw_msg)
msg[7] |= 0x4 | (self.lw_qos & 0x1) << 3 | (self.lw_qos & 0x2) << 3
msg[7] |= self.lw_retain << 5
plen = self._varlen_encode(sz, premsg, 1)
self._write(premsg, plen)
self._write(msg)
self._send_str(self.client_id)
if self.lw_topic:
self._send_str(self.lw_topic)
self._send_str(self.lw_msg)
if self.user is not None:
self._send_str(self.user)
if self.pswd is not None:
self._send_str(self.pswd)
resp = self._read(4)
if not (resp[0] == 0x20 and resp[1] == 0x02):
raise MQTTException(29)
if resp[3] != 0:
if 1 <= resp[3] <= 5:
raise MQTTException(20 + resp[3])
else:
raise MQTTException(20, resp[3])
self.last_cpacket = ticks_ms()
return resp[2] & 1
def disconnect(self):
self._write(b"\xe0\0")
self.poller_r.unregister(self.sock)
self.poller_w.unregister(self.sock)
self.sock.close()
self.sock = None
self.poller = None
def ping(self):
self._write(b"\xc0\0")
self.last_ping = ticks_ms()
def publish(self, topic, msg, retain=False, qos=0, dup=False):
assert qos in (0, 1)
pkt = bytearray(b"\x30\0\0\0\0")
pkt[0] |= qos << 1 | retain | int(dup) << 3
sz = 2 + len(topic) + len(msg)
if qos > 0:
sz += 2
plen = self._varlen_encode(sz, pkt, 1)
self._write(pkt, plen)
self._send_str(topic)
if qos > 0:
pid = next(self.newpid)
self._write(pid.to_bytes(2, 'big'))
self._write(msg)
if qos > 0:
self.rcv_pids[pid] = ticks_add(ticks_ms(), self.message_timeout * 1000)
return pid
def subscribe(self, topic, qos=0):
assert qos in (0, 1)
assert self.cb is not None, "Subscribe callback is not set"
pkt = bytearray(b"\x82\0\0\0\0\0\0")
pid = next(self.newpid)
sz = 2 + 2 + len(topic) + 1
plen = self._varlen_encode(sz, pkt, 1)
pkt[plen:plen + 2] = pid.to_bytes(2, 'big')
self._write(pkt, plen + 2)
self._send_str(topic)
self._write(qos.to_bytes(1, "little"))
self.rcv_pids[pid] = ticks_add(ticks_ms(), self.message_timeout * 1000)
return pid
def _message_timeout(self):
curr_tick = ticks_ms()
for pid, timeout in self.rcv_pids.items():
if ticks_diff(timeout, curr_tick) <= 0:
self.rcv_pids.pop(pid)
self.cbstat(pid, 0)
def check_msg(self):
if self.sock:
if not self.poller_r.poll(-1 if self.socket_timeout is None else 1):
self._message_timeout()
return None
try:
res = self._read(1)
if not res:
self._message_timeout()
return None
except OSError as e:
if e.args[0] == 110:
self._message_timeout()
return None
else:
raise e
else:
raise MQTTException(28)
if res == b"\xd0":
if self._read(1)[0] != 0:
                raise MQTTException(-1)
self.last_cpacket = ticks_ms()
return
op = res[0]
if op == 0x40:
sz = self._read(1)
if sz != b"\x02":
raise MQTTException(-1)
rcv_pid = int.from_bytes(self._read(2), 'big')
if rcv_pid in self.rcv_pids:
self.last_cpacket = ticks_ms()
self.rcv_pids.pop(rcv_pid)
self.cbstat(rcv_pid, 1)
else:
self.cbstat(rcv_pid, 2)
if op == 0x90:
resp = self._read(4)
if resp[0] != 0x03:
raise MQTTException(40, resp)
if resp[3] == 0x80:
raise MQTTException(44)
if resp[3] not in (0, 1, 2):
raise MQTTException(40, resp)
pid = resp[2] | (resp[1] << 8)
if pid in self.rcv_pids:
self.last_cpacket = ticks_ms()
self.rcv_pids.pop(pid)
self.cbstat(pid, 1)
else:
raise MQTTException(5)
self._message_timeout()
if op & 0xf0 != 0x30:
return op
sz = self._recv_len()
topic_len = int.from_bytes(self._read(2), 'big')
topic = self._read(topic_len)
sz -= topic_len + 2
if op & 6: # QoS level > 0
pid = int.from_bytes(self._read(2), 'big')
sz -= 2
msg = self._read(sz) if sz else b''
retained = op & 0x01
dup = op & 0x08
self.cb(topic, msg, bool(retained), bool(dup))
self.last_cpacket = ticks_ms()
if op & 6 == 2:
self._write(b"\x40\x02")
self._write(pid.to_bytes(2, 'big'))
elif op & 6 == 4:
raise NotImplementedError()
elif op & 6 == 6:
raise MQTTException(-1)
| 32.575163 | 91 | 0.521469 | 1,304 | 9,968 | 3.843558 | 0.174847 | 0.064645 | 0.021947 | 0.023943 | 0.260575 | 0.173583 | 0.123703 | 0.075818 | 0.04589 | 0.04589 | 0 | 0.046198 | 0.368078 | 9,968 | 305 | 92 | 32.681967 | 0.749484 | 0.041533 | 0 | 0.278195 | 0 | 0 | 0.016143 | 0 | 0 | 0 | 0.007862 | 0 | 0.022556 | 1 | 0.06391 | false | 0.011278 | 0.015038 | 0 | 0.131579 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b232da481ef2a7d2b6e768ab0ea182851e040e15 | 3,527 | py | Python | ietf/message/management/commands/show_messages.py | hassanakbar4/ietfdb | cabee059092ae776015410640226064331c293b7 | [
"BSD-3-Clause"
] | 25 | 2022-03-05T08:26:52.000Z | 2022-03-30T15:45:42.000Z | ietf/message/management/commands/show_messages.py | hassanakbar4/ietfdb | cabee059092ae776015410640226064331c293b7 | [
"BSD-3-Clause"
] | 219 | 2022-03-04T17:29:12.000Z | 2022-03-31T21:16:14.000Z | ietf/message/management/commands/show_messages.py | hassanakbar4/ietfdb | cabee059092ae776015410640226064331c293b7 | [
"BSD-3-Clause"
] | 22 | 2022-03-04T15:34:34.000Z | 2022-03-28T13:30:59.000Z | # Copyright The IETF Trust 2020, All Rights Reserved
# -*- coding: utf-8 -*-
import email
import datetime
from django.core.management.base import BaseCommand
import debug # pyflakes:ignore
from ietf.message.models import Message
from ietf.utils.mail import parseaddr
class Command(BaseCommand):
help = """
Show outgoing messages that have been saved as Message objects. By default
all messages from the last 2 weeks are shown. Selection can be made based
on date and sent/unsent state. With the --pk option, only a list of primary
keys are shown, otherwise, creation and send date, message-id, sender and
primary recipients, and subject line is shown. The list of primary keys is
suitable for input to the send_messages management command.
"""
def add_arguments(self, parser):
default_start = datetime.datetime.now() - datetime.timedelta(days=14)
parser.add_argument(
'-t', '--start', '--from', type=str, default=default_start.strftime('%Y-%m-%d %H:%M'),
help='Limit the list to messages saved after the given time (default %(default)s).',
)
parser.add_argument(
'--stop', '--to', type=str, default=None,
            help='Limit the list to messages sent before the given time.',
)
parser.add_argument(
'-p', '--pk', action="store_true", default=False,
help='output only a list of primary keys.',
)
selection = parser.add_mutually_exclusive_group()
selection.add_argument(
'-a', '--all', action='store_const', dest='state', const='all',
help='Shows a list of all messages.',
)
selection.add_argument(
'-u', '--unsent', action='store_const', dest='state', const='unsent',
help='Shows a list of unsent messages',
)
selection.add_argument(
'-s', '--sent', action='store_const', dest='state', const='sent',
help='Shows a list of sent messages.',
)
def handle(self, *args, **options):
messages = Message.objects.all()
if options['state'] == 'sent':
messages = messages.filter(sent__isnull=False)
elif options['state'] == 'unsent':
messages = messages.filter(sent__isnull=True)
else:
options['state'] = 'all'
messages = messages.filter(time__gte=options['start'])
if options['stop']:
messages = messages.filter(sent__lte=options['stop'])
selection_str = "%s messages between %s and %s" % (options['state'], options['start'], options['stop'])
else:
selection_str = "%s messages since %s" % (options['state'], options['start'])
        self.stdout.write("\nShowing %s:\n\n" % selection_str)
if options['pk']:
self.stdout.write(','.join([ str(pk) for pk in messages.values_list('pk', flat=True)] ))
else:
for m in messages:
def addr(f):
return parseaddr(f)[1]
to = ','.join( a[1] for a in email.utils.getaddresses([m.to]) )
self.stdout.write('%s %16s %16s %56s %s -> %s "%s"\n' %
(m.pk, m.time.strftime('%Y-%m-%d %H:%M'), m.sent and m.sent.strftime('%Y-%m-%d %H:%M') or '',
m.msgid.strip('<>'), addr(m.frm), to, m.subject.strip()))
self.stdout.write("\n%s messages (%s)\n" % (messages.count(), selection_str))
| 41.988095 | 115 | 0.580663 | 444 | 3,527 | 4.545045 | 0.34009 | 0.017839 | 0.017344 | 0.025273 | 0.213578 | 0.1333 | 0.047572 | 0.047572 | 0.047572 | 0.047572 | 0 | 0.006262 | 0.275588 | 3,527 | 83 | 116 | 42.493976 | 0.783562 | 0.02495 | 0 | 0.132353 | 0 | 0 | 0.317322 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044118 | false | 0 | 0.088235 | 0.014706 | 0.176471 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b23409470c80d954f89cedab304087d78552758c | 1,317 | py | Python | python/Number-Guess-Game.py | Codingdecode/hacktoberfest_2021 | d11f9ca4b28a43740c46b60e48a081014515b475 | [
"MIT"
] | 50 | 2021-10-02T08:26:30.000Z | 2021-11-08T08:57:28.000Z | python/Number-Guess-Game.py | ashu-rb/hacktoberfest_2021 | 49ee3145c926182f318210bb10f41fe686596c63 | [
"MIT"
] | 130 | 2021-10-02T11:29:57.000Z | 2021-11-08T14:24:05.000Z | python/Number-Guess-Game.py | ashu-rb/hacktoberfest_2021 | 49ee3145c926182f318210bb10f41fe686596c63 | [
"MIT"
] | 226 | 2021-10-02T07:56:03.000Z | 2021-10-31T18:30:06.000Z | import random
play = 'y'
def game(ll, ul):
chances = 7
number = random.randint(ll, ul)
print("\nIm thinking of a number between {} and {}".format(ll, ul))
print("\nYou have 7 chances to guess the right number.\n")
numberChoice = int(input("Guess the number -> "))
while numberChoice != number:
chances -= 1
if chances>0:
if numberChoice > ul or numberChoice < ll:
print("\nInvalid input!\nPlease input a number between {} and {}".format(ll,ul))
elif numberChoice > number and numberChoice < ul:
print("\nA little too high! Remaining chances {}".format(chances))
elif numberChoice < number:
print("\nA little too low!! Remaining chances {}".format(chances))
numberChoice = int(input("Please try again -> "))
else:
print("\nYou lost! The number was {}\n".format(number))
break
else:
print('\nCorrect!! Great Guess!\n')
while play.lower() != 'n':
option = int(input("Enter Difficulty Level (1\\2\\3) -> "))
if option == 1:
game(0,10)
elif option == 2:
game(0,30)
elif option == 3:
game(0,100)
play = input("Want to play again? (y/n) -> ")
print('Thank you for playing my game!')
| 32.925 | 96 | 0.560364 | 165 | 1,317 | 4.472727 | 0.418182 | 0.02168 | 0.02439 | 0.04607 | 0.073171 | 0.073171 | 0.073171 | 0 | 0 | 0 | 0 | 0.021692 | 0.299924 | 1,317 | 39 | 97 | 33.769231 | 0.778742 | 0 | 0 | 0.060606 | 0 | 0 | 0.322703 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0 | 0.030303 | 0 | 0.060606 | 0.242424 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2390aaf839718bdcab5ae6520d7c5d838cd9a48 | 4,216 | py | Python | Plots/Contours/NCL_polar_1.py | NCAR/GeoCAT-examples | fba1b045ba5145fa48cf2f3c1e3b3c7c863b0b5b | [
"Apache-2.0"
] | 42 | 2020-03-03T16:19:30.000Z | 2022-03-18T09:03:26.000Z | Plots/Contours/NCL_polar_1.py | netgodz/GeoCAT-examples | 5ed9a1d68b69a921d0f1fee1160e109853926ed9 | [
"Apache-2.0"
] | 351 | 2019-12-20T22:10:47.000Z | 2022-03-16T20:46:09.000Z | Plots/Contours/NCL_polar_1.py | netgodz/GeoCAT-examples | 5ed9a1d68b69a921d0f1fee1160e109853926ed9 | [
"Apache-2.0"
] | 32 | 2020-01-06T21:18:48.000Z | 2022-03-31T13:45:01.000Z | """
NCL_polar_1.py
==============
This script illustrates the following concepts:
- Drawing black-and-white contours over a polar stereographic map
- Drawing the northern hemisphere of a polar stereographic map
See following URLs to see the reproduced NCL plot & script:
- Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/polar_1.ncl
- Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/polar_1_lg.png
"""
###############################################################################
# Import packages:
import numpy as np
import xarray as xr
import cartopy.feature as cfeature
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import geocat.datafiles as gdf
from geocat.viz import util as gvutil
###############################################################################
# Read in data:
# Open a netCDF data file using xarray default engine and load the data into xarrays
ds = xr.open_dataset(gdf.get("netcdf_files/uv300.nc"))
U = ds.U[1, :, :]
###############################################################################
# Fix the artifact of not-shown-data around 0 and 360-degree longitudes
wrap_U = gvutil.xr_add_cyclic_longitudes(U, "lon")
###############################################################################
# Plot:
# Generate axes, using Cartopy, drawing coastlines, and adding features
fig = plt.figure(figsize=(10, 10))
projection = ccrs.NorthPolarStereo()
ax = plt.axes(projection=projection)
ax.add_feature(cfeature.LAND, facecolor='lightgray')
# Set map boundary to include latitudes between 0 and 40 and longitudes
# between -180 and 180 only
gvutil.set_map_boundary(ax, [-180, 180], [0, 40], south_pad=1)
# Set draw_labels to False so that you can manually manipulate it later
gl = ax.gridlines(ccrs.PlateCarree(),
draw_labels=False,
linestyle="--",
color='black')
# Manipulate latitude and longitude gridline numbers and spacing
gl.ylocator = mticker.FixedLocator(np.arange(0, 90, 15))
gl.xlocator = mticker.FixedLocator(np.arange(-180, 180, 30))
# Manipulate longitude labels (0, 30 E, 60 E, ..., 30 W, etc.)
ticks = np.arange(0, 210, 30)
etick = ['0'] + [
r'%dE' % tick for tick in ticks if (tick != 0) & (tick != 180)
] + ['180']
wtick = [r'%dW' % tick for tick in ticks[::-1] if (tick != 0) & (tick != 180)]
labels = etick + wtick
xticks = np.arange(0, 360, 30)
yticks = np.full_like(xticks, -5) # Latitude where the labels will be drawn
for xtick, ytick, label in zip(xticks, yticks, labels):
if label == '180':
ax.text(xtick,
ytick,
label,
fontsize=14,
horizontalalignment='center',
verticalalignment='top',
transform=ccrs.Geodetic())
elif label == '0':
ax.text(xtick,
ytick,
label,
fontsize=14,
horizontalalignment='center',
verticalalignment='bottom',
transform=ccrs.Geodetic())
else:
ax.text(xtick,
ytick,
label,
fontsize=14,
horizontalalignment='center',
verticalalignment='center',
transform=ccrs.Geodetic())
# Contour-plot U-data
p = wrap_U.plot.contour(ax=ax,
vmin=-8,
vmax=16,
transform=ccrs.PlateCarree(),
levels=np.arange(-12, 44, 4),
linewidths=0.5,
                        colors='black',
add_labels=False)
ax.clabel(p, np.arange(-8, 17, 8), fmt='%d', inline=1, fontsize=14)
# Use geocat.viz.util convenience function to add left and right titles
gvutil.set_titles_and_labels(ax, lefttitle="Zonal Wind", righttitle="m/s")
# Add lower text box
ax.text(1.0,
-.10,
"CONTOUR FROM -12 TO 40 BY 4",
horizontalalignment='right',
transform=ax.transAxes,
bbox=dict(boxstyle='square, pad=0.25',
facecolor='white',
edgecolor='black'))
# Show the plot
plt.show()
| 34.557377 | 84 | 0.565939 | 505 | 4,216 | 4.679208 | 0.445545 | 0.020313 | 0.025391 | 0.020313 | 0.145154 | 0.11807 | 0.092679 | 0.092679 | 0.092679 | 0.092679 | 0 | 0.03773 | 0.251898 | 4,216 | 121 | 85 | 34.842975 | 0.711477 | 0.274905 | 0 | 0.243243 | 0 | 0 | 0.060706 | 0.007726 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.108108 | 0 | 0.108108 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2399036b9426bd3df0badd4bc71c2bb84c1b587 | 1,060 | py | Python | google_problems/problem_12.py | loftwah/Daily-Coding-Problem | 0327f0b4f69ef419436846c831110795c7a3c1fe | [
"MIT"
] | 129 | 2018-10-14T17:52:29.000Z | 2022-01-29T15:45:57.000Z | google_problems/problem_12.py | loftwah/Daily-Coding-Problem | 0327f0b4f69ef419436846c831110795c7a3c1fe | [
"MIT"
] | 2 | 2019-11-30T23:28:23.000Z | 2020-01-03T16:30:32.000Z | google_problems/problem_12.py | loftwah/Daily-Coding-Problem | 0327f0b4f69ef419436846c831110795c7a3c1fe | [
"MIT"
] | 60 | 2019-02-21T09:18:31.000Z | 2022-03-25T21:01:04.000Z | """This problem was asked by Google.
The edit distance between two strings refers to the minimum
number of character insertions, deletions, and substitutions
required to change one string to the other. For example,
the edit distance between “kitten” and “sitting” is three:
substitute the “k” for “s”, substitute the “e” for “i”, and append a “g”.
Given two strings, compute the edit distance between them."""
import math
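# Reference: the standard dynamic-programming (Levenshtein) edit distance.
# This is a sketch for comparison; the simpler character-by-character
# comparison in main() below only approximates it for some inputs.
def edit_distance(one, two):
    m, n = len(one), len(two)
    # dp[i][j] = edit distance between one[:i] and two[:j]
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m + 1):
        dp[i][0] = i
    for j in range(n + 1):
        dp[0][j] = j
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            cost = 0 if one[i - 1] == two[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j] + 1,         # deletion
                           dp[i][j - 1] + 1,         # insertion
                           dp[i - 1][j - 1] + cost)  # substitution
    return dp[m][n]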
def main(one, two):
if len(one) > len(two):
small = len(two)
else:
small = len(one)
distance = int(math.fabs(len(one) - len(two)))
if distance == 0:
for i, j in zip(one, two):
if i != j:
distance += 1
else:
for i in range(small):
if one[i] != two[i]:
distance += 1
return distance
print(main("kitten", "sitting")) # should gave 3
print(main("me", "you")) # should gave 3
print(main("majid", "younes")) # should gave 6
print(main("hello", "ehllo")) # should gave 2
print(main("younes", "younes")) # should gave 2
| 31.176471 | 73 | 0.615094 | 156 | 1,060 | 4.179487 | 0.467949 | 0.069018 | 0.069018 | 0.101227 | 0.06135 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010165 | 0.257547 | 1,060 | 33 | 74 | 32.121212 | 0.818297 | 0.450943 | 0 | 0.190476 | 0 | 0 | 0.089161 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.047619 | 0 | 0.142857 | 0.238095 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b23b21897cb8aef599419412b7874cff2cfd72d6 | 2,090 | py | Python | tests/integration/util.py | jensh007/gardenlinux | 7fab6531a004702631f43ddda028ae5e9fa865b9 | [
"MIT"
] | null | null | null | tests/integration/util.py | jensh007/gardenlinux | 7fab6531a004702631f43ddda028ae5e9fa865b9 | [
"MIT"
] | null | null | null | tests/integration/util.py | jensh007/gardenlinux | 7fab6531a004702631f43ddda028ae5e9fa865b9 | [
"MIT"
] | null | null | null | import subprocess
import logging
import json
import urllib.request
import paramiko
from googleapiclient.errors import HttpError
logger = logging.getLogger(__name__)
def get_my_ip():
"""Obtain external visible IP address"""
url='https://api.myip.com'
response = urllib.request.urlopen(url)
if response.status != 200:
raise Exception(f'Unable to obtain this hosts public IP, got HTTP status {response.status} from {url}')
doc = json.load(response)
return doc['ip']
def get_public_key(private_key_file):
k = paramiko.RSAKey.from_private_key_file(private_key_file)
return k.get_name() + " " + k.get_base64()
# gcp related
def delete_firewall_rule(compute, project, name):
try:
request = compute.firewalls().delete(project=project, firewall=name)
response = request.execute()
logger.info(response)
op_name = response['name']
logger.info(f'waiting for delete filewall rule {op_name=}')
wait_for_global_operation(compute, project, op_name)
except HttpError as h:
if h.resp.status != 404:
raise
def ensure_firewall_rules(compute, project, restfw):
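    # Recreate the rule idempotently: delete any rule with the same name first,
    # then insert the new definition and wait for the global operation to finish.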
name = restfw["name"]
delete_firewall_rule(compute, project, name)
request = compute.firewalls().insert(project=project, body=restfw)
response = request.execute()
logger.info(response)
op_name = response['name']
logger.info(f'waiting for create filewall rule {op_name=}')
wait_for_global_operation(compute, project, op_name)
def wait_for_global_operation(compute, project, operation):
response = compute.globalOperations().wait(project=project, operation=operation,).execute()
if response["status"] != "DONE":
logger.error("Operation failed %s" % json.dumps(response, indent=4))
error = ""
if "error" in response:
error = response["error"]
raise Exception("Operation %s failed: %s" % (operation, error))
def get_config_value(config, key):
if key in config and config[key] != "":
return config[key]
else:
return None | 33.174603 | 111 | 0.686603 | 265 | 2,090 | 5.264151 | 0.354717 | 0.060215 | 0.030108 | 0.047312 | 0.276703 | 0.276703 | 0.199283 | 0.199283 | 0.199283 | 0.199283 | 0 | 0.005376 | 0.199043 | 2,090 | 63 | 112 | 33.174603 | 0.827957 | 0.022488 | 0 | 0.16 | 0 | 0 | 0.13052 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.12 | 0 | 0.32 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b23d134df4102e23f15d0e750fc852774a1350a6 | 2,797 | py | Python | v3/cpu/mpi/kskipcg.py | 5enxia/parallel-krylov | 2d75e220b9b0cc6df924111cfb57f917f2100925 | [
"MIT"
] | 1 | 2022-02-25T14:17:55.000Z | 2022-02-25T14:17:55.000Z | v3/cpu/mpi/kskipcg.py | 5enxia/parallel-krylov | 2d75e220b9b0cc6df924111cfb57f917f2100925 | [
"MIT"
] | null | null | null | v3/cpu/mpi/kskipcg.py | 5enxia/parallel-krylov | 2d75e220b9b0cc6df924111cfb57f917f2100925 | [
"MIT"
] | 1 | 2022-02-20T02:57:10.000Z | 2022-02-20T02:57:10.000Z | import numpy as np
from numpy import float64, dot
from numpy.linalg import norm
from .common import start, finish, init, MultiCpu
def kskipcg(comm, local_A, b, x=None, tol=1e-05, maxiter=None, k=0, M=None, callback=None, atol=None) -> tuple:
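    # k-skip CG: each outer iteration builds k+1 extra Krylov basis vectors and
    # performs k+1 CG updates locally, reducing the number of MPI synchronizations.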
    # MPI initialization
rank = comm.Get_rank()
MultiCpu.joint_mpi(comm)
    # Initialization
T = float64
x, maxiter, b_norm, N, residual, num_of_solution_updates = init(
b, x, maxiter)
MultiCpu.alloc(local_A, T)
Ax = np.zeros(N, T)
Ar = np.zeros((k + 2, N), T)
Ap = np.zeros((k + 3, N), T)
a = np.zeros(2*k + 2, T)
f = np.zeros(2*k + 4, T)
c = np.zeros(2*k + 2, T)
    # Initial residual
MultiCpu.dot(local_A, x, out=Ax)
Ar[0] = b - Ax
Ap[0] = Ar[0].copy()
    # Iterative computation
i = 0
index = 0
if rank == 0:
start_time = start(method_name='k-skip CG + MPI', k=k)
while i < maxiter:
        # Convergence check
residual[index] = norm(Ar[0]) / b_norm
if residual[index] < tol:
isConverged = True
break
        # Krylov basis computation
for j in range(1, k + 1):
MultiCpu.dot(local_A, Ar[j-1], out=Ar[j])
for j in range(1, k + 2):
MultiCpu.dot(local_A, Ap[j-1], out=Ap[j])
        # Coefficient computation
for j in range(2 * k + 1):
jj = j // 2
a[j] = dot(Ar[jj], Ar[jj + j % 2])
for j in range(2 * k + 4):
jj = j // 2
f[j] = dot(Ap[jj], Ap[jj + j % 2])
for j in range(2 * k + 2):
jj = j // 2
c[j] = dot(Ar[jj], Ap[jj + j % 2])
        # One CG iteration
        # Solution update
alpha = a[0] / f[1]
beta = alpha ** 2 * f[2] / a[0] - 1
x += alpha * Ap[0]
Ar[0] -= alpha * Ap[1]
Ap[0] = Ar[0] + beta * Ap[0]
MultiCpu.dot(local_A, Ap[0], out=Ap[1])
        # k CG iterations
for j in range(k):
for l in range(0, 2*(k-j)+1):
a[l] += alpha*(alpha*f[l+2] - 2*c[l+1])
d = c[l] - alpha*f[l+1]
c[l] = a[l] + d*beta
f[l] = c[l] + beta*(d + beta*f[l])
            # Solution update
alpha = a[0] / f[1]
beta = alpha ** 2 * f[2] / a[0] - 1
x += alpha * Ap[0]
Ar[0] -= alpha * Ap[1]
Ap[0] = Ar[0] + beta * Ap[0]
MultiCpu.dot(local_A, Ap[0], out=Ap[1])
i += (k + 1)
index += 1
num_of_solution_updates[index] = i
else:
isConverged = False
residual[index] = norm(Ar[0]) / b_norm
if rank == 0:
elapsed_time = finish(start_time, isConverged, i, residual[index])
info = {
'time': elapsed_time,
'nosl': num_of_solution_updates[:index+1],
'residual': residual[:index+1],
}
return x, info
else:
exit(0)
| 27.693069 | 111 | 0.454773 | 442 | 2,797 | 2.819005 | 0.21267 | 0.021669 | 0.028892 | 0.05297 | 0.324238 | 0.25923 | 0.210273 | 0.210273 | 0.166934 | 0.139647 | 0 | 0.048696 | 0.383268 | 2,797 | 100 | 112 | 27.97 | 0.673623 | 0.021809 | 0 | 0.276316 | 0 | 0 | 0.01138 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013158 | false | 0 | 0.052632 | 0 | 0.078947 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b240e4b922a27aed34300795e2b148f02265135d | 12,530 | py | Python | alipay/aop/api/domain/IsvShopDishModel.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/IsvShopDishModel.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/IsvShopDishModel.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class IsvShopDishModel(object):
def __init__(self):
self._content = None
self._dish_type_id = None
self._dish_type_name = None
self._good_level = None
self._merchant_sold_cnt_seven_d = None
self._merchant_sold_cnt_thirty_d = None
self._merchant_sold_reusercnt_thirty_d = None
self._merchant_sold_usercnt_thirty_d = None
self._name = None
self._outer_dish_id = None
self._pict = None
self._platform = None
self._price = None
self._quantity = None
self._shop_id = None
self._sold_cnt_seven_d = None
self._sold_cnt_thirty_d = None
self._sold_reusercnt_thirty_d = None
self._sold_usercnt_thirty_d = None
self._sort_col = None
self._unit = None
@property
def content(self):
return self._content
@content.setter
def content(self, value):
self._content = value
@property
def dish_type_id(self):
return self._dish_type_id
@dish_type_id.setter
def dish_type_id(self, value):
self._dish_type_id = value
@property
def dish_type_name(self):
return self._dish_type_name
@dish_type_name.setter
def dish_type_name(self, value):
self._dish_type_name = value
@property
def good_level(self):
return self._good_level
@good_level.setter
def good_level(self, value):
self._good_level = value
@property
def merchant_sold_cnt_seven_d(self):
return self._merchant_sold_cnt_seven_d
@merchant_sold_cnt_seven_d.setter
def merchant_sold_cnt_seven_d(self, value):
self._merchant_sold_cnt_seven_d = value
@property
def merchant_sold_cnt_thirty_d(self):
return self._merchant_sold_cnt_thirty_d
@merchant_sold_cnt_thirty_d.setter
def merchant_sold_cnt_thirty_d(self, value):
self._merchant_sold_cnt_thirty_d = value
@property
def merchant_sold_reusercnt_thirty_d(self):
return self._merchant_sold_reusercnt_thirty_d
@merchant_sold_reusercnt_thirty_d.setter
def merchant_sold_reusercnt_thirty_d(self, value):
self._merchant_sold_reusercnt_thirty_d = value
@property
def merchant_sold_usercnt_thirty_d(self):
return self._merchant_sold_usercnt_thirty_d
@merchant_sold_usercnt_thirty_d.setter
def merchant_sold_usercnt_thirty_d(self, value):
self._merchant_sold_usercnt_thirty_d = value
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def outer_dish_id(self):
return self._outer_dish_id
@outer_dish_id.setter
def outer_dish_id(self, value):
self._outer_dish_id = value
@property
def pict(self):
return self._pict
@pict.setter
def pict(self, value):
self._pict = value
@property
def platform(self):
return self._platform
@platform.setter
def platform(self, value):
self._platform = value
@property
def price(self):
return self._price
@price.setter
def price(self, value):
self._price = value
@property
def quantity(self):
return self._quantity
@quantity.setter
def quantity(self, value):
self._quantity = value
@property
def shop_id(self):
return self._shop_id
@shop_id.setter
def shop_id(self, value):
self._shop_id = value
@property
def sold_cnt_seven_d(self):
return self._sold_cnt_seven_d
@sold_cnt_seven_d.setter
def sold_cnt_seven_d(self, value):
self._sold_cnt_seven_d = value
@property
def sold_cnt_thirty_d(self):
return self._sold_cnt_thirty_d
@sold_cnt_thirty_d.setter
def sold_cnt_thirty_d(self, value):
self._sold_cnt_thirty_d = value
@property
def sold_reusercnt_thirty_d(self):
return self._sold_reusercnt_thirty_d
@sold_reusercnt_thirty_d.setter
def sold_reusercnt_thirty_d(self, value):
self._sold_reusercnt_thirty_d = value
@property
def sold_usercnt_thirty_d(self):
return self._sold_usercnt_thirty_d
@sold_usercnt_thirty_d.setter
def sold_usercnt_thirty_d(self, value):
self._sold_usercnt_thirty_d = value
@property
def sort_col(self):
return self._sort_col
@sort_col.setter
def sort_col(self, value):
if isinstance(value, list):
self._sort_col = list()
for i in value:
self._sort_col.append(i)
@property
def unit(self):
return self._unit
@unit.setter
def unit(self, value):
self._unit = value
def to_alipay_dict(self):
params = dict()
if self.content:
if hasattr(self.content, 'to_alipay_dict'):
params['content'] = self.content.to_alipay_dict()
else:
params['content'] = self.content
if self.dish_type_id:
if hasattr(self.dish_type_id, 'to_alipay_dict'):
params['dish_type_id'] = self.dish_type_id.to_alipay_dict()
else:
params['dish_type_id'] = self.dish_type_id
if self.dish_type_name:
if hasattr(self.dish_type_name, 'to_alipay_dict'):
params['dish_type_name'] = self.dish_type_name.to_alipay_dict()
else:
params['dish_type_name'] = self.dish_type_name
if self.good_level:
if hasattr(self.good_level, 'to_alipay_dict'):
params['good_level'] = self.good_level.to_alipay_dict()
else:
params['good_level'] = self.good_level
if self.merchant_sold_cnt_seven_d:
if hasattr(self.merchant_sold_cnt_seven_d, 'to_alipay_dict'):
params['merchant_sold_cnt_seven_d'] = self.merchant_sold_cnt_seven_d.to_alipay_dict()
else:
params['merchant_sold_cnt_seven_d'] = self.merchant_sold_cnt_seven_d
if self.merchant_sold_cnt_thirty_d:
if hasattr(self.merchant_sold_cnt_thirty_d, 'to_alipay_dict'):
params['merchant_sold_cnt_thirty_d'] = self.merchant_sold_cnt_thirty_d.to_alipay_dict()
else:
params['merchant_sold_cnt_thirty_d'] = self.merchant_sold_cnt_thirty_d
if self.merchant_sold_reusercnt_thirty_d:
if hasattr(self.merchant_sold_reusercnt_thirty_d, 'to_alipay_dict'):
params['merchant_sold_reusercnt_thirty_d'] = self.merchant_sold_reusercnt_thirty_d.to_alipay_dict()
else:
params['merchant_sold_reusercnt_thirty_d'] = self.merchant_sold_reusercnt_thirty_d
if self.merchant_sold_usercnt_thirty_d:
if hasattr(self.merchant_sold_usercnt_thirty_d, 'to_alipay_dict'):
params['merchant_sold_usercnt_thirty_d'] = self.merchant_sold_usercnt_thirty_d.to_alipay_dict()
else:
params['merchant_sold_usercnt_thirty_d'] = self.merchant_sold_usercnt_thirty_d
if self.name:
if hasattr(self.name, 'to_alipay_dict'):
params['name'] = self.name.to_alipay_dict()
else:
params['name'] = self.name
if self.outer_dish_id:
if hasattr(self.outer_dish_id, 'to_alipay_dict'):
params['outer_dish_id'] = self.outer_dish_id.to_alipay_dict()
else:
params['outer_dish_id'] = self.outer_dish_id
if self.pict:
if hasattr(self.pict, 'to_alipay_dict'):
params['pict'] = self.pict.to_alipay_dict()
else:
params['pict'] = self.pict
if self.platform:
if hasattr(self.platform, 'to_alipay_dict'):
params['platform'] = self.platform.to_alipay_dict()
else:
params['platform'] = self.platform
if self.price:
if hasattr(self.price, 'to_alipay_dict'):
params['price'] = self.price.to_alipay_dict()
else:
params['price'] = self.price
if self.quantity:
if hasattr(self.quantity, 'to_alipay_dict'):
params['quantity'] = self.quantity.to_alipay_dict()
else:
params['quantity'] = self.quantity
if self.shop_id:
if hasattr(self.shop_id, 'to_alipay_dict'):
params['shop_id'] = self.shop_id.to_alipay_dict()
else:
params['shop_id'] = self.shop_id
if self.sold_cnt_seven_d:
if hasattr(self.sold_cnt_seven_d, 'to_alipay_dict'):
params['sold_cnt_seven_d'] = self.sold_cnt_seven_d.to_alipay_dict()
else:
params['sold_cnt_seven_d'] = self.sold_cnt_seven_d
if self.sold_cnt_thirty_d:
if hasattr(self.sold_cnt_thirty_d, 'to_alipay_dict'):
params['sold_cnt_thirty_d'] = self.sold_cnt_thirty_d.to_alipay_dict()
else:
params['sold_cnt_thirty_d'] = self.sold_cnt_thirty_d
if self.sold_reusercnt_thirty_d:
if hasattr(self.sold_reusercnt_thirty_d, 'to_alipay_dict'):
params['sold_reusercnt_thirty_d'] = self.sold_reusercnt_thirty_d.to_alipay_dict()
else:
params['sold_reusercnt_thirty_d'] = self.sold_reusercnt_thirty_d
if self.sold_usercnt_thirty_d:
if hasattr(self.sold_usercnt_thirty_d, 'to_alipay_dict'):
params['sold_usercnt_thirty_d'] = self.sold_usercnt_thirty_d.to_alipay_dict()
else:
params['sold_usercnt_thirty_d'] = self.sold_usercnt_thirty_d
if self.sort_col:
if isinstance(self.sort_col, list):
for i in range(0, len(self.sort_col)):
element = self.sort_col[i]
if hasattr(element, 'to_alipay_dict'):
self.sort_col[i] = element.to_alipay_dict()
if hasattr(self.sort_col, 'to_alipay_dict'):
params['sort_col'] = self.sort_col.to_alipay_dict()
else:
params['sort_col'] = self.sort_col
if self.unit:
if hasattr(self.unit, 'to_alipay_dict'):
params['unit'] = self.unit.to_alipay_dict()
else:
params['unit'] = self.unit
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = IsvShopDishModel()
if 'content' in d:
o.content = d['content']
if 'dish_type_id' in d:
o.dish_type_id = d['dish_type_id']
if 'dish_type_name' in d:
o.dish_type_name = d['dish_type_name']
if 'good_level' in d:
o.good_level = d['good_level']
if 'merchant_sold_cnt_seven_d' in d:
o.merchant_sold_cnt_seven_d = d['merchant_sold_cnt_seven_d']
if 'merchant_sold_cnt_thirty_d' in d:
o.merchant_sold_cnt_thirty_d = d['merchant_sold_cnt_thirty_d']
if 'merchant_sold_reusercnt_thirty_d' in d:
o.merchant_sold_reusercnt_thirty_d = d['merchant_sold_reusercnt_thirty_d']
if 'merchant_sold_usercnt_thirty_d' in d:
o.merchant_sold_usercnt_thirty_d = d['merchant_sold_usercnt_thirty_d']
if 'name' in d:
o.name = d['name']
if 'outer_dish_id' in d:
o.outer_dish_id = d['outer_dish_id']
if 'pict' in d:
o.pict = d['pict']
if 'platform' in d:
o.platform = d['platform']
if 'price' in d:
o.price = d['price']
if 'quantity' in d:
o.quantity = d['quantity']
if 'shop_id' in d:
o.shop_id = d['shop_id']
if 'sold_cnt_seven_d' in d:
o.sold_cnt_seven_d = d['sold_cnt_seven_d']
if 'sold_cnt_thirty_d' in d:
o.sold_cnt_thirty_d = d['sold_cnt_thirty_d']
if 'sold_reusercnt_thirty_d' in d:
o.sold_reusercnt_thirty_d = d['sold_reusercnt_thirty_d']
if 'sold_usercnt_thirty_d' in d:
o.sold_usercnt_thirty_d = d['sold_usercnt_thirty_d']
if 'sort_col' in d:
o.sort_col = d['sort_col']
if 'unit' in d:
o.unit = d['unit']
return o
| 35.902579 | 115 | 0.623224 | 1,679 | 12,530 | 4.222156 | 0.042287 | 0.08887 | 0.076174 | 0.055015 | 0.686133 | 0.556073 | 0.417266 | 0.200028 | 0.154747 | 0.113979 | 0 | 0.000224 | 0.288348 | 12,530 | 348 | 116 | 36.005747 | 0.794774 | 0.003352 | 0 | 0.133333 | 0 | 0 | 0.118872 | 0.050304 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.006349 | 0.066667 | 0.228571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b24190389e4c642732c6bdc6f96cc2c88a3d22ff | 3,104 | py | Python | scripts/test_1000.py | OneStarSolution/prometeo | ed2d534600c0c6207c6d351c54bd3307d15a7859 | [
"MIT"
] | null | null | null | scripts/test_1000.py | OneStarSolution/prometeo | ed2d534600c0c6207c6d351c54bd3307d15a7859 | [
"MIT"
] | 1 | 2021-05-06T16:48:52.000Z | 2021-05-06T16:48:52.000Z | scripts/test_1000.py | OneStarSolution/prometeo | ed2d534600c0c6207c6d351c54bd3307d15a7859 | [
"MIT"
] | 4 | 2021-01-12T21:22:41.000Z | 2021-11-10T05:13:25.000Z | from multiprocessing.dummy import Pool
import requests
import pandas as pd
from requests.models import Response
def on_success(r: Response):
if r.status_code == 200:
print(f'Post succeed: {r.json()}')
else:
print(f'Post failed: {r}')
def on_error(ex: Exception):
print(f'Post requests failed: {ex}')
# Creates a thread pool (80 workers here); more threads = more concurrency.
pool = Pool(80)
# "pool" is a module attribute; you can be sure there will only
# be one of them in your application
# as modules are cached after initialization.
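# Note: multiprocessing.dummy.Pool uses threads rather than processes, so the
# requests.post calls issued below overlap on network I/O while sharing one
# interpreter (a descriptive note inferred from the import at the top of the file).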
if __name__ == '__main__':
futures_bbb = []
futures_yp = []
test_size = 5
with open('valid_zipcodes.csv', 'r') as f:
locations = [line.strip() for line in f.readlines()]
# locations = ["85033", "90001", "84044", "33101", "68001"]
categories = ["Concrete",
"Flooring",
"Glass",
"Doors",
"Tree Services",
"Interior Cleaning"]
for category in categories:
for location in (locations):
location = location.zfill(5)
futures_bbb.append(pool.apply_async(requests.post,
args=[
'https://82ip2yupkh.execute-api.us-west-1.amazonaws.com/stage/runbbb'],
kwds={'json': {
"country": "USA", "location": location,
"category": category}},
callback=on_success, error_callback=on_error))
futures_yp.append(pool.apply_async(requests.post,
args=[
'https://82ip2yupkh.execute-api.us-west-1.amazonaws.com/stage/runyp'],
kwds={'json': {"country": "USA", "location": location,
"category": category}},
callback=on_success, error_callback=on_error))
res_bbb = []
# futures is now a list of 10 futures.
for future in futures_bbb:
try:
# For each future, wait until the request is
res_bbb.append(future.get().json())
# finished and then print the response object.
except Exception as e:
print(e)
pass
res_yp = []
# futures is now a list of 10 futures.
for future in futures_yp:
try:
# For each future, wait until the request is
res_yp.append(future.get().json())
# finished and then print the response object.
except Exception as e:
# print(e)
pass
df = pd.DataFrame(res_bbb)
df.to_csv(f"test_{test_size}_zipcodes_results_bbb.csv", index=False)
df = pd.DataFrame(res_yp)
df.to_csv(f"test_{test_size}_zipcodes_results_yp.csv", index=False)
| 35.678161 | 123 | 0.509987 | 334 | 3,104 | 4.60479 | 0.41018 | 0.026008 | 0.019506 | 0.026008 | 0.490247 | 0.490247 | 0.490247 | 0.490247 | 0.490247 | 0.444733 | 0 | 0.023293 | 0.39143 | 3,104 | 86 | 124 | 36.093023 | 0.790895 | 0.168492 | 0 | 0.210526 | 0 | 0.035088 | 0.16472 | 0.031542 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035088 | false | 0.035088 | 0.070175 | 0 | 0.105263 | 0.070175 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2420f1896b2081c636df3a35f79705c08b70284 | 747 | py | Python | good_spot/pages/models.py | jasmine92122/NightClubBackend | 7f59129b78baaba0e0c25de2b493033b858f1b00 | [
"MIT"
] | null | null | null | good_spot/pages/models.py | jasmine92122/NightClubBackend | 7f59129b78baaba0e0c25de2b493033b858f1b00 | [
"MIT"
] | 5 | 2020-02-12T03:13:11.000Z | 2022-01-13T01:41:14.000Z | good_spot/pages/models.py | jasmine92122/NightClubBackend | 7f59129b78baaba0e0c25de2b493033b858f1b00 | [
"MIT"
] | null | null | null | from django.db import models
from model_utils import Choices
from model_utils.models import TimeStampedModel
from ckeditor.fields import RichTextField
class Page(TimeStampedModel):
TYPE = Choices(
('terms', 'terms'),
('policy', 'policy'),
('faq', 'faq')
)
title = models.CharField(max_length=100)
slug = models.CharField(unique=True, choices=TYPE, max_length=6)
short_description = models.CharField(max_length=255, blank=True, null=True)
text = RichTextField()
is_published = models.BooleanField(default=False)
order = models.PositiveIntegerField(default=0, blank=False, null=False)
class Meta(object):
ordering = ['order']
def __str__(self):
return self.title
| 27.666667 | 79 | 0.690763 | 87 | 747 | 5.804598 | 0.551724 | 0.089109 | 0.055446 | 0.09505 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013333 | 0.196787 | 747 | 26 | 80 | 28.730769 | 0.828333 | 0 | 0 | 0 | 0 | 0 | 0.044177 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.2 | 0.05 | 0.75 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b243f09d3924e679ebc081a90fdc4b71d08d90fd | 257 | py | Python | componentsdb/default_settings.py | rjw57/componentsdb | 7e5fd96d3afbbcde09d2f7fba1d6c86975e41272 | [
"MIT"
] | null | null | null | componentsdb/default_settings.py | rjw57/componentsdb | 7e5fd96d3afbbcde09d2f7fba1d6c86975e41272 | [
"MIT"
] | null | null | null | componentsdb/default_settings.py | rjw57/componentsdb | 7e5fd96d3afbbcde09d2f7fba1d6c86975e41272 | [
"MIT"
] | null | null | null | """
Default configuration for the application.
"""
# DEBUG and TESTING can be security holes so ensure they default to off
DEBUG = False
TESTING = False
# Page size for paginated resources
PAGE_SIZE = 20
# Auto-commit
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
| 17.133333 | 71 | 0.774319 | 37 | 257 | 5.27027 | 0.783784 | 0.082051 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009346 | 0.167315 | 257 | 14 | 72 | 18.357143 | 0.901869 | 0.618677 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b24d7b2dd4a28be23ae22fa8b624e92e081971d8 | 1,774 | py | Python | tests/stdout_test_main.py | schliffen/fxcollect | 75282579d4d5cbc5e64f74be0e069f2f7eaca59d | [
"MIT"
] | 29 | 2018-02-17T21:04:42.000Z | 2022-02-25T10:22:10.000Z | tests/stdout_test_main.py | schliffen/fxcollect | 75282579d4d5cbc5e64f74be0e069f2f7eaca59d | [
"MIT"
] | 4 | 2018-12-29T21:48:22.000Z | 2021-07-09T20:13:45.000Z | tests/stdout_test_main.py | schliffen/fxcollect | 75282579d4d5cbc5e64f74be0e069f2f7eaca59d | [
"MIT"
] | 9 | 2018-02-27T20:41:07.000Z | 2021-08-07T21:12:30.000Z | from subprocess import Popen, PIPE
from threading import Thread
from queue import Queue
import time
class SubprocessHandler(object):
def __init__(self, events_queue):
self._events_queue = events_queue
self.process = {}
def _send_job_to_subprocess(self, sumting, job):
p = self.process[sumting]['process']
p.stdin.write('%s\n' % job)
def initialise_sub(self, sumting):
sub = Popen(['python3', 'fx_collect/stdout_test.py'],
stdin=PIPE, stdout=PIPE,
shell=False, bufsize=0,
universal_newlines=True)
nbsr = SubprocessQueue(
sub.stdout, self._events_queue)
sub_attribs = {
'process': sub,
'pipe': nbsr}
self.process[sumting] = sub_attribs
def on_collect(self, event):
jobno = str(event[0])
sumting = str(event[1])
if sumting not in self.process:
self.initialise_sub(sumting)
job = '{0}, {1}'.format(
jobno, sumting)
self._send_job_to_subprocess(sumting, job)
class SubprocessQueue(object):
def __init__(self, stream, events_queue):
self._s = stream
self._q = events_queue
def _streamQueue(s, q):
while True:
response = s.readline()
if response:
q.put(response)
self._t = Thread(target=_streamQueue,
args=(self._s, self._q,))
self._t.daemon = True
self._t.start()
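# SubprocessQueue drains the child process's stdout on a daemon thread so that
# readline() never blocks the caller; each line is pushed onto the shared events
# queue, and consumers receive it with q.get() (or q.get_nowait()). The daemon
# flag lets the interpreter exit without joining the reader thread.
# (Descriptive comment; behaviour inferred from the class above.)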
from datetime import datetime
q = Queue()
s = SubprocessHandler(q)
while True:
for jobno in range(200):
event = jobno, jobno*10
s.on_collect(event)
time.sleep(60)
| 28.15873 | 62 | 0.563698 | 201 | 1,774 | 4.766169 | 0.358209 | 0.068894 | 0.046973 | 0.035491 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011008 | 0.334273 | 1,774 | 62 | 63 | 28.612903 | 0.800169 | 0 | 0 | 0.039216 | 0 | 0 | 0.036215 | 0.014603 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.098039 | 0 | 0.254902 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b24dad5017dbbe026b224bcbe64e24c9dc3ffb72 | 33,765 | py | Python | scripts/analyze_csv_data.py | Aristxcy/FirstOrderLp.jl | fd83c8f099f34bbc39494c1bf0a88535395edd9e | [
"Apache-2.0"
] | 66 | 2021-05-03T18:15:15.000Z | 2022-03-26T10:10:10.000Z | scripts/analyze_csv_data.py | Aristxcy/FirstOrderLp.jl | fd83c8f099f34bbc39494c1bf0a88535395edd9e | [
"Apache-2.0"
] | 21 | 2021-05-05T17:46:14.000Z | 2022-02-15T13:56:12.000Z | scripts/analyze_csv_data.py | Aristxcy/FirstOrderLp.jl | fd83c8f099f34bbc39494c1bf0a88535395edd9e | [
"Apache-2.0"
] | 14 | 2021-05-19T16:24:54.000Z | 2022-03-14T20:29:58.000Z | # Copyright 2021 The FirstOrderLp Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script generates all the experimental results used in the paper.
# It requires python 3, numpy, pandas, and matplotlib installed to run.
#
# `python analyze_csv_data.py`
#
# It reads csv files containing experimental results from ./csv, and outputs
# pdf figures to ./results/figs and latex tables to ./results/tex.
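#
# Expected layout (a sketch inferred from the constants and read_csv calls below;
# the exact csv file names may differ):
#   ./csv/<dataset>_<experiment>_<budget>.csv   e.g. ./csv/miplib_pdhg_enhanced_100k.csv
#   ./results/figs/   generated pdf figures
#   ./results/tex/    generated latex tables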
import itertools
import os
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from cycler import cycler
plt.rcParams.update({"figure.max_open_warning": 0, "font.size": 16})
# The 'TkAgg' matplotlib backend fails at the default recursion limit.
sys.setrecursionlimit(10000)
# This is required to generate plots that are easy to read when printed:
plt.rcParams["axes.prop_cycle"] = cycler(
linestyle=["-", "--", ":", "-.", "-", "--", ":", "-.", "-", "--"],
color=[
"#1f77b4",
"#ff7f0e",
"#2ca02c",
"#d62728",
"#9467bd",
"#8c564b",
"#e377c2",
"#7f7f7f",
"#bcbd22",
"#17becf",
],
)
# directory where the csv files are located
CSV_DIR = "./csv"
# directory where all the figure pdf and table tex files are written to:
OUTPUT_DIR = "./results"
FIGS_DIR = os.path.join(OUTPUT_DIR, "figs")
TEX_DIR = os.path.join(OUTPUT_DIR, "tex")
OPT = "TERMINATION_REASON_OPTIMAL"
KKT_PASSES_LIMIT = 1e5
TIME_LIMIT_SECS = 60 * 60 # 1hr
TIME_LIMIT_SECS_ABLATION = 6 * 60 * 60 # 6hr
# shift to use for shifted geometric mean
SGM_SHIFT = int(10)
# penalised average runtime:
PAR = 1.0 # can be None, which removes unsolved instead of penalizing
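# With PAR = 1.0 an instance that did not reach OPT is charged the full budget
# (PAR * KKT_PASSES_LIMIT passes, or PAR * the wall-clock limit) when the shifted
# geometric means are computed below; with PAR = None it is treated as NaN and
# dropped by shifted_geomean instead.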
# Which scaling experiments to present
SCALING_EXPS_TO_USE = [
"off,off",
"off,pock_chambolle alpha=1",
"10 rounds,off",
"10 rounds,pock_chambolle alpha=1",
]
# Which primal-weight experiments to present
PRIMALWEIGHT_EXPS_TO_USE = [
"adaptive",
#'Fixed 1e-0',
]
# placeholder:
_BEST_STR = "_best_str_"
_BEST_FIXED = "_best_fixed_"
# Dataset names:
MITTELMANN_STR = "lp_benchmark"
MIPLIB_STR = "mip_relaxations"
NETLIB_STR = "netlib"
# Change table font size to fit paper:
LATEX_FONT_SIZE = "\\small"
# Naming for improvements plots:
_PDHG = "PDHG"
_RESTARTS = "+restarts"
_SCALING = "+scaling"
_PRIMAL_WEIGHT = "+primal\nweight"
_STEPSIZE = "+step\nsize"
_PRESOLVE = "+presolve\n(= PDLP)"
# Order in which improvements should appear:
IMPROVEMENTS_ORDER = [
_PDHG,
_RESTARTS,
_SCALING,
_PRIMAL_WEIGHT,
_STEPSIZE,
_PRESOLVE,
]
IMPROVEMENTS_ORDER_IDX = dict(
zip(IMPROVEMENTS_ORDER, range(len(IMPROVEMENTS_ORDER)))
)
# Horrible HACK, but needs to be done
def label_lookup(label):
if "pdhg_enhanced" in label:
return "PDLP"
if "mirror-prox" in label:
return "Enh. Extragradient"
if "pdhg_vanilla" in label:
return "PDHG"
if "scs-indirect" in label:
return "SCS (matrix-free)"
if "scs-direct" in label:
return "SCS"
if "nopresolve" in label:
return "No presolve"
if "no restarts" in label:
return "No restart"
if "adaptive theoretical" in label:
return "Adaptive restart (theory)"
if "adaptive enhanced" in label:
return "PDLP"
if "pdhg" in label and "pdhg_mp_1h" in label:
return "PDLP"
if "off,off" in label:
return "No scaling"
if "off,pock_chambolle alpha=1" in label:
return "Pock-Chambolle"
if "10 rounds,off" in label:
return "Ruiz"
if "10 rounds,pock_chambolle alpha=1" in label:
return "Ruiz + Pock-Chambolle"
if "stepsize" in label:
if "adaptive" in label:
return "PDLP"
if "fixed" in label:
return "Fixed step-size"
if "scaling" in label:
if _BEST_STR in label:
return "Best per-instance scaling"
if "primalweight" in label:
if "adaptive" in label:
return "PDLP"
if "Fixed 1e-0" in label:
return r"Fixed PW ($\theta=0$)"
if _BEST_STR in label:
return "Best per-instance PW"
if _BEST_FIXED in label:
return "Best fixed PW"
if "improvements" in label:
if "vanilla" in label:
return _PDHG
st = ""
if "restarts" in label:
st = _RESTARTS
if "scaling" in label:
st = _SCALING
if "primal weight" in label:
st = _PRIMAL_WEIGHT
if "step size" in label:
st = _STEPSIZE
if "pdlp_final" in label:
st = _PRESOLVE
return st
if "malitskypock" in label:
if _BEST_STR in label:
return "Best per-instance MP settings"
return "Best fixed MP setting"
return label
def sanitize_title(title):
title = title.replace("_", " ").title()
title = title.replace("Lp", "LP")
title = title.replace("Mip", "MIP")
title = title.replace("Pdlp", "PDLP")
title = title.replace("Pdhg", "PDHG")
title = title.replace("Scs", "SCS")
title = title.replace("Sgm", "SGM")
return title
# Generate plots of xaxis vs fraction of solved problems
def solved_problems_vs_xaxis_figs(
dfs, xaxis, xlabel, prefix, num_instances, legend_location="best", xmin=0.0
):
fig = plt.figure()
stats_dfs = {}
for k, df_k in dfs.items():
stats_df = (
df_k.groupby(xaxis)[xaxis]
.agg("count")
.pipe(pd.DataFrame)
.rename(columns={xaxis: "frequency"})
)
stats_df["cum_solved_count"] = (
stats_df["frequency"].cumsum() / num_instances
)
stats_df = stats_df.drop(columns="frequency").reset_index()
stats_dfs[k] = stats_df
max_xaxis = pd.concat(stats_dfs)[xaxis].max()
lines = []
labels = []
for k, df_k in stats_dfs.items():
if df_k.empty:
continue
df_k = df_k.append(
{
xaxis: max_xaxis,
"cum_solved_count": df_k.iloc[-1]["cum_solved_count"],
},
ignore_index=True,
)
df_k.reset_index()
label = label_lookup(k)
lines.extend(
plt.plot(df_k[xaxis], df_k["cum_solved_count"], label=label)
)
labels.append(label)
plt.ylabel("Fraction of problems solved")
plt.xlabel(xlabel)
plt.ylim((0, 1))
plt.ticklabel_format(axis="x", style="sci", scilimits=(0, 0))
plt.title(sanitize_title(prefix))
plt.xscale("log")
plt.xlim(left=xmin)
if legend_location == "outer":
plt.legend(bbox_to_anchor=(1.04, 0.5), loc="center left")
elif legend_location == "separate":
figlegend = plt.figure()
figlegend.legend(lines, labels, ncol=len(lines), loc="center")
legendpath = os.path.join(
FIGS_DIR, f"{prefix}_{xaxis}_v_solved_probs_legend.pdf"
)
figlegend.savefig(legendpath, bbox_inches="tight")
else:
plt.legend(loc=legend_location)
path = os.path.join(FIGS_DIR, f"{prefix}_{xaxis}_v_solved_probs.pdf")
fig.savefig(path, bbox_inches="tight")
def gen_solved_problems_plots(
df, prefix, num_instances, legend_location="best"
):
exps = df["experiment_label"].unique()
dfs = {k: df[df["experiment_label"] == k] for k in exps}
optimal_dfs = {
k: v[v["termination_reason"] == OPT] for (k, v) in dfs.items()
}
solved_problems_vs_xaxis_figs(
optimal_dfs,
"cumulative_kkt_matrix_passes",
f"KKT matrix passes SGM{SGM_SHIFT}",
prefix,
num_instances,
legend_location,
xmin=100,
)
solved_problems_vs_xaxis_figs(
optimal_dfs,
"solve_time_sec",
"Wall-clock time (secs)",
prefix,
num_instances,
legend_location,
xmin=1.0,
)
def gen_solved_problems_plots_split_tol(
df, prefix, num_instances, legend_location="best"
):
tols = df["tolerance"].unique()
for t in tols:
gen_solved_problems_plots(
df[df["tolerance"] == t],
prefix + f"_tol_{t:.0E}",
num_instances,
legend_location,
)
def shifted_geomean(x, shift):
x = x[~np.isnan(x)]
sgm = np.exp(np.sum(np.log(x + shift) / len(x))) - shift
return sgm if sgm > 0 else np.nan
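# Worked example (illustrative values only): with shift = 10,
#   shifted_geomean([10, 1000], 10) = exp((log(20) + log(1010)) / 2) - 10
#                                   = sqrt(20 * 1010) - 10 ≈ 132.1,
# versus a plain geometric mean of 100; the shift damps the influence of very
# small values.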
def change_table_font_size(table):
table = table.replace(
"\\begin{table}\n", "\\begin{table}\n" + LATEX_FONT_SIZE + "\n"
)
table = table.replace("\\caption{", "\\caption{" + LATEX_FONT_SIZE + " ")
return table
def gen_total_solved_problems_table(
df, prefix, par, time_limit=TIME_LIMIT_SECS
):
solved_probs = (
df[df["termination_reason"] == OPT]
.groupby("experiment_label")["experiment_label"]
.agg("count")
.pipe(pd.DataFrame)
.rename(columns={"experiment_label": "Solved count"})
)
solved_probs.index.name = "Experiment"
solved_probs = solved_probs.reset_index()
shift = SGM_SHIFT
kkt_sgm = df.copy()
if par is not None:
kkt_sgm.loc[
kkt_sgm["termination_reason"] != OPT, "cumulative_kkt_matrix_passes"
] = (par * KKT_PASSES_LIMIT)
else:
kkt_sgm.loc[
kkt_sgm["termination_reason"] != OPT, "cumulative_kkt_matrix_passes"
] = np.nan
# Hack for SCS direct
kkt_sgm.loc[
kkt_sgm["experiment_label"].str.contains("scs-direct"),
"cumulative_kkt_matrix_passes",
] = np.nan
kkt_sgm = (
kkt_sgm.groupby("experiment_label")["cumulative_kkt_matrix_passes"]
.agg(lambda _: shifted_geomean(_, shift))
.pipe(pd.DataFrame)
.rename(
columns={"cumulative_kkt_matrix_passes": f"KKT passes SGM{shift}"}
)
)
kkt_sgm.index.name = "Experiment"
kkt_sgm = kkt_sgm.reset_index()
wall_clock = df.copy()
if par is not None:
wall_clock.loc[
wall_clock["termination_reason"] != OPT, "solve_time_sec"
] = (par * time_limit)
else:
wall_clock.loc[
wall_clock["termination_reason"] != OPT, "solve_time_sec"
] = np.nan
wall_clock = (
wall_clock.groupby("experiment_label")["solve_time_sec"]
.agg(lambda _: shifted_geomean(_, shift))
.pipe(pd.DataFrame)
.rename(columns={"solve_time_sec": f"Solve time secs SGM10"})
)
wall_clock.index.name = "Experiment"
wall_clock = wall_clock.reset_index()
output = solved_probs.merge(kkt_sgm).merge(wall_clock)
# rename the labels
for e in output["Experiment"]:
output.loc[output["Experiment"] == e, "Experiment"] = label_lookup(e)
output = output.sort_values("Solved count", ascending=True)
# HACK to fix improvements table ordering and line break
if "improvements" in prefix:
output["rank"] = output["Experiment"].map(IMPROVEMENTS_ORDER_IDX)
output.sort_values("rank", inplace=True)
output.drop(labels="rank", axis=1, inplace=True)
to_write = output.copy()
for e in to_write["Experiment"]:
to_write.loc[to_write["Experiment"] == e, "Experiment"] = e.replace(
"\n", " "
)
else:
to_write = output
table = to_write.to_latex(
float_format="%.1f",
longtable=False,
index=False,
caption=f"Performance statistics: {sanitize_title(prefix)}",
label=f"t:solved-probs-{prefix}",
column_format="lccc",
escape=False,
na_rep="-",
)
table = change_table_font_size(table)
path = os.path.join(TEX_DIR, f"{prefix}_solved_probs_table.tex")
with open(path, "w") as f:
f.write(table)
return output
def gen_total_solved_problems_table_split_tol(
df, prefix, par, time_limit=TIME_LIMIT_SECS
):
outputs = {}
tols = df["tolerance"].unique()
for t in tols:
outputs[t] = gen_total_solved_problems_table(
df[df["tolerance"] == t], prefix + f"_tol_{t:.0E}", par, time_limit
)
return outputs
def plot_loghist(x, nbins):
x = x[~np.isnan(x)]
hist, bins = np.histogram(x, bins=nbins)
logbins = np.logspace(
np.log10(max(bins[0], 1e-10)), np.log10(max(bins[-1], 1e-10)), nbins
)
plt.hist(x, bins=logbins)
plt.xscale("log")
def gen_ratio_histograms_split_tol(df, prefix, par):
tols = df["tolerance"].unique()
for t in tols:
gen_ratio_histograms(
df[df["tolerance"] == t],
prefix + f"_tol_{t:.0E}",
"cumulative_kkt_matrix_passes",
f"KKT matrix passes SGM{SGM_SHIFT}",
KKT_PASSES_LIMIT,
par,
)
gen_ratio_histograms(
df[df["tolerance"] == t],
prefix + f"_tol_{t:.0E}",
"solve_time_sec",
"Wall-clock time (secs)",
TIME_LIMIT_SECS,
par,
)
def gen_ratio_histograms(df, prefix, xaxis, xlabel, limit, par):
assert len(df["experiment_label"].unique()) == 2
(l0, l1) = df["experiment_label"].unique()
def performance_ratio_fn(df, par):
df = df.reset_index()
assert len(df) <= 2
df0 = df[df["experiment_label"] == l0]
df1 = df[df["experiment_label"] == l1]
instance = df.instance_name.unique()
if len(df0) == 1 and df0["termination_reason"].iloc[0] == OPT:
kkt_passes_0 = df0[xaxis].iloc[0]
else:
kkt_passes_0 = par * limit
if len(df1) == 1 and df1["termination_reason"].iloc[0] == OPT:
kkt_passes_1 = df1[xaxis].iloc[0]
else:
kkt_passes_1 = par * limit
# if (df['termination_reason'] != OPT).any():
# return np.nan
return kkt_passes_0 / kkt_passes_1
ratios = (
df.groupby(["instance_name"])
.apply(lambda _: performance_ratio_fn(_, par))
.reset_index(name="ratio")
.dropna()
)
nbins = min(len(ratios) // 3, 25)
if nbins > 0:
plt.figure(figsize=(10, 6))
plt.title(
sanitize_title(
f"{prefix} {xlabel} {label_lookup(l0)}:{label_lookup(l1)}"
)
)
plot_loghist(ratios["ratio"], nbins)
path = os.path.join(
FIGS_DIR,
f"{prefix}_{label_lookup(l0)}_{label_lookup(l1)}"
+ f"_{xaxis}_performance_ratio.pdf",
)
plt.savefig(path)
table = ratios.to_latex(
float_format="%.2f",
longtable=False,
index=False,
caption=f"Performance ratio.",
label=f"t:ratio-{prefix}",
column_format="lc",
na_rep="-",
)
table = change_table_font_size(table)
path = os.path.join(
TEX_DIR,
f"{prefix}_{label_lookup(l0)}:"
f"{label_lookup(l1)}_{xaxis}_ratio_table.tex",
)
with open(path, "w") as f:
f.write(table)
# Unsolved problems might be missing from csv, make sure all are accounted for.
def fill_in_missing_problems(df, instances_list):
new_index = pd.Index(instances_list, name="instance_name")
experiments = df["experiment_label"].unique()
dfs = []
for e in experiments:
old_df = df[df["experiment_label"] == e]
tol = old_df["tolerance"].unique()[0]
new_df = (
old_df.set_index("instance_name").reindex(new_index).reset_index()
)
# otherwise these would be nan
new_df["tolerance"] = tol
new_df["experiment_label"] = e
dfs.append(new_df)
return pd.concat(dfs)
def improvements_plot(dfs, prefix, key, ascending):
normalized_dfs = []
for df in dfs:
df[key] /= df[df["Experiment"] == "PDHG"][key].to_numpy()[0]
normalized_dfs.append(df)
df = pd.concat(normalized_dfs)
fig = plt.figure(figsize=(10, 6))
markers = itertools.cycle(["o", "v", "^", "<", ">", "s"])
for tol in df["tolerance"].unique():
_df = df[df["tolerance"] == tol].reset_index(drop=True)
plt.plot(
_df[key].to_numpy(),
linestyle="--",
marker=next(markers),
markersize=12,
label=f"tolerance {tol:.0E}",
)
plt.yscale("log")
plt.ylabel("Normalized " + key, fontsize=20)
plt.title(sanitize_title(prefix), fontsize=20)
plt.xticks(range(len(_df["Experiment"])), _df["Experiment"].to_list())
plt.tick_params(axis="both", which="both", labelsize=20)
ax = plt.gca()
ax.yaxis.set_major_locator(ticker.LogLocator(subs=[1, 2, 3, 5, 7]))
ax.yaxis.set_major_formatter(
ticker.LogFormatterSciNotation(
labelOnlyBase=False, minor_thresholds=(4, 2)
)
)
# ax.yaxis.set_major_formatter(ticker.FormatStrFormatter("%.2f")
if len(dfs) > 1:
plt.legend(loc="best", prop={"size": 20})
name = key.replace(" ", "_")
path = os.path.join(FIGS_DIR, f"{prefix}_{name}.pdf")
plt.savefig(path, bbox_inches="tight")
def gen_all_improvement_plots(outputs, prefix):
dfs = []
for tol, df in outputs.items():
df = df.copy()
df["tolerance"] = tol
dfs.append(df)
improvements_plot(dfs, prefix, "KKT passes SGM10", ascending=False)
improvements_plot(dfs, prefix, "Solve time secs SGM10", ascending=False)
improvements_plot(dfs, prefix, "Solved count", ascending=True)
# First, make output directories
if not os.path.exists(FIGS_DIR):
os.makedirs(FIGS_DIR)
if not os.path.exists(TEX_DIR):
os.makedirs(TEX_DIR)
# Get clean list of all problems we tested on:
with open("../benchmarking/mip_relaxations_instance_list") as f:
miplib_instances = f.readlines()
miplib_instances = [p.strip() for p in miplib_instances if p[0] != "#"]
with open("../benchmarking/lp_benchmark_instance_list") as f:
mittelmann_instances = f.readlines()
mittelmann_instances = [p.strip() for p in mittelmann_instances if p[0] != "#"]
with open("../benchmarking/netlib_benchmark_instance_list") as f:
netlib_instances = f.readlines()
netlib_instances = [p.strip() for p in netlib_instances if p[0] != "#"]
# Pull out 'default' (ie best) pdhg implementation to compare against:
df_default = pd.read_csv(os.path.join(CSV_DIR, "miplib_pdhg_enhanced_100k.csv"))
df_default = fill_in_missing_problems(df_default, miplib_instances)
######################################################################
# PDLP pdhg vs vanilla pdhg (JOIN DEFAULT)
df = pd.read_csv(os.path.join(CSV_DIR, "miplib_pdhg_vanilla_100k.csv"))
df = fill_in_missing_problems(df, miplib_instances)
df = pd.concat((df_default, df))
gen_solved_problems_plots_split_tol(df, f"{MIPLIB_STR}", len(miplib_instances))
gen_total_solved_problems_table_split_tol(df, f"{MIPLIB_STR}", PAR)
gen_ratio_histograms_split_tol(df, f"{MIPLIB_STR}", PAR)
######################################################################
df = pd.read_csv(os.path.join(CSV_DIR, "mittelmann_pdhg_enhanced_100k.csv"))
df = fill_in_missing_problems(df, mittelmann_instances)
df_vanilla = pd.read_csv(
os.path.join(CSV_DIR, "mittelmann_improvements_100k.csv")
)
df_vanilla = df_vanilla[df_vanilla["enhancements"] == "vanilla"]
df_vanilla = fill_in_missing_problems(df_vanilla, mittelmann_instances)
df = pd.concat((df, df_vanilla))
gen_solved_problems_plots_split_tol(
df, f"{MITTELMANN_STR}", len(mittelmann_instances)
)
gen_total_solved_problems_table_split_tol(df, f"{MITTELMANN_STR}", PAR)
gen_ratio_histograms_split_tol(df, f"{MITTELMANN_STR}", PAR)
######################################################################
df = pd.read_csv(os.path.join(CSV_DIR, "netlib_pdhg_enhanced_100k.csv"))
df = fill_in_missing_problems(df, netlib_instances)
df_vanilla = pd.read_csv(os.path.join(CSV_DIR, "netlib_improvements_100k.csv"))
df_vanilla = df_vanilla[df_vanilla["enhancements"] == "vanilla"]
df_vanilla = fill_in_missing_problems(df_vanilla, netlib_instances)
df = pd.concat((df, df_vanilla))
gen_solved_problems_plots_split_tol(df, f"{NETLIB_STR}", len(netlib_instances))
gen_total_solved_problems_table_split_tol(df, f"{NETLIB_STR}", PAR)
gen_ratio_histograms_split_tol(df, f"{NETLIB_STR}", PAR)
######################################################################
# Scaling results (JOIN DEFAULT)
df = pd.read_csv(os.path.join(CSV_DIR, "miplib_malitskypock_100k.csv"))
mp_solved = (
df[df["termination_reason"] == OPT]
.groupby(["experiment_label", "tolerance"])["experiment_label"]
.agg("count")
.pipe(pd.DataFrame)
.rename(columns={"experiment_label": "solved"})
.reset_index()
)
dfs = []
for t in df["tolerance"].unique():
_df = mp_solved[mp_solved["tolerance"] == t]
best_mp_run = _df.loc[_df["solved"].idxmax()]["experiment_label"]
dfs.append(df[df["experiment_label"] == best_mp_run])
df_best_ind = fill_in_missing_problems(pd.concat(dfs), miplib_instances)
# Pull out best performing scaling for each instance / tolerance:
df_best_fixed = df[df["termination_reason"] == OPT].reset_index()
best_idxs = df_best_fixed.groupby(["instance_name", "tolerance"])[
"cumulative_kkt_matrix_passes"
].idxmin()
df_best_fixed = df_best_fixed.loc[best_idxs]
for t in df_best_fixed["tolerance"].unique():
# rename the experiment label
df_best_fixed.loc[
df_best_fixed["tolerance"] == t, "experiment_label"
] = f"malitskypock {_BEST_STR} {t}"
df_best_fixed = fill_in_missing_problems(df_best_fixed, miplib_instances)
df_stepsize = pd.read_csv(os.path.join(CSV_DIR, "miplib_stepsize_100k.csv"))
df_stepsize = fill_in_missing_problems(df_stepsize, miplib_instances)
df = pd.concat((df_stepsize, df_best_fixed, df_best_ind))
gen_solved_problems_plots_split_tol(
df, f"{MIPLIB_STR}_stepsize", len(miplib_instances), False
)
gen_total_solved_problems_table_split_tol(
df, f"{MIPLIB_STR}_stepsize", PAR, TIME_LIMIT_SECS_ABLATION
)
######################################################################
# PDLP vs mp vs scs on MIPLIB (JOIN PDHG/MP WITH SCS)
df_pdhg_mp = pd.read_csv(os.path.join(CSV_DIR, "miplib_pdhg_mp_1h.csv"))
df_pdhg_mp = fill_in_missing_problems(df_pdhg_mp, miplib_instances)
df_scs = pd.read_csv(os.path.join(CSV_DIR, "miplib_scs_1h.csv"))
df_scs = fill_in_missing_problems(df_scs, miplib_instances)
df_pdhg_vanilla = pd.read_csv(
os.path.join(CSV_DIR, "miplib_pdhg_vanilla_1h.csv")
)
df_pdhg_vanilla = fill_in_missing_problems(df_pdhg_vanilla, miplib_instances)
df = pd.concat((df_pdhg_mp, df_pdhg_vanilla, df_scs))
gen_solved_problems_plots_split_tol(
df,
f"{MIPLIB_STR}_baselines",
len(miplib_instances),
legend_location="separate",
)
gen_total_solved_problems_table_split_tol(df, f"{MIPLIB_STR}_baselines", PAR)
df_pdhg_scs_dir = pd.concat(
(
df_pdhg_mp[df_pdhg_mp["method"] == "pdhg"],
df_scs[df_scs["method"] == "scs-direct"],
)
)
df_pdhg_scs_indir = pd.concat(
(
df_pdhg_mp[df_pdhg_mp["method"] == "pdhg"],
df_scs[df_scs["method"] == "scs-indirect"],
)
)
gen_ratio_histograms_split_tol(df_pdhg_mp, f"{MIPLIB_STR}", PAR)
gen_ratio_histograms_split_tol(df_pdhg_scs_indir, f"{MIPLIB_STR}", PAR)
gen_ratio_histograms_split_tol(df_pdhg_scs_dir, f"{MIPLIB_STR}", PAR)
######################################################################
# PDLP vs mp vs scs on MITTELMANN (JOIN PDHG/MP WITH SCS)
df_pdhg_mp = pd.read_csv(os.path.join(CSV_DIR, "mittelmann_pdhg_mp_1h.csv"))
df_pdhg_mp = fill_in_missing_problems(df_pdhg_mp, mittelmann_instances)
df_pdhg_vanilla = pd.read_csv(
os.path.join(CSV_DIR, "mittelmann_pdhg_vanilla_1h.csv")
)
df_pdhg_vanilla = fill_in_missing_problems(
df_pdhg_vanilla, mittelmann_instances
)
df_scs = pd.read_csv(os.path.join(CSV_DIR, "mittelmann_scs_1h.csv"))
df_scs = fill_in_missing_problems(df_scs, mittelmann_instances)
df = pd.concat((df_pdhg_mp, df_pdhg_vanilla, df_scs))
gen_solved_problems_plots_split_tol(
df,
f"{MITTELMANN_STR}_baselines",
len(mittelmann_instances),
legend_location="separate",
)
gen_total_solved_problems_table_split_tol(
df, f"{MITTELMANN_STR}_baselines", PAR
)
df_pdhg_scs_dir = pd.concat(
(
df_pdhg_mp[df_pdhg_mp["method"] == "pdhg"],
df_scs[df_scs["method"] == "scs-direct"],
)
)
df_pdhg_scs_indir = pd.concat(
(
df_pdhg_mp[df_pdhg_mp["method"] == "pdhg"],
df_scs[df_scs["method"] == "scs-indirect"],
)
)
gen_ratio_histograms_split_tol(df_pdhg_mp, f"{MITTELMANN_STR}", PAR)
gen_ratio_histograms_split_tol(df_pdhg_scs_indir, f"{MITTELMANN_STR}", PAR)
gen_ratio_histograms_split_tol(df_pdhg_scs_dir, f"{MITTELMANN_STR}", PAR)
######################################################################
# PDLP vs mp vs scs on NETLIB (JOIN PDHG/MP WITH SCS)
df_pdhg_mp = pd.read_csv(os.path.join(CSV_DIR, "netlib_pdhg_mp_1h.csv"))
df_pdhg_mp = fill_in_missing_problems(df_pdhg_mp, netlib_instances)
df_pdhg_vanilla = pd.read_csv(
os.path.join(CSV_DIR, "netlib_pdhg_vanilla_1h.csv")
)
df_pdhg_vanilla = fill_in_missing_problems(df_pdhg_vanilla, netlib_instances)
df_scs = pd.read_csv(os.path.join(CSV_DIR, "netlib_scs_1h.csv"))
df_scs = fill_in_missing_problems(df_scs, netlib_instances)
df = pd.concat((df_pdhg_mp, df_pdhg_vanilla, df_scs))
gen_solved_problems_plots_split_tol(
df,
f"{NETLIB_STR}_baselines",
len(netlib_instances),
legend_location="separate",
)
gen_total_solved_problems_table_split_tol(df, f"{NETLIB_STR}_baselines", PAR)
df_pdhg_scs_dir = pd.concat(
(
df_pdhg_mp[df_pdhg_mp["method"] == "pdhg"],
df_scs[df_scs["method"] == "scs-direct"],
)
)
df_pdhg_scs_indir = pd.concat(
(
df_pdhg_mp[df_pdhg_mp["method"] == "pdhg"],
df_scs[df_scs["method"] == "scs-indirect"],
)
)
gen_ratio_histograms_split_tol(df_pdhg_mp, f"{NETLIB_STR}", PAR)
gen_ratio_histograms_split_tol(df_pdhg_scs_indir, f"{NETLIB_STR}", PAR)
gen_ratio_histograms_split_tol(df_pdhg_scs_dir, f"{NETLIB_STR}", PAR)
######################################################################
# PDLP presolve vs no presolve (JOIN DEFAULT)
df = pd.read_csv(os.path.join(CSV_DIR, "miplib_nopresolve_100k.csv"))
df = pd.concat((df_default, df))
gen_solved_problems_plots_split_tol(
df, f"{MIPLIB_STR}_presolve", len(miplib_instances)
)
gen_total_solved_problems_table_split_tol(
df, f"{MIPLIB_STR}_presolve", PAR, TIME_LIMIT_SECS_ABLATION
)
######################################################################
# PDLP scaling vs no scaling (NO JOIN DEFAULT)
df = pd.read_csv(os.path.join(CSV_DIR, "miplib_scaling_100k.csv"))
df = fill_in_missing_problems(df, miplib_instances)
# Pull out best performing scaling for each instance / tolerance:
df_best_per = df[df["termination_reason"] == OPT].reset_index()
best_idxs = df_best_per.groupby(["instance_name", "tolerance"])[
"cumulative_kkt_matrix_passes"
].idxmin()
df_best_per = df_best_per.loc[best_idxs]
for t in df_best_per["tolerance"].unique():
# rename the experiment label
df_best_per.loc[
df_best_per["tolerance"] == t, "experiment_label"
] = f"scaling {_BEST_STR} {t}"
df_best_per = fill_in_missing_problems(df_best_per, miplib_instances)
# filter out un-needed scaling experiments:
df = pd.concat(
df[df["experiment_label"].str.contains(e)] for e in SCALING_EXPS_TO_USE
)
gen_solved_problems_plots_split_tol(
df, f"{MIPLIB_STR}_scaling", len(miplib_instances)
)
gen_total_solved_problems_table_split_tol(
df, f"{MIPLIB_STR}_scaling", PAR, TIME_LIMIT_SECS_ABLATION
)
df = pd.concat((df, df_best_per))
gen_solved_problems_plots_split_tol(
df, f"{MIPLIB_STR}_scaling_with_best_per", len(miplib_instances)
)
gen_total_solved_problems_table_split_tol(
df, f"{MIPLIB_STR}_scaling_with_best_per", PAR, TIME_LIMIT_SECS_ABLATION
)
######################################################################
# PDLP restart vs no restart (NO JOIN DEFAULT)
df = pd.read_csv(os.path.join(CSV_DIR, "miplib_restarts_100k.csv"))
df = fill_in_missing_problems(df, miplib_instances)
gen_solved_problems_plots_split_tol(
df, f"{MIPLIB_STR}_restarts", len(miplib_instances)
)
gen_total_solved_problems_table_split_tol(
df, f"{MIPLIB_STR}_restarts", PAR, TIME_LIMIT_SECS_ABLATION
)
######################################################################
# PDLP primalweight (NO JOIN DEFAULT)
df = pd.read_csv(os.path.join(CSV_DIR, "miplib_primalweight_100k.csv"))
df = fill_in_missing_problems(df, miplib_instances)
df_fixed = df[df["experiment_label"].str.contains("Fixed")]
pw_solved = (
df_fixed[df_fixed["termination_reason"] == OPT]
.groupby(["experiment_label", "tolerance"])["experiment_label"]
.agg("count")
.pipe(pd.DataFrame)
.rename(columns={"experiment_label": "solved"})
.reset_index()
)
dfs = []
for t in df_fixed["tolerance"].unique():
_df = pw_solved[pw_solved["tolerance"] == t]
best_mp_run = _df.loc[_df["solved"].idxmax()]["experiment_label"]
dfs.append(df_fixed[df_fixed["experiment_label"] == best_mp_run])
df_best_ind = fill_in_missing_problems(pd.concat(dfs), miplib_instances)
for t in df_best_fixed["tolerance"].unique():
# rename the experiment label
df_best_ind.loc[
df_best_ind["tolerance"] == t, "experiment_label"
] = f"primalweight {_BEST_FIXED} {t}"
# Pull out best performing fixed weight for each instance / tolerance:
df_best_fixed = df_fixed[df_fixed["termination_reason"] == OPT].reset_index()
best_idxs = df_best_fixed.groupby(["instance_name", "tolerance"])[
"cumulative_kkt_matrix_passes"
].idxmin()
df_best_fixed = df_best_fixed.loc[best_idxs]
for t in df_best_fixed["tolerance"].unique():
# rename the experiment label
df_best_fixed.loc[
df_best_fixed["tolerance"] == t, "experiment_label"
] = f"primalweight {_BEST_STR} {t}"
df_best_fixed = fill_in_missing_problems(df_best_fixed, miplib_instances)
df = pd.concat(
df[df["experiment_label"].str.contains(e)] for e in PRIMALWEIGHT_EXPS_TO_USE
)
df = pd.concat((df, df_best_fixed, df_best_ind))
gen_solved_problems_plots_split_tol(
df, f"{MIPLIB_STR}_primalweight", len(miplib_instances), False
)
gen_total_solved_problems_table_split_tol(
df, f"{MIPLIB_STR}_primalweight", PAR, TIME_LIMIT_SECS_ABLATION
)
######################################################################
# MIPLIB PDLP ablate improvements (JOIN DEFAULT)
df = pd.read_csv(os.path.join(CSV_DIR, "miplib_improvements_100k.csv"))
df_pdlp = df_default.copy()
for t in df_pdlp["tolerance"].unique():
df_pdlp.loc[
df_pdlp["tolerance"] == t, "experiment_label"
] = f"pdlp_final_improvements_{t}"
df = pd.concat((df, df_pdlp.reset_index()))
df = fill_in_missing_problems(df, miplib_instances)
gen_solved_problems_plots_split_tol(
df, f"{MIPLIB_STR}_improvements", len(miplib_instances), True
)
outputs = gen_total_solved_problems_table_split_tol(
df, f"{MIPLIB_STR}_improvements", PAR
)
gen_all_improvement_plots(outputs, f"{MIPLIB_STR}_improvements")
######################################################################
# MITTELMAN PDLP ablate improvements (JOIN DEFAULT)
df_default_mittelmann = pd.read_csv(
os.path.join(CSV_DIR, "mittelmann_pdhg_enhanced_100k.csv")
)
df_default_mittelmann = fill_in_missing_problems(
df_default_mittelmann, mittelmann_instances
)
df = pd.read_csv(os.path.join(CSV_DIR, "mittelmann_improvements_100k.csv"))
df_pdlp = df_default_mittelmann.copy()
for t in df_pdlp["tolerance"].unique():
df_pdlp.loc[
df_pdlp["tolerance"] == t, "experiment_label"
] = f"pdlp_final_improvements_{t}"
df = pd.concat((df, df_pdlp.reset_index()))
df = fill_in_missing_problems(df, mittelmann_instances)
gen_solved_problems_plots_split_tol(
df, f"{MITTELMANN_STR}_improvements", len(mittelmann_instances), True
)
outputs = gen_total_solved_problems_table_split_tol(
df, f"{MITTELMANN_STR}_improvements", PAR
)
for df in outputs.values():
df["rank"] = df["Experiment"].map(IMPROVEMENTS_ORDER_IDX)
df.sort_values("rank", inplace=True)
df.drop(labels="rank", axis=1, inplace=True)
gen_all_improvement_plots(outputs, f"{MITTELMANN_STR}_improvements")
######################################################################
# NETLIB PDLP ablate improvements (JOIN DEFAULT)
df_default_netlib = pd.read_csv(
os.path.join(CSV_DIR, "netlib_pdhg_enhanced_100k.csv")
)
df_default_netlib = fill_in_missing_problems(
df_default_netlib, netlib_instances
)
df = pd.read_csv(os.path.join(CSV_DIR, "netlib_improvements_100k.csv"))
df_pdlp = df_default_netlib.copy()
for t in df_pdlp["tolerance"].unique():
df_pdlp.loc[
df_pdlp["tolerance"] == t, "experiment_label"
] = f"pdlp_final_improvements_{t}"
df = pd.concat((df, df_pdlp.reset_index()))
df = fill_in_missing_problems(df, netlib_instances)
gen_solved_problems_plots_split_tol(
df, f"{NETLIB_STR}_improvements", len(netlib_instances), True
)
outputs = gen_total_solved_problems_table_split_tol(
df, f"{NETLIB_STR}_improvements", PAR
)
for df in outputs.values():
df["rank"] = df["Experiment"].map(IMPROVEMENTS_ORDER_IDX)
df.sort_values("rank", inplace=True)
df.drop(labels="rank", axis=1, inplace=True)
gen_all_improvement_plots(outputs, f"{NETLIB_STR}_improvements")
| 33.167976 | 80 | 0.65331 | 4,594 | 33,765 | 4.492381 | 0.110797 | 0.014827 | 0.021804 | 0.017589 | 0.592838 | 0.557661 | 0.505233 | 0.448299 | 0.427658 | 0.416513 | 0 | 0.009424 | 0.1923 | 33,765 | 1,017 | 81 | 33.20059 | 0.747323 | 0.087398 | 0 | 0.299252 | 0 | 0 | 0.203642 | 0.073783 | 0 | 0 | 0 | 0 | 0.002494 | 1 | 0.01995 | false | 0.026185 | 0.009975 | 0 | 0.071072 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b24df3e31526e440875ce71fb0d3c2dd300ce719 | 5,739 | py | Python | traits.py | JW6969/Programming-Language-Identification | 69070ccf712532b73b6526c0e6bc8ed3daa66706 | [
"MIT"
] | 24 | 2015-04-10T11:38:56.000Z | 2022-03-31T09:22:50.000Z | traits.py | JW6969/Programming-Language-Identification | 69070ccf712532b73b6526c0e6bc8ed3daa66706 | [
"MIT"
] | 1 | 2020-02-23T15:31:29.000Z | 2020-02-23T17:11:05.000Z | traits.py | JW6969/Programming-Language-Identification | 69070ccf712532b73b6526c0e6bc8ed3daa66706 | [
"MIT"
] | 6 | 2015-04-28T20:40:01.000Z | 2021-03-28T14:37:47.000Z | #Copyright (c) 2011 David Klein and Simon Weber
#Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
#this file contains one function for each statistical feature
#these functions process source code from known languages and add the results to the database
#reads data into a dictionary from the database file and returns it
def getDataFromFile(language, filename):
fileExists = False
data = {}
#IOError if file does not exist already
try:
textfile = open('./database/'+str(language)+'/' + str(filename), 'r')
fileExists = True
except:
pass
#read database into memory
if fileExists:
temp = textfile.readline().strip()
while temp != '':
data[temp.split(" ")[0]] = int(temp.split(" ")[1])
temp = textfile.readline().strip()
textfile.close()
return data
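# The database files are assumed to be plain text with one "<key> <count>" pair per
# line (keys contain no spaces), e.g. ./database/python/firstWord.txt might hold
#   def 42
#   import 17
# Example path and values are illustrative; the format is inferred from the
# read/write code in this module.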
#checks for similarities based on the last character on each line
def addLastCharacter(language, source):
characters = getDataFromFile(language, 'lastCharacter.txt')
#parse sourcecode for characters
for line in source:
if line.strip() == '':
continue
char = line.strip()[-1]
if characters.has_key(char):
characters[char] += 1
else:
characters[char] = 1
#write database back to file
writefile = open('./database/'+str(language)+'/lastCharacter.txt', 'w')
for char in characters:
writefile.write(str(char) + ' ' + str(characters[char]) + '\n')
writefile.close()
#checks for similarities based on the first word (i.e. all characters before the first space) on each line
def addFirstWord(language, source):
words = getDataFromFile(language, 'firstWord.txt')
#parse sourcecode for first word
for line in source:
if line.strip() == '':
continue
word = line.strip().split(" ")[0]
if words.has_key(word):
words[word] += 1
else:
words[word] = 1
#write database back to file
writefile = open('./database/'+str(language)+'/firstWord.txt', 'w')
for word in words:
writefile.write(str(word) + ' ' + str(words[word]) + '\n')
writefile.close()
#check for similarities in the frequency of different operators
def addOperator(language, source):
operators = getDataFromFile(language, 'operators.txt')
translationTable = ' !"#$%&\'()*+,-./ :;<=>?@ [\\]^_` {|}~ '
#parse sourcecode for operators
oplist = []
for line in source:
if line.strip() == '':
continue
temp = line.translate(translationTable).strip()
reading = False
start = 0
for i in range(len(temp)):
if reading == False and temp[i] != ' ':
start = i
reading = True
elif reading == True and temp[i] == ' ':
oplist.append(temp[start:i])
reading = False
if reading == True:
oplist.append(temp[start:])
for operator in oplist:
if operators.has_key(operator):
operators[operator] += 1
else:
operators[operator] = 1
#write database back to file
writefile = open('./database/'+str(language)+'/operators.txt', 'w')
for operator in operators:
writefile.write(str(operator) + ' ' + str(operators[operator]) + '\n')
writefile.close()
#counts relative prevalence of various types of brackets
def addBrackets(language, source):
brackets = getDataFromFile(language, 'brackets.txt')
bracketslist = "{}()<>[]"
for brack in bracketslist:
if brackets.has_key(brack):
pass
else:
brackets[brack] = 0
for line in source:
for char in line:
if brackets.has_key(char):
brackets[char] += 1
#write database back to file
writefile = open('./database/'+str(language)+'/brackets.txt', 'w')
for brack in brackets:
writefile.write(str(brack) + ' ' + str(brackets[brack]) + '\n')
writefile.close()
#counts relative prevalence of various types of punctuation vs letters
def addPunctuation(language, source):
puncnum = getDataFromFile(language, 'punctuation.txt')
punclist = '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'
letterlist = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
if not puncnum.has_key(';'):
puncnum[';'] = 0
if not puncnum.has_key('a'):
puncnum['a'] = 0
for line in source:
for char in line:
if char in punclist:
puncnum[';'] += 1
elif char in letterlist:
puncnum['a'] += 1
#write database back to file
writefile = open('./database/'+str(language)+'/punctuation.txt', 'w')
for punc in puncnum:
writefile.write(str(punc) + ' ' + str(puncnum[punc]) + '\n')
writefile.close()
#check for similarities in the frequency of different keywords
def addKeywords(language, source):
keywords = getDataFromFile(language, 'keywords.txt')
translationTable = ' ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz '
#parse sourcecode for operators
wordlist = []
for line in source:
if line.strip() == '':
continue
temp = line.translate(translationTable).strip()
reading = False
start = 0
for i in range(len(temp)):
if reading == False and temp[i] != ' ':
start = i
reading = True
elif reading == True and temp[i] == ' ':
wordlist.append(temp[start:i])
reading = False
if reading == True:
wordlist.append(temp[start:])
for word in wordlist:
if keywords.has_key(word):
keywords[word] += 1
else:
keywords[word] = 1
#write database back to file
writefile = open('./database/'+str(language)+'/keywords.txt', 'w')
for word in keywords:
writefile.write(str(word) + ' ' + str(keywords[word]) + '\n')
writefile.close()
| 28.984848 | 280 | 0.626416 | 674 | 5,739 | 5.318991 | 0.224036 | 0.013389 | 0.029289 | 0.044909 | 0.364296 | 0.333612 | 0.316318 | 0.316318 | 0.29735 | 0.274477 | 0 | 0.005488 | 0.238021 | 5,739 | 197 | 281 | 29.13198 | 0.814315 | 0.193065 | 0 | 0.383459 | 0 | 0 | 0.140903 | 0.022579 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0.015038 | 0 | 0 | 0.06015 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b250046104c28ac5c5337430f126db1ff5d09fbd | 3,136 | py | Python | others/GDAS/exps-rnn/train_rnn_base.py | shashank3959/NAS-Projects | 2c0577231a52375de5ebd7a588750899a8c7bf1c | [
"MIT"
] | 20 | 2019-10-10T07:13:27.000Z | 2022-03-25T11:33:16.000Z | exps-rnn/train_rnn_base.py | BaiYuYuan/GDAS | 5eed8101a78d223a20a43494176051298b24ac3a | [
"MIT"
] | null | null | null | exps-rnn/train_rnn_base.py | BaiYuYuan/GDAS | 5eed8101a78d223a20a43494176051298b24ac3a | [
"MIT"
] | 6 | 2020-04-21T14:52:02.000Z | 2021-08-05T15:00:22.000Z | ##################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
##################################################
import os, gc, sys, math, time, glob, random, argparse
import numpy as np
from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
import multiprocessing
from pathlib import Path
lib_dir = (Path(__file__).parent / '..' / 'lib').resolve()
print ('lib-dir : {:}'.format(lib_dir))
if str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))
from utils import AverageMeter, time_string, time_file_str, convert_secs2time
from utils import print_log, obtain_accuracy
from utils import count_parameters_in_MB
from nas_rnn import DARTS_V1, DARTS_V2, GDAS
from train_rnn_utils import main_procedure
from scheduler import load_config
Networks = {'DARTS_V1': DARTS_V1,
'DARTS_V2': DARTS_V2,
'GDAS' : GDAS}
parser = argparse.ArgumentParser("RNN")
parser.add_argument('--arch', type=str, choices=Networks.keys(), help='the network architecture')
parser.add_argument('--config_path', type=str, help='the training configure for the discovered model')
# log
parser.add_argument('--save_path', type=str, help='Folder to save checkpoints and log.')
parser.add_argument('--print_freq', type=int, help='print frequency (default: 200)')
parser.add_argument('--manualSeed', type=int, help='manual seed')
parser.add_argument('--threads', type=int, default=4, help='the number of threads')
args = parser.parse_args()
assert torch.cuda.is_available(), 'torch.cuda is not available'
if args.manualSeed is None:
    args.manualSeed = random.randint(1, 10000)
random.seed(args.manualSeed)
cudnn.benchmark = True
cudnn.enabled = True
torch.manual_seed(args.manualSeed)
torch.cuda.manual_seed_all(args.manualSeed)
torch.set_num_threads(args.threads)
def main():
    # Init logger
    args.save_path = os.path.join(args.save_path, 'seed-{:}'.format(args.manualSeed))
    if not os.path.isdir(args.save_path):
        os.makedirs(args.save_path)
    log = open(os.path.join(args.save_path, 'log-seed-{:}-{:}.txt'.format(args.manualSeed, time_file_str())), 'w')
    print_log('save path : {:}'.format(args.save_path), log)
    state = {k: v for k, v in args._get_kwargs()}
    print_log(state, log)
    print_log("Random Seed: {}".format(args.manualSeed), log)
    print_log("Python version : {}".format(sys.version.replace('\n', ' ')), log)
    print_log("Torch version : {}".format(torch.__version__), log)
    print_log("CUDA version : {}".format(torch.version.cuda), log)
    print_log("cuDNN version : {}".format(cudnn.version()), log)
    print_log("Num of GPUs : {}".format(torch.cuda.device_count()), log)
    print_log("Num of CPUs : {}".format(multiprocessing.cpu_count()), log)
    config = load_config(args.config_path)
    genotype = Networks[args.arch]
    main_procedure(config, genotype, args.save_path, args.print_freq, log)
    log.close()

if __name__ == '__main__':
    main()
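# A minimal invocation sketch; the config and output paths below are placeholders, while
# --arch, --config_path, --save_path and --print_freq are the arguments defined above:
# python train_rnn_base.py --arch GDAS --config_path ./configs/ptb.config --save_path ./output --print_freq 200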
| 40.727273 | 112 | 0.694515 | 442 | 3,136 | 4.742081 | 0.337104 | 0.038168 | 0.040076 | 0.021469 | 0.03626 | 0.020992 | 0 | 0 | 0 | 0 | 0 | 0.008112 | 0.135204 | 3,136 | 76 | 113 | 41.263158 | 0.764749 | 0.020089 | 0 | 0 | 0 | 0 | 0.162736 | 0 | 0 | 0 | 0 | 0 | 0.016393 | 1 | 0.016393 | false | 0 | 0.278689 | 0 | 0.295082 | 0.213115 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b250c65ff96fd0949da3ade700eaf209ac70c200 | 23,047 | py | Python | sdk/storage/azure-storage-blob/azure/storage/blob/aio/blob_service_client_async.py | gautam714/azure-sdk-for-python | 1741c199c42e8c85a2e14bc78195fd992837ef92 | [
"MIT"
] | null | null | null | sdk/storage/azure-storage-blob/azure/storage/blob/aio/blob_service_client_async.py | gautam714/azure-sdk-for-python | 1741c199c42e8c85a2e14bc78195fd992837ef92 | [
"MIT"
] | null | null | null | sdk/storage/azure-storage-blob/azure/storage/blob/aio/blob_service_client_async.py | gautam714/azure-sdk-for-python | 1741c199c42e8c85a2e14bc78195fd992837ef92 | [
"MIT"
] | null | null | null | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import functools
from typing import ( # pylint: disable=unused-import
Union, Optional, Any, Iterable, Dict, List,
TYPE_CHECKING
)
from azure.core.async_paging import AsyncItemPaged
from .._shared.models import LocationMode
from .._shared.policies_async import ExponentialRetry
from .._shared.base_client_async import AsyncStorageAccountHostsMixin
from .._shared.response_handlers import return_response_headers, process_storage_error
from .._generated.aio import AzureBlobStorage
from .._generated.models import StorageErrorException, StorageServiceProperties
from ..blob_service_client import BlobServiceClient as BlobServiceClientBase
from .container_client_async import ContainerClient
from .blob_client_async import BlobClient
from .models import ContainerProperties, ContainerPropertiesPaged
if TYPE_CHECKING:
from datetime import datetime
from azure.core.pipeline.transport import HttpTransport
from azure.core.pipeline.policies import HTTPPolicy
from .._shared.models import AccountPermissions, ResourceTypes
from .lease_async import LeaseClient
from ..models import (
BlobProperties,
Logging,
Metrics,
RetentionPolicy,
StaticWebsite,
CorsRule,
PublicAccess
)
class BlobServiceClient(AsyncStorageAccountHostsMixin, BlobServiceClientBase):
"""A client to interact with the Blob Service at the account level.
This client provides operations to retrieve and configure the account properties
as well as list, create and delete containers within the account.
For operations relating to a specific container or blob, clients for those entities
can also be retrieved using the `get_client` functions.
:ivar str url:
The full endpoint URL to the Blob service endpoint. This could be either the
primary endpoint, or the secondary endpoint depending on the current `location_mode`.
:ivar str primary_endpoint:
The full primary endpoint URL.
:ivar str primary_hostname:
The hostname of the primary endpoint.
:ivar str secondary_endpoint:
The full secondary endpoint URL if configured. If not available
a ValueError will be raised. To explicitly specify a secondary hostname, use the optional
`secondary_hostname` keyword argument on instantiation.
:ivar str secondary_hostname:
The hostname of the secondary endpoint. If not available this
will be None. To explicitly specify a secondary hostname, use the optional
`secondary_hostname` keyword argument on instantiation.
:ivar str location_mode:
The location mode that the client is currently using. By default
this will be "primary". Options include "primary" and "secondary".
:param str account_url:
The URL to the blob storage account. Any other entities included
in the URL path (e.g. container or blob) will be discarded. This URL can be optionally
authenticated with a SAS token.
:param credential:
The credentials with which to authenticate. This is optional if the
account URL already has a SAS token. The value can be a SAS token string, and account
shared access key, or an instance of a TokenCredentials class from azure.identity.
If the URL already has a SAS token, specifying an explicit credential will take priority.
Example:
.. literalinclude:: ../tests/test_blob_samples_authentication_async.py
:start-after: [START create_blob_service_client]
:end-before: [END create_blob_service_client]
:language: python
:dedent: 8
:caption: Creating the BlobServiceClient with account url and credential.
.. literalinclude:: ../tests/test_blob_samples_authentication_async.py
:start-after: [START create_blob_service_client_oauth]
:end-before: [END create_blob_service_client_oauth]
:language: python
:dedent: 8
:caption: Creating the BlobServiceClient with Azure Identity credentials.
"""
def __init__(
self, account_url, # type: str
credential=None, # type: Optional[Any]
loop=None, # type: Any
**kwargs # type: Any
):
# type: (...) -> None
kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
super(BlobServiceClient, self).__init__(
account_url,
credential=credential,
loop=loop,
**kwargs)
self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline, loop=loop)
self._loop = loop
async def get_account_information(self, **kwargs): # type: ignore
# type: (Optional[int]) -> Dict[str, str]
"""Gets information related to the storage account.
The information can also be retrieved if the user has a SAS to a container or blob.
The keys in the returned dictionary include 'sku_name' and 'account_kind'.
:returns: A dict of account information (SKU and account type).
:rtype: dict(str, str)
Example:
.. literalinclude:: ../tests/test_blob_samples_service_async.py
:start-after: [START get_blob_service_account_info]
:end-before: [END get_blob_service_account_info]
:language: python
:dedent: 8
:caption: Getting account information for the blob service.
"""
try:
return await self._client.service.get_account_info(cls=return_response_headers, **kwargs) # type: ignore
except StorageErrorException as error:
process_storage_error(error)
async def get_service_stats(self, timeout=None, **kwargs): # type: ignore
# type: (Optional[int], **Any) -> Dict[str, Any]
"""Retrieves statistics related to replication for the Blob service.
It is only available when read-access geo-redundant replication is enabled for
the storage account.
With geo-redundant replication, Azure Storage maintains your data durable
in two locations. In both locations, Azure Storage constantly maintains
multiple healthy replicas of your data. The location where you read,
create, update, or delete data is the primary storage account location.
The primary location exists in the region you choose at the time you
create an account via the Azure Management Azure classic portal, for
example, North Central US. The location to which your data is replicated
is the secondary location. The secondary location is automatically
determined based on the location of the primary; it is in a second data
center that resides in the same region as the primary location. Read-only
access is available from the secondary location, if read-access geo-redundant
replication is enabled for your storage account.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: The blob service stats.
:rtype: ~azure.storage.blob._generated.models.StorageServiceStats
Example:
.. literalinclude:: ../tests/test_blob_samples_service_async.py
:start-after: [START get_blob_service_stats]
:end-before: [END get_blob_service_stats]
:language: python
:dedent: 8
:caption: Getting service stats for the blob service.
"""
try:
return await self._client.service.get_statistics( # type: ignore
timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs)
except StorageErrorException as error:
process_storage_error(error)
async def get_service_properties(self, timeout=None, **kwargs):
    # type: (Optional[int]) -> Dict[str, Any]
"""Gets the properties of a storage account's Blob service, including
Azure Storage Analytics.
:param int timeout:
The timeout parameter is expressed in seconds.
:rtype: ~azure.storage.blob._generated.models.StorageServiceProperties
Example:
.. literalinclude:: ../tests/test_blob_samples_service_async.py
:start-after: [START get_blob_service_properties]
:end-before: [END get_blob_service_properties]
:language: python
:dedent: 8
:caption: Getting service properties for the blob service.
"""
try:
return await self._client.service.get_properties(timeout=timeout, **kwargs)
except StorageErrorException as error:
process_storage_error(error)
async def set_service_properties(
self, logging=None, # type: Optional[Logging]
hour_metrics=None, # type: Optional[Metrics]
minute_metrics=None, # type: Optional[Metrics]
cors=None, # type: Optional[List[CorsRule]]
target_version=None, # type: Optional[str]
delete_retention_policy=None, # type: Optional[RetentionPolicy]
static_website=None, # type: Optional[StaticWebsite]
timeout=None, # type: Optional[int]
**kwargs
):
# type: (...) -> None
"""Sets the properties of a storage account's Blob service, including
Azure Storage Analytics.
If an element (e.g. Logging) is left as None, the
existing settings on the service for that functionality are preserved.
:param logging:
Groups the Azure Analytics Logging settings.
:type logging:
:class:`~azure.storage.blob.models.Logging`
:param hour_metrics:
The hour metrics settings provide a summary of request
statistics grouped by API in hourly aggregates for blobs.
:type hour_metrics:
:class:`~azure.storage.blob.models.Metrics`
:param minute_metrics:
The minute metrics settings provide request statistics
for each minute for blobs.
:type minute_metrics:
:class:`~azure.storage.blob.models.Metrics`
:param cors:
You can include up to five CorsRule elements in the
list. If an empty list is specified, all CORS rules will be deleted,
and CORS will be disabled for the service.
:type cors: list(:class:`~azure.storage.blob.models.CorsRule`)
:param str target_version:
Indicates the default version to use for requests if an incoming
request's version is not specified.
:param delete_retention_policy:
The delete retention policy specifies whether to retain deleted blobs.
It also specifies the number of days and versions of blob to keep.
:type delete_retention_policy:
:class:`~azure.storage.blob.models.RetentionPolicy`
:param static_website:
Specifies whether the static website feature is enabled,
and if yes, indicates the index document and 404 error document to use.
:type static_website:
:class:`~azure.storage.blob.models.StaticWebsite`
:param int timeout:
The timeout parameter is expressed in seconds.
:rtype: None
Example:
.. literalinclude:: ../tests/test_blob_samples_service_async.py
:start-after: [START set_blob_service_properties]
:end-before: [END set_blob_service_properties]
:language: python
:dedent: 8
:caption: Setting service properties for the blob service.
"""
props = StorageServiceProperties(
logging=logging,
hour_metrics=hour_metrics,
minute_metrics=minute_metrics,
cors=cors,
default_service_version=target_version,
delete_retention_policy=delete_retention_policy,
static_website=static_website
)
try:
await self._client.service.set_properties(props, timeout=timeout, **kwargs)
except StorageErrorException as error:
process_storage_error(error)
def list_containers(
self, name_starts_with=None, # type: Optional[str]
include_metadata=False, # type: Optional[bool]
results_per_page=None, # type: Optional[int]
timeout=None, # type: Optional[int]
**kwargs
):
# type: (...) -> AsyncItemPaged[ContainerProperties]
"""Returns a generator to list the containers under the specified account.
The generator will lazily follow the continuation tokens returned by
the service and stop when all containers have been returned.
:param str name_starts_with:
Filters the results to return only containers whose names
begin with the specified prefix.
:param bool include_metadata:
Specifies that container metadata be returned in the response.
The default value is `False`.
:param int results_per_page:
The maximum number of container names to retrieve per API
call. If the request does not specify the server will return up to 5,000 items.
:param int timeout:
The timeout parameter is expressed in seconds.
:returns: An iterable (auto-paging) of ContainerProperties.
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.models.ContainerProperties]
Example:
.. literalinclude:: ../tests/test_blob_samples_service_async.py
:start-after: [START bsc_list_containers]
:end-before: [END bsc_list_containers]
:language: python
:dedent: 12
:caption: Listing the containers in the blob service.
"""
include = 'metadata' if include_metadata else None
command = functools.partial(
self._client.service.list_containers_segment,
prefix=name_starts_with,
include=include,
timeout=timeout,
**kwargs)
return AsyncItemPaged(
command,
prefix=name_starts_with,
results_per_page=results_per_page,
page_iterator_class=ContainerPropertiesPaged
)
async def create_container(
self, name, # type: str
metadata=None, # type: Optional[Dict[str, str]]
public_access=None, # type: Optional[Union[PublicAccess, str]]
timeout=None, # type: Optional[int]
**kwargs
):
# type: (...) -> ContainerClient
"""Creates a new container under the specified account.
If the container with the same name already exists, a ResourceExistsError will
be raised. This method returns a client with which to interact with the newly
created container.
:param str name: The name of the container to create.
:param metadata:
A dict with name-value pairs to associate with the
container as metadata. Example: `{'Category':'test'}`
:type metadata: dict(str, str)
:param public_access:
Possible values include: container, blob.
:type public_access: str or ~azure.storage.blob.models.PublicAccess
:param int timeout:
The timeout parameter is expressed in seconds.
:rtype: ~azure.storage.blob.aio.container_client_async.ContainerClient
Example:
.. literalinclude:: ../tests/test_blob_samples_service_async.py
:start-after: [START bsc_create_container]
:end-before: [END bsc_create_container]
:language: python
:dedent: 12
:caption: Creating a container in the blob service.
"""
container = self.get_container_client(name)
await container.create_container(
metadata=metadata, public_access=public_access, timeout=timeout, **kwargs)
return container
async def delete_container(
self, container, # type: Union[ContainerProperties, str]
lease=None, # type: Optional[Union[LeaseClient, str]]
timeout=None, # type: Optional[int]
**kwargs
):
# type: (...) -> None
"""Marks the specified container for deletion.
The container and any blobs contained within it are later deleted during garbage collection.
If the container is not found, a ResourceNotFoundError will be raised.
:param container:
The container to delete. This can either be the name of the container,
or an instance of ContainerProperties.
:type container: str or ~azure.storage.blob.models.ContainerProperties
:param ~azure.storage.blob.lease.LeaseClient lease:
If specified, delete_container only succeeds if the
container's lease is active and matches this ID.
Required if the container has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds.
:rtype: None
Example:
.. literalinclude:: ../tests/test_blob_samples_service_async.py
:start-after: [START bsc_delete_container]
:end-before: [END bsc_delete_container]
:language: python
:dedent: 12
:caption: Deleting a container in the blob service.
"""
container = self.get_container_client(container) # type: ignore
await container.delete_container( # type: ignore
lease=lease,
timeout=timeout,
**kwargs)
def get_container_client(self, container):
# type: (Union[ContainerProperties, str]) -> ContainerClient
"""Get a client to interact with the specified container.
The container need not already exist.
:param container:
The container. This can either be the name of the container,
or an instance of ContainerProperties.
:type container: str or ~azure.storage.blob.models.ContainerProperties
:returns: A ContainerClient.
        :rtype: ~azure.storage.blob.aio.container_client_async.ContainerClient
Example:
.. literalinclude:: ../tests/test_blob_samples_service_async.py
:start-after: [START bsc_get_container_client]
:end-before: [END bsc_get_container_client]
:language: python
:dedent: 8
:caption: Getting the container client to interact with a specific container.
"""
return ContainerClient(
self.url, container=container,
credential=self.credential, _configuration=self._config,
_pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
key_resolver_function=self.key_resolver_function, loop=self._loop)
def get_blob_client(
self, container, # type: Union[ContainerProperties, str]
blob, # type: Union[BlobProperties, str]
snapshot=None # type: Optional[Union[Dict[str, Any], str]]
):
# type: (...) -> BlobClient
"""Get a client to interact with the specified blob.
The blob need not already exist.
:param container:
The container that the blob is in. This can either be the name of the container,
or an instance of ContainerProperties.
:type container: str or ~azure.storage.blob.models.ContainerProperties
:param blob:
The blob with which to interact. This can either be the name of the blob,
or an instance of BlobProperties.
:type blob: str or ~azure.storage.blob.models.BlobProperties
:param snapshot:
The optional blob snapshot on which to operate. This can either be the ID of the snapshot,
or a dictionary output returned by :func:`~azure.storage.blob.aio.blob_client_async.BlobClient.create_snapshot()`.
:type snapshot: str or dict(str, Any)
:returns: A BlobClient.
:rtype: ~azure.storage.blob.aio.blob_client_async.BlobClient
Example:
.. literalinclude:: ../tests/test_blob_samples_service_async.py
:start-after: [START bsc_get_blob_client]
:end-before: [END bsc_get_blob_client]
:language: python
:dedent: 12
:caption: Getting the blob client to interact with a specific blob.
"""
return BlobClient( # type: ignore
self.url, container=container, blob=blob, snapshot=snapshot,
credential=self.credential, _configuration=self._config,
_pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
key_resolver_function=self.key_resolver_function, loop=self._loop)
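# A minimal usage sketch, assuming a SAS-authenticated account URL; the account name, token
# and container/blob names are placeholders, and only methods defined above are used:
# async def _example():
#     service = BlobServiceClient("https://myaccount.blob.core.windows.net", credential="<sas-token>")
#     container = await service.create_container("outputs")
#     async for props in service.list_containers(name_starts_with="out"):
#         print(props.name)
#     blob = service.get_blob_client("outputs", "result.txt")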
| 47.617769 | 126 | 0.651451 | 2,662 | 23,047 | 5.519159 | 0.161908 | 0.019466 | 0.019603 | 0.017969 | 0.419616 | 0.381364 | 0.333855 | 0.316022 | 0.26722 | 0.259053 | 0 | 0.001321 | 0.27726 | 23,047 | 483 | 127 | 47.716356 | 0.880711 | 0.302642 | 0 | 0.30137 | 0 | 0 | 0.005236 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027397 | false | 0 | 0.130137 | 0 | 0.212329 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b251cfeca5722cb618287c206d805573fb4e321d | 660 | py | Python | examples/Python/durapub.py | dcramer/zguide | a07fe97c4c597e6401b4281ae07c3a156590f4c6 | [
"Zed",
"X11",
"MIT"
] | 2 | 2015-09-24T19:53:04.000Z | 2015-11-06T10:22:53.000Z | examples/Python/durapub.py | tzuryby/zguide | 609b4255d87287a42932392db1d50256c1b878ae | [
"Zed",
"X11",
"MIT"
] | null | null | null | examples/Python/durapub.py | tzuryby/zguide | 609b4255d87287a42932392db1d50256c1b878ae | [
"Zed",
"X11",
"MIT"
] | null | null | null | # encoding: utf-8
#
# Publisher for durable subscriber
#
# Author: Jeremy Avnet (brainsik) <spork(dash)zmq(at)theory(dot)org>
#
import zmq
import time
context = zmq.Context()
# Subscriber tells us when it's ready here
sync = context.socket(zmq.PULL)
sync.bind("tcp://*:5564")
# We send updates via this socket
publisher = context.socket(zmq.PUB)
publisher.bind("tcp://*:5565")
# Wait for synchronization request
sync_request = sync.recv()
# Now broadcast exactly 10 updates with pause
for n in range(10):  # range/send_string instead of the Python 2 xrange/str send
    msg = "Update %d" % n
    publisher.send_string(msg)
    time.sleep(1)
publisher.send_string("END")
time.sleep(1) # Give 0MQ/2.0.x time to flush output
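# A minimal sketch of the matching durable subscriber, assuming the same endpoints as above
# (PUSH -> tcp://localhost:5564 for the ready signal, SUB <- tcp://localhost:5565 for updates):
# sync = context.socket(zmq.PUSH); sync.connect("tcp://localhost:5564"); sync.send(b"")
# updates = context.socket(zmq.SUB); updates.setsockopt(zmq.SUBSCRIBE, b""); updates.connect("tcp://localhost:5565")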
| 20.625 | 70 | 0.70303 | 102 | 660 | 4.539216 | 0.666667 | 0.056156 | 0.069114 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.032609 | 0.163636 | 660 | 31 | 71 | 21.290323 | 0.806159 | 0.462121 | 0 | 0.142857 | 0 | 0 | 0.104956 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b252b3d89de68ba9a8b8200a051240e1c5d38842 | 5,979 | py | Python | TOOL/TOOL_PTCurve.py | Jonghyun-Kim-73/SAMG_Project | 42aef9985bc40775fd72d4525ce72ed299d93db1 | [
"Apache-2.0"
] | 1 | 2021-03-02T10:31:24.000Z | 2021-03-02T10:31:24.000Z | TOOL/TOOL_PTCurve.py | Jonghyun-Kim-73/SAMG_Project | 42aef9985bc40775fd72d4525ce72ed299d93db1 | [
"Apache-2.0"
] | null | null | null | TOOL/TOOL_PTCurve.py | Jonghyun-Kim-73/SAMG_Project | 42aef9985bc40775fd72d4525ce72ed299d93db1 | [
"Apache-2.0"
] | 1 | 2021-03-02T10:31:11.000Z | 2021-03-02T10:31:11.000Z | import math
class PTCureve:
"""
0 : 만족, 1: 불만족
PTCureve().Check(Temp=110, Pres=0)
"""
def __init__(self):
self.UpTemp = [0, 37.000000, 65.500000, 93.000000, 104.400000, 110.000000,
115.500000, 121.000000, 148.800000, 176.500000, 186.500000, 350.0]
self.UpPres = [29.5, 29.500000, 30.500000, 36.500000, 42.000000, 45.600000,
49.000000, 54.200000, 105.000000, 176.000000, 200.000000, 592]
self.BotTemp = [0, 37.000000, 149.000000, 159.000000, 169.000000, 179.000000,
204.000000, 232.000000, 260.000000, 287.700000, 350.000000]
self.BotPres = [17.0, 17.000000, 17.000000, 17.300000, 17.600000, 20.000000,
31.600000, 44.300000, 58.000000, 71.000000, 100.000000]
self.UpLineFunc = []
self.BotLineFunc = []
        # line coefficients in ax + by + c = 0 form, used to compute the distance from the current point to the PT curve
self.UpLineOrtFunc = []
self.BotLineOrtFunc = []
self._make_bound_UpLine()
self._make_bound_BotLine()
def _make_bound_func(self, Temp, Pres):
"""
2점에 대한 1차원 함수 반환
:param Temp: [a1, a2] == x
:param Pres: [b1, b2] == y
:return: func
"""
# y1 = ax1 + b
# y2 = ax2 + b
# a = (y1-y2)/(x1-x2)
# b = y1 - {(y1-y2)/(x1-x2) * x1}
get_a = (Pres[0] - Pres[1]) / (Temp[0] - Temp[1])
get_b = Pres[0] - get_a * Temp[0]
return lambda temp: get_a * temp + get_b
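        # e.g. Temp=[0, 37] and Pres=[29.5, 29.5] (the first segment of the upper limit)
        # give get_a = 0 and get_b = 29.5, i.e. a flat 29.5-pressure bound up to 37 degrees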
def _make_bound_orthogonal_func(self, Temp, Pres):
"""
2점에 대한 ax+by+c = 0
:param Temp: [a1, a2] == x
:param Pres: [b1, b2] == y
:return: [a, b, c] List
"""
# y1 = ax1 + b
# y2 = ax2 + b
# a = (y1-y2)/(x1-x2)
# b = y1 - {(y1-y2)/(x1-x2) * x1}
get_a = (Pres[0] - Pres[1]) / (Temp[0] - Temp[1]) # slop
get_b = Pres[0] - get_a * Temp[0]
# y = get_a * x + get_b ==> ax + by + c = 0
a = - get_a
b = 1
c = - get_b
return [a, b, c]
def _make_bound_UpLine(self):
for i in range(len(self.UpTemp) - 1):
self.UpLineFunc.append(self._make_bound_func(Temp=self.UpTemp[i:i+2], Pres=self.UpPres[i:i+2]))
self.UpLineOrtFunc.append(self._make_bound_orthogonal_func(Temp=self.UpTemp[i:i+2], Pres=self.UpPres[i:i+2]))
def _make_bound_BotLine(self):
for i in range(len(self.BotTemp) - 1):
self.BotLineFunc.append(self._make_bound_func(Temp=self.BotTemp[i:i+2], Pres=self.BotPres[i:i+2]))
self.BotLineOrtFunc.append(self._make_bound_orthogonal_func(Temp=self.BotTemp[i:i+2], Pres=self.BotPres[i:i+2]))
def _call_fun(self, Temp):
UpF, BotF = 0, 0
for i in range(len(self.UpTemp) - 1):
if self.UpTemp[i] <= Temp < self.UpTemp[i + 1]:
UpF = self.UpLineFunc[i]
for i in range(len(self.BotTemp) - 1):
if self.BotTemp[i] <= Temp < self.BotTemp[i + 1]:
BotF = self.BotLineFunc[i]
return UpF, BotF
def _call_ort_fun(self, Temp):
UpOrtF, BotOrtF = 0, 0
for i in range(len(self.UpTemp) - 1):
if self.UpTemp[i] <= Temp < self.UpTemp[i + 1]:
UpOrtF = self.UpLineOrtFunc[i]
for i in range(len(self.BotTemp) - 1):
if self.BotTemp[i] <= Temp < self.BotTemp[i + 1]:
BotOrtF = self.BotLineOrtFunc[i]
return UpOrtF, BotOrtF
def _get_pres(self, Temp):
"""
온도 받아서 위아래 Pres 조건 반환
:param Temp: [0~..]
:return: [Up_pres, Bot_pres]
"""
UpF, BotF = self._call_fun(Temp=Temp)
Up_pres, Bot_pres = UpF(Temp), BotF(Temp)
return Up_pres, Bot_pres
def _check_up_or_under(self, fun, Temp, Pres):
        Get_Pres = fun(Temp)
        if Get_Pres > Pres:
            return 0  # the given Pres lies below the curve
        elif Get_Pres == Pres:
            return 1  # the given Pres lies on the curve
        else:
            return 2  # the given Pres lies above the curve
def _check_in_or_out(self, Temp, Pres):
UpF, BotF = self._call_fun(Temp=Temp)
Upcond = self._check_up_or_under(UpF, Temp, Pres)
Botcond = self._check_up_or_under(BotF, Temp, Pres)
Reason = 0
        if Upcond == 2: Reason = 1  # outside the upper bound
        if Botcond == 0: Reason = 2  # outside the lower bound
        if Upcond == 2 or Botcond == 0:
            return [1, Reason]  # outside the PT curve
        else:
            return [0, Reason]  # operating inside the PT curve
def _check_distance(self, Temp, Pres):
"""
현재 온도 압력을 기준으로 Upline과 Botline과의 거리 계산
:param Temp: 현재 온도
:param Pres: 현재 압력
:return: UpDis, BotDis
"""
d = 0
UpOrtF, BotOrtF = self._call_ort_fun(Temp=Temp) # [a,b,c]
# d = abs(a*x_1 + b*y_1 + c) / (math.sqrt(math.pow(a, 2) + math.pow(b, 2)))
# x_1 = Temp
# y_1 = Pres
UpDis = abs(UpOrtF[0] * Temp + UpOrtF[1] * Pres + UpOrtF[2]) / \
(math.sqrt(math.pow(UpOrtF[0], 2) + math.pow(UpOrtF[1], 2)))
BotDis = abs(BotOrtF[0] * Temp + BotOrtF[1] * Pres + BotOrtF[2]) / \
(math.sqrt(math.pow(BotOrtF[0], 2) + math.pow(BotOrtF[1], 2)))
return UpDis, BotDis
def Check(self, Temp, Pres):
"""
PT curve에 운전 중인지 확인
:param Temp: 현재 온도
:param Pres: 현재 압력
:return: 0 만족, 1 불만족
"""
return self._check_in_or_out(Temp, Pres)[0]
def Check_Dis(self, Temp, Pres):
"""
현재 온도 압력을 기준으로 PT 커브에서 벗어난 경우 벗어난 거리 제공
:param Temp: 현재 온도
:param Pres: 현재 압력
:return: 벗어난 거리
"""
Satisfiy, Reason =self._check_in_or_out(Temp, Pres)
Updis, Botdis = self._check_distance(Temp, Pres)
if Satisfiy == 0:
return 0
else:
            # shortest distance to either limit line
return Updis if Updis < Botdis else Botdis
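# A minimal usage sketch; the expected return values follow the Check/Check_Dis docstrings above:
if __name__ == '__main__':
    curve = PTCureve()
    print(curve.Check(Temp=110, Pres=45))     # 0 -> inside the PT curve
    print(curve.Check(Temp=110, Pres=0))      # 1 -> outside (pressure below the lower limit)
    print(curve.Check_Dis(Temp=110, Pres=0))  # shortest distance to the nearest limit line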
| 36.018072 | 124 | 0.525339 | 862 | 5,979 | 3.522042 | 0.206497 | 0.031621 | 0.007905 | 0.021739 | 0.385046 | 0.346179 | 0.332345 | 0.286232 | 0.218709 | 0.187088 | 0 | 0.120172 | 0.337515 | 5,979 | 165 | 125 | 36.236364 | 0.646301 | 0.175447 | 0 | 0.223404 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.138298 | false | 0 | 0.010638 | 0 | 0.308511 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b253d781e0790d2fea0e77027d2690b6ff902614 | 542 | py | Python | Examples/pyfind.py | Aarif1430/Python-Awesome-notes-and-exercises-list | c8ad7f90ebd973025f37d4e79c2f1229a8a2915c | [
"MIT"
] | 2 | 2021-01-13T21:20:57.000Z | 2021-08-18T17:53:53.000Z | Examples/pyfind.py | Aarif1430/Python-Awesome-notes-and-exercises-list | c8ad7f90ebd973025f37d4e79c2f1229a8a2915c | [
"MIT"
] | null | null | null | Examples/pyfind.py | Aarif1430/Python-Awesome-notes-and-exercises-list | c8ad7f90ebd973025f37d4e79c2f1229a8a2915c | [
"MIT"
] | 1 | 2020-11-05T09:56:55.000Z | 2020-11-05T09:56:55.000Z | import glob, os
#from IPython.core.debugger import Pdb
def findall(topdir, pat, walk=True, includetop=1):
    if includetop:
        curpat = os.path.join(topdir, pat)
        myfiles = glob.glob(curpat)
    else:
        myfiles = []
    if walk:
        for root, dirs, files in os.walk(topdir):
            for curdir in dirs:
                curpath = os.path.join(root, curdir)
                curpat = os.path.join(curpath, pat)
                curfiles = glob.glob(curpat)
                myfiles.extend(curfiles)
    return myfiles
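# A minimal usage example: list every .py file under the current directory, including the top level.
if __name__ == '__main__':
    print(findall('.', '*.py'))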
| 27.1 | 51 | 0.573801 | 66 | 542 | 4.712121 | 0.484848 | 0.057878 | 0.096463 | 0.102894 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00274 | 0.326568 | 542 | 19 | 52 | 28.526316 | 0.849315 | 0.068266 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.066667 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b257a9e52146db719e997ed8872a96d99f210459 | 4,000 | py | Python | research/nlp/dscnn/eval.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 77 | 2021-10-15T08:32:37.000Z | 2022-03-30T13:09:11.000Z | research/nlp/dscnn/eval.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 3 | 2021-10-30T14:44:57.000Z | 2022-02-14T06:57:57.000Z | research/nlp/dscnn/eval.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 24 | 2021-10-15T08:32:45.000Z | 2022-03-24T18:45:20.000Z | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================
"""DSCNN eval."""
import os
import datetime
import glob
import numpy as np
from mindspore import context
from mindspore import Tensor, Model
from mindspore.common import dtype as mstype
from src.log import get_logger
from src.dataset import audio_dataset
from src.ds_cnn import DSCNN
from src.models import load_ckpt
from src.model_utils.config import config
from src.model_utils.moxing_adapter import moxing_wrapper
from src.model_utils.device_adapter import get_device_id
def get_top5_acc(top5_arg, gt_class):
sub_count = 0
for top5, gt in zip(top5_arg, gt_class):
if gt in top5:
sub_count += 1
return sub_count
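    # e.g. top5_arg=[[3, 7, 1, 9, 4], [2, 5, 8, 0, 6]] with gt_class=[9, 3] yields 1:
    # 9 appears in the first top-5 list, but 3 is missing from the second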
def val(args, model, test_de):
'''Eval.'''
eval_dataloader = test_de.create_tuple_iterator()
img_tot = 0
top1_correct = 0
top5_correct = 0
for data, gt_classes in eval_dataloader:
output = model.predict(Tensor(data, mstype.float32))
output = output.asnumpy()
top1_output = np.argmax(output, (-1))
top5_output = np.argsort(output)[:, -5:]
gt_classes = gt_classes.asnumpy()
t1_correct = np.equal(top1_output, gt_classes).sum()
top1_correct += t1_correct
top5_correct += get_top5_acc(top5_output, gt_classes)
img_tot += output.shape[0]
results = [[top1_correct], [top5_correct], [img_tot]]
results = np.array(results)
top1_correct = results[0, 0]
top5_correct = results[1, 0]
img_tot = results[2, 0]
acc1 = 100.0 * top1_correct / img_tot
acc5 = 100.0 * top5_correct / img_tot
if acc1 > args.best_acc:
args.best_acc = acc1
args.best_index = args.index
args.logger.info('Eval: top1_cor:{}, top5_cor:{}, tot:{}, acc@1={:.2f}%, acc@5={:.2f}%' \
.format(top1_correct, top5_correct, img_tot, acc1, acc5))
@moxing_wrapper(pre_process=None)
def main():
context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target, device_id=get_device_id())
# Logger
config.outputs_dir = os.path.join(config.log_path, datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))
config.logger = get_logger(config.outputs_dir)
# show args
config.logger.save_args(config)
# find model path
if os.path.isdir(config.model_dir):
models = list(glob.glob(os.path.join(config.model_dir, '*.ckpt')))
print(models)
f = lambda x: -1 * int(os.path.splitext(os.path.split(x)[-1])[0].split('-')[0].split('epoch')[-1])
config.models = sorted(models, key=f)
else:
config.models = [config.model_dir]
config.best_acc = 0
config.index = 0
config.best_index = 0
for model_path in config.models:
test_de = audio_dataset(config.eval_feat_dir, 'testing', config.model_setting_spectrogram_length,
config.model_setting_dct_coefficient_count, config.per_batch_size)
network = DSCNN(config, config.model_size_info)
load_ckpt(network, model_path, False)
network.set_train(False)
model = Model(network)
config.logger.info('load model %s success', model_path)
val(config, model, test_de)
config.index += 1
config.logger.info('Best model:{} acc:{:.2f}%'.format(config.models[config.best_index], config.best_acc))
if __name__ == "__main__":
main()
| 35.714286 | 114 | 0.6725 | 573 | 4,000 | 4.488656 | 0.34555 | 0.019051 | 0.020218 | 0.019829 | 0.021773 | 0.021773 | 0 | 0 | 0 | 0 | 0 | 0.024329 | 0.1985 | 4,000 | 111 | 115 | 36.036036 | 0.777916 | 0.17225 | 0 | 0 | 0 | 0 | 0.049635 | 0.006699 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039474 | false | 0 | 0.184211 | 0 | 0.236842 | 0.013158 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b258a2e86968b9ea451d5c04d29c962b69b4d506 | 2,137 | py | Python | slash_cog/context.py | CortexPE/slash_cog | 347ee2f5ee67b78cb93600d43fe55ecb31809ef6 | [
"MIT"
] | 6 | 2021-11-02T23:47:08.000Z | 2022-03-20T11:02:11.000Z | slash_cog/context.py | CortexPE/slash_cog | 347ee2f5ee67b78cb93600d43fe55ecb31809ef6 | [
"MIT"
] | null | null | null | slash_cog/context.py | CortexPE/slash_cog | 347ee2f5ee67b78cb93600d43fe55ecb31809ef6 | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2021-Present CortexPE
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import discord
from discord.ext import commands
class InteractContext(commands.Context):
"""Wrapper around InteractionRespone"""
def __init__(self, **attrs):
super().__init__(**attrs)
self.parent_interaction = self.message.parent_interaction # type: discord.Interaction
self.response = discord.InteractionResponse(self.message.parent_interaction)
async def send(self, *args, **kwargs):
delete_after = kwargs.pop("delete_after", None)
# if not self.response.is_done() and False:
# await self.response.send_message(ephemeral=False, *args, **kwargs)
# else:
await self.parent_interaction.followup.send(*args, **kwargs)
msg = await self.parent_interaction.original_message()
if delete_after is not None:
await msg.delete(delay=delete_after)
return msg
async def reply(self, *args, **kwargs):
kwargs.pop("mention_author", None) # interactions don't support mentioning author
return await self.send(*args, **kwargs)
| 40.320755 | 94 | 0.74029 | 292 | 2,137 | 5.34589 | 0.496575 | 0.056374 | 0.040359 | 0.035874 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002302 | 0.18671 | 2,137 | 52 | 95 | 41.096154 | 0.895857 | 0.606926 | 0 | 0 | 0 | 0 | 0.031592 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.117647 | 0 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b25942506b9fa4f6efa53ea25f36f0896f99a67e | 255 | py | Python | flask_app/__init__.py | lucidfrontier45/flasktest | 7700f35c732f9a9877d7bd34179d0a604b77a892 | [
"MIT"
] | null | null | null | flask_app/__init__.py | lucidfrontier45/flasktest | 7700f35c732f9a9877d7bd34179d0a604b77a892 | [
"MIT"
] | null | null | null | flask_app/__init__.py | lucidfrontier45/flasktest | 7700f35c732f9a9877d7bd34179d0a604b77a892 | [
"MIT"
] | null | null | null | __author__ = 'du'
import flask
import os
app = flask.Flask(__name__)
base_dir = os.path.dirname(os.path.dirname(__file__))  # avoid shadowing the built-in dir()
db_path = os.path.join(base_dir, "test.db")
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + db_path
app.config.from_object("config") | 23.181818 | 62 | 0.729412 | 39 | 255 | 4.333333 | 0.538462 | 0.106509 | 0.153846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.098039 | 255 | 11 | 63 | 23.181818 | 0.734783 | 0 | 0 | 0 | 0 | 0 | 0.1875 | 0.089844 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b2596dda5b87ea24820ac29c8a12748c1d10c2fd | 2,269 | py | Python | drdown/appointments/models/model_appointment.py | fga-gpp-mds/2018.1-Cris-Down | 3423374360105b06ac2c57a320bf2ee8deaa08a3 | [
"MIT"
] | 11 | 2018-03-11T01:21:43.000Z | 2018-06-19T21:51:33.000Z | drdown/appointments/models/model_appointment.py | fga-gpp-mds/2018.1-Grupo12 | 3423374360105b06ac2c57a320bf2ee8deaa08a3 | [
"MIT"
] | 245 | 2018-03-13T19:07:14.000Z | 2018-07-07T22:46:00.000Z | drdown/appointments/models/model_appointment.py | fga-gpp-mds/2018.1-Grupo12 | 3423374360105b06ac2c57a320bf2ee8deaa08a3 | [
"MIT"
] | 12 | 2018-08-24T13:26:04.000Z | 2021-03-27T16:28:22.000Z | from django.db import models
from drdown.users.models.model_health_team import HealthTeam
from drdown.users.models.model_patient import Patient
from django.utils.translation import ugettext_lazy as _
class Appointment(models.Model):
date = models.DateField(
_('Date'),
help_text=_('Date of appointment'),
max_length=50
)
time = models.TimeField(
_('Time'),
help_text=_('Time of appointment'),
max_length=50
)
SPEECH_THERAPHY = "Speech Therapy"
PSYCHOLOGY = "Psychology"
PHYSIOTHERAPY = "Physiotherapy"
OCCUPATIONAL_THERAPY = "Occupational Therapy"
CARDIOLOGY = "Cardiology"
NEUROLOGY = "Neurology"
PEDIATRICS = "Pediatrics"
GENERAL_PRACTITIONER = "General Practitioner"
SPECIALITY_APPOINTMENT_CHOICES = (
(SPEECH_THERAPHY, _('Speech Therapy')),
(PSYCHOLOGY, _('Psychology')),
(PHYSIOTHERAPY, _('Physiotherapy')),
(OCCUPATIONAL_THERAPY, _('Occupational Therapy')),
(CARDIOLOGY, _('Cardiology')),
(NEUROLOGY, _('Neurology')),
(PEDIATRICS, _('Pediatrics')),
(GENERAL_PRACTITIONER, _('General Practitioner')),
)
speciality = models.CharField(
_('Speciality'),
choices=SPECIALITY_APPOINTMENT_CHOICES,
help_text=_("Speciality of appointment"),
max_length=30
)
doctor = models.ForeignKey(
HealthTeam,
on_delete=models.CASCADE,
verbose_name=_('Doctor'),
related_name='appointments',
)
patient = models.ForeignKey(
Patient,
on_delete=models.CASCADE,
verbose_name=_('Patient'),
related_name='appointments',
)
SCHEDULED = 'Scheduled'
CANCELED = 'Canceled'
STATUS_CHOICES = (
(SCHEDULED, _('Scheduled')),
(CANCELED, _('Canceled')),
)
status = models.CharField(
_('Status'),
choices=STATUS_CHOICES,
help_text=_("Is this appointment still scheduled?"),
default=SCHEDULED,
max_length=20,
editable=False,
)
def __str__(self):
return _('Appointment of ') + self.patient.user.name
class Meta:
verbose_name = _("Appointment")
verbose_name_plural = _("Appointments")
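# A minimal usage sketch; `health_team_member` and `patient` stand for existing HealthTeam
# and Patient instances, and the date/time values are placeholders:
# Appointment.objects.create(
#     date=date(2018, 5, 21), time=time(14, 30),
#     speciality=Appointment.CARDIOLOGY, doctor=health_team_member, patient=patient,
# )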
| 26.694118 | 60 | 0.629352 | 199 | 2,269 | 6.869347 | 0.351759 | 0.023409 | 0.035113 | 0.048281 | 0.495977 | 0.364301 | 0.317484 | 0.317484 | 0.317484 | 0.317484 | 0 | 0.004737 | 0.255619 | 2,269 | 84 | 61 | 27.011905 | 0.804618 | 0 | 0 | 0.085714 | 0 | 0 | 0.195681 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014286 | false | 0 | 0.057143 | 0.014286 | 0.371429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b259d2f5a1801c2abf6d040c800a6e5088550b4e | 12,292 | py | Python | django_grepdb/management/commands/grepdb.py | exonian/django-grep-db | e0105406b8f3790ba1970abd94b71f2620712667 | [
"MIT"
] | 4 | 2015-10-29T15:14:33.000Z | 2015-11-16T11:28:09.000Z | django_grepdb/management/commands/grepdb.py | exonian/django-grep-db | e0105406b8f3790ba1970abd94b71f2620712667 | [
"MIT"
] | 11 | 2015-10-26T16:47:36.000Z | 2021-06-10T17:52:39.000Z | django_grepdb/management/commands/grepdb.py | exonian/django-grep-db | e0105406b8f3790ba1970abd94b71f2620712667 | [
"MIT"
] | 1 | 2020-12-10T15:46:02.000Z | 2020-12-10T15:46:02.000Z | import argparse
import re
import colorama
from django.apps import apps
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.management.base import BaseCommand, CommandError
from django.core.urlresolvers import reverse
from termcolor import colored
def show_values_style(arg):
special_choices = ['a', 'l']
if arg in special_choices:
return arg
try:
return int(arg)
except ValueError:
raise argparse.ArgumentTypeError("Show values style must be one of '{values}' or an integer".format(
values=', '.join(special_choices)))
class Command(BaseCommand):
help = 'Provides a grep-like command line interface for searching objects in the database'
def add_arguments(self, parser):
parser.add_argument('pattern', type=str, help='Pattern to search for')
parser.add_argument('identifiers', nargs='*', type=str, help='Identifier of a model or field')
parser.add_argument('--show-values', '-s', nargs='?', type=show_values_style, default='l',
help='Turn off showing matching values (default is any line containing a match), '
'or provide the mode "a" to show the entire field '
'or an integer to show that many characters either side of a match.')
parser.add_argument('--ignore-case', '-i', action='store_true', help='Match case-insensitively')
parser.add_argument('--find-text-fields', '-t', dest='field_type', action='append_const', const='TextField',
help='Search all TextField fields (and subclasses) on a model if no field is specified')
parser.add_argument('--find-char-fields', '-c', dest='field_type', action='append_const', const='CharField',
help='Search all CharField fields (and subclasses) on a model if no field is specified')
parser.add_argument('--find-fields', '-f', dest='field_type', action='append', type=str,
help='Search all fields of this type (and subclasses) on a model if no field is specified')
parser.add_argument('--preset', '-p', help='The name of a preset configuration in DJANGO_GREPDB_PRESETS. '
'DJANGO_GREPDB_PRESETS should be a dict of dicts, with each config dict providing '
'default values for any number of parser args.')
if apps.is_installed('django.contrib.admin'):
parser.add_argument('--admin-links', '-l', dest='admin_hostname', nargs='*', default=['default'],
help='Generate admin links. Defaults to true, using http://localhost:8000/ as hostname. '
'Can be passed one or more hostnames to use instead. If DJANGO_GREPDB_SITES is a '
'dict defined in settings, the value of the "default" key will be used as default, '
'and keys from it can also be passed to use their values as hostnames. '
'Links can be disabled by using this argument without any values.')
self.parser = parser
def handle(self, **options):
colorama.init()
preset = self.get_preset(options['preset'])
if preset:
self.parser.set_defaults(**preset)
# re-parse the command line arguments with new defaults in place
try:
options = vars(self.parser.parse_args(self.raw_args))
except AttributeError:
if not self._called_from_command_line:
# regular call_command doesn't store raw_args
msg = '--preset mode is not compatible with django.core.management.call_command: you need to ' \
'use django_grepdb.management.call_command instead'
raise CommandError(msg)
else:
# if it was called from the command line, the problem is something unknown
raise
self.pattern = options['pattern']
self.ignore_case = options['ignore_case']
self.show_values = options.get('show_values', False)
self.field_type = options['field_type'] or ['TextField']
self.admin_hostnames = self.get_admin_hostnames(options)
identifiers = options['identifiers']
queries = self.get_queries(identifiers)
for query in queries:
results = self.search(query)
if results.exists():
self.stdout.write(colored(u'\n{model} {field}'.format(model=query['manager'].model, field=query['field_name']),
'cyan', attrs=['bold']))
for result in results:
self.stdout.write(colored(u'{result} (pk={result.pk})'.format(result=result), 'green', attrs=['bold']))
if self.admin_hostnames:
self.stdout.write(self.get_admin_links(result))
if self.show_values is not None: # can't be a truthiness check, as zero is different from no show
self.stdout.write(self.get_value(result, query))
def run_from_argv(self, argv):
# store raw args so that we can re-parse them with new defaults if preset mode is used
self.raw_args = argv[2:]
super(Command, self).run_from_argv(argv)
def get_admin_hostnames(self, options):
from_options = options.get('admin_hostname', False)
if not from_options:
return
from django.contrib.admin import site as admin_site
self.admin_site = admin_site
hostnames = []
for reference in from_options:
hostnames.append(self.get_admin_hostname(reference))
return hostnames
def get_admin_hostname(self, reference):
"""Treats the reference as a hostname if it contains either 'http' or 'localhost'.
If it contains neither, looks up the reference in settings.DJANGO_GREPDB_SITES
"""
if 'http' in reference or 'localhost' in reference:
return reference
try:
hostname = self.get_admin_hostname_from_settings(reference)
except CommandError:
if reference == 'default':
hostname = 'localhost:8000'
else:
raise
return hostname
def get_admin_hostname_from_settings(self, reference):
try:
sites = getattr(settings, 'DJANGO_GREPDB_SITES')
except AttributeError:
msg = u'Reference {} is not recognised as a hostname and DJANGO_GREPDB_SITES is not configured in settings'
raise CommandError(msg.format(reference))
try:
hostname = sites[reference]
except KeyError:
msg = u'Reference {} is not recognised as a hostname and was not found in DJANGO_GREPDB_SITES'
raise CommandError(msg.format(reference))
return hostname
def get_preset(self, preset_name):
if not preset_name:
return None
try:
presets = getattr(settings, 'DJANGO_GREPDB_PRESETS')
except AttributeError:
raise CommandError(u'Preset specified but DJANGO_GREPDB_PRESETS is not configured in settings')
try:
preset = presets[preset_name]
except TypeError:
msg = u'DJANGO_GREPDB_PRESETS is not a dict-like object'
raise CommandError(msg)
except KeyError:
msg = u'Preset "{preset_name}" not found in DJANGO_GREPDB_PRESETS. Available values are: {values}'
raise CommandError(msg.format(preset_name=preset_name, values=', '.join(presets.keys())))
try:
preset.keys()
except AttributeError:
msg = u'Preset "{preset_name}" is not a dict-like object'
raise CommandError(msg.format(preset_name=preset_name))
return preset
def get_queries(self, identifiers):
queries = []
for identifier in identifiers:
queries.extend(self.get_queries_for_identifier(identifier))
return queries
def get_queries_for_identifier(self, identifier):
model, field_names = self.parse_identifier(identifier)
queries = []
for field_name in field_names:
params = self.get_queryset_params(field_name)
queries.append(dict(manager=model._default_manager, params=params, field_name=field_name))
return queries
def search(self, query):
return query['manager'].filter(**query['params'])
def parse_identifier(self, identifier):
parts = identifier.split('.')
app_label, model_name = parts[:2]
field_names = parts[2:]
model = apps.get_model(app_label, model_name)
if not field_names:
field_names = self.get_field_names_for_model(model)
return (model, field_names)
def get_field_names_for_model(self, model):
return [field.name for field in model._meta.fields if field.get_internal_type() in self.field_type]
def get_queryset_params(self, field_name):
lookup_type = 'regex'
if self.ignore_case:
lookup_type = 'i' + lookup_type
return {'{field_name}__{lookup_type}'.format(field_name=field_name, lookup_type=lookup_type): self.pattern}
def get_value(self, result, query):
text = getattr(result, query['field_name'])
show_values = self.show_values
if show_values == 'a':
return self.get_value_all(text)
elif show_values == 'l':
return self.get_value_line(text)
else:
return self.get_value_surrounded(text)
def get_value_all(self, text):
regex_args = [self.pattern, text, re.DOTALL]
if self.ignore_case:
regex_args[2] += re.IGNORECASE
matches = [m.span() for m in re.finditer(*regex_args)]
value = u''
end_of_previous = 0
for start, end in matches:
value = value + text[end_of_previous:start] + colored(text[start:end], 'grey', 'on_yellow')
end_of_previous = end
value = value + text[end_of_previous:] + '\n\n'
return value
def get_value_line(self, text):
value = u''
for line in text.splitlines():
regex_args = [self.pattern, line]
if self.ignore_case:
regex_args.append(re.IGNORECASE)
matches = [m.span() for m in re.finditer(*regex_args)]
if matches:
end_of_previous = 0
for start, end in matches:
value = value + line[end_of_previous:start] + colored(line[start:end], 'grey', 'on_yellow')
end_of_previous = end
value = value + line[end_of_previous:] + '\n\n'
return value
def get_value_surrounded(self, text):
regex_args = [self.pattern, text]
if self.ignore_case:
regex_args.append(re.IGNORECASE)
matches = re.findall(*regex_args)
chars = self.show_values
matches = [m.span() for m in re.finditer(*regex_args)]
value = u''
end_of_previous = 0
for start, end in matches:
if end_of_previous and end_of_previous > start:
value = value[:start - end_of_previous]
elif end_of_previous and end_of_previous > start - chars:
value += text[end_of_previous:start]
else:
value += '\n' + text[start - chars:start]
value += colored(text[start:end], 'grey', 'on_yellow') + text[end:end + chars]
end_of_previous = end + chars
value = value.strip() + '\n\n'
return value
def get_admin_links(self, result):
content_type = ContentType.objects.get_for_model(result)
admin_url_pattern = 'admin:{app}_{model}_change'.format(app=content_type.app_label, model=content_type.model)
relative_url = reverse(admin_url_pattern, args=[result.pk])
return '\n'.join([colored(hostname + relative_url, 'green') for hostname in self.admin_hostnames])
def get_version(self):
from ...version import VERSION
return VERSION
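# A minimal settings sketch for the --preset and --admin-links options described in
# add_arguments; the app label, preset name and hostnames below are placeholders:
# DJANGO_GREPDB_PRESETS = {
#     'emails': {'identifiers': ['myapp.EmailTemplate'], 'ignore_case': True},
# }
# DJANGO_GREPDB_SITES = {
#     'default': 'http://localhost:8000',
#     'staging': 'https://staging.example.com',
# }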
| 47.459459 | 127 | 0.617393 | 1,510 | 12,292 | 4.85894 | 0.180132 | 0.010904 | 0.028349 | 0.012267 | 0.241924 | 0.178275 | 0.162192 | 0.138204 | 0.119122 | 0.108219 | 0 | 0.001715 | 0.288643 | 12,292 | 258 | 128 | 47.643411 | 0.837374 | 0.039619 | 0 | 0.234513 | 0 | 0 | 0.205977 | 0.021566 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088496 | false | 0.00885 | 0.048673 | 0.00885 | 0.247788 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b25a02a1a048db3506f766832707e2942c1c8ed6 | 8,446 | py | Python | rationale_net/learn/train.py | ravenscroftj/text_nn | d8a2ed5a14f9166e74e254140851d4c939d3e330 | [
"MIT"
] | 62 | 2018-01-10T14:31:32.000Z | 2022-01-14T02:57:28.000Z | rationale_net/learn/train.py | crinard/text_nn | ff26c2112ecf665cba2dab0d819d22d8ffb03f6d | [
"MIT"
] | 10 | 2018-05-26T17:33:49.000Z | 2021-01-11T04:03:00.000Z | rationale_net/learn/train.py | crinard/text_nn | ff26c2112ecf665cba2dab0d819d22d8ffb03f6d | [
"MIT"
] | 32 | 2018-01-15T07:12:22.000Z | 2022-03-01T19:05:29.000Z | import os
import sys
import torch
import torch.autograd as autograd
import torch.nn.functional as F
import rationale_net.utils.generic as generic
import rationale_net.utils.metrics as metrics
import tqdm
import numpy as np
import pdb
import sklearn.metrics
import rationale_net.utils.learn as learn
def train_model(train_data, dev_data, model, gen, args):
'''
Train model and tune on dev set. If model doesn't improve dev performance within args.patience
epochs, then halve the learning rate, restore the model to best and continue training.
At the end of training, the function will restore the model to best dev version.
returns epoch_stats: a dictionary of epoch level metrics for train and test
returns model : best model from this call to train
'''
if args.cuda:
model = model.cuda()
gen = gen.cuda()
args.lr = args.init_lr
optimizer = learn.get_optimizer([model, gen], args)
num_epoch_sans_improvement = 0
epoch_stats = metrics.init_metrics_dictionary(modes=['train', 'dev'])
step = 0
tuning_key = "dev_{}".format(args.tuning_metric)
    best_epoch_func = min if args.tuning_metric == 'loss' else max
train_loader = learn.get_train_loader(train_data, args)
dev_loader = learn.get_dev_loader(dev_data, args)
for epoch in range(1, args.epochs + 1):
print("-------------\nEpoch {}:\n".format(epoch))
for mode, dataset, loader in [('Train', train_data, train_loader), ('Dev', dev_data, dev_loader)]:
train_model = mode == 'Train'
print('{}'.format(mode))
key_prefix = mode.lower()
epoch_details, step, _, _, _, _ = run_epoch(
data_loader=loader,
train_model=train_model,
model=model,
gen=gen,
optimizer=optimizer,
step=step,
args=args)
epoch_stats, log_statement = metrics.collate_epoch_stat(epoch_stats, epoch_details, key_prefix, args)
# Log performance
print(log_statement)
# Save model if beats best dev
best_func = min if args.tuning_metric == 'loss' else max
if best_func(epoch_stats[tuning_key]) == epoch_stats[tuning_key][-1]:
num_epoch_sans_improvement = 0
if not os.path.isdir(args.save_dir):
os.makedirs(args.save_dir)
# Subtract one because epoch is 1-indexed and arr is 0-indexed
epoch_stats['best_epoch'] = epoch - 1
torch.save(model, args.model_path)
torch.save(gen, learn.get_gen_path(args.model_path))
else:
num_epoch_sans_improvement += 1
if not train_model:
print('---- Best Dev {} is {:.4f} at epoch {}'.format(
args.tuning_metric,
epoch_stats[tuning_key][epoch_stats['best_epoch']],
epoch_stats['best_epoch'] + 1))
if num_epoch_sans_improvement >= args.patience:
print("Reducing learning rate")
num_epoch_sans_improvement = 0
model.cpu()
gen.cpu()
model = torch.load(args.model_path)
gen = torch.load(learn.get_gen_path(args.model_path))
if args.cuda:
model = model.cuda()
gen = gen.cuda()
args.lr *= .5
optimizer = learn.get_optimizer([model, gen], args)
# Restore model to best dev performance
if os.path.exists(args.model_path):
model.cpu()
model = torch.load(args.model_path)
gen.cpu()
gen = torch.load(learn.get_gen_path(args.model_path))
return epoch_stats, model, gen
def test_model(test_data, model, gen, args):
'''
Run model on test data, and return loss, accuracy.
'''
if args.cuda:
model = model.cuda()
gen = gen.cuda()
test_loader = torch.utils.data.DataLoader(
test_data,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.num_workers,
drop_last=False)
test_stats = metrics.init_metrics_dictionary(modes=['test'])
mode = 'Test'
train_model = False
key_prefix = mode.lower()
print("-------------\nTest")
epoch_details, _, losses, preds, golds, rationales = run_epoch(
data_loader=test_loader,
train_model=train_model,
model=model,
gen=gen,
optimizer=None,
step=None,
args=args)
test_stats, log_statement = metrics.collate_epoch_stat(test_stats, epoch_details, 'test', args)
test_stats['losses'] = losses
test_stats['preds'] = preds
test_stats['golds'] = golds
test_stats['rationales'] = rationales
print(log_statement)
return test_stats
def run_epoch(data_loader, train_model, model, gen, optimizer, step, args):
'''
Train model for one pass of train data, and return loss, accuracy
'''
eval_model = not train_model
data_iter = iter(data_loader)
losses = []
obj_losses = []
k_selection_losses = []
k_continuity_losses = []
preds = []
golds = []
texts = []
rationales = []
if train_model:
model.train()
gen.train()
else:
gen.eval()
model.eval()
num_batches_per_epoch = len(data_iter)
if train_model:
num_batches_per_epoch = min(len(data_iter), 10000)
for _ in tqdm.tqdm(range(num_batches_per_epoch)):
batch = next(data_iter)
if train_model:
step += 1
if step % 100 == 0 or args.debug_mode:
args.gumbel_temprature = max(np.exp(-(step + 1) * args.gumbel_decay), 0.05)
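# Descriptive note: the line above anneals the Gumbel temperature, decaying it
# exponentially with the global step and flooring it at 0.05. As a purely
# illustrative (assumed) setting, gumbel_decay=1e-5 gives a temperature of
# about exp(-1e-5 * 10000) ~= 0.90 after 10,000 steps.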
x_indx = learn.get_x_indx(batch, args, eval_model)
text = batch['text']
y = autograd.Variable(batch['y'], volatile=eval_model)
if args.cuda:
x_indx, y = x_indx.cuda(), y.cuda()
if train_model:
optimizer.zero_grad()
if args.get_rationales:
mask, z = gen(x_indx)
else:
mask = None
logit, _ = model(x_indx, mask=mask)
if args.use_as_tagger:
logit = logit.view(-1, 2)
y = y.view(-1)
loss = get_loss(logit, y, args)
obj_loss = loss
if args.get_rationales:
selection_cost, continuity_cost = gen.loss(mask, x_indx)
loss += args.selection_lambda * selection_cost
loss += args.continuity_lambda * continuity_cost
if train_model:
loss.backward()
optimizer.step()
if args.get_rationales:
k_selection_losses.append( generic.tensor_to_numpy(selection_cost))
k_continuity_losses.append( generic.tensor_to_numpy(continuity_cost))
obj_losses.append(generic.tensor_to_numpy(obj_loss))
losses.append( generic.tensor_to_numpy(loss) )
batch_softmax = F.softmax(logit, dim=-1).cpu()
preds.extend(torch.max(batch_softmax, 1)[1].view(y.size()).data.numpy())
texts.extend(text)
rationales.extend(learn.get_rationales(mask, text))
if args.use_as_tagger:
golds.extend(batch['y'].view(-1).numpy())
else:
golds.extend(batch['y'].numpy())
epoch_metrics = metrics.get_metrics(preds, golds, args)
epoch_stat = {
'loss' : np.mean(losses),
'obj_loss': np.mean(obj_losses)
}
for metric_k in epoch_metrics.keys():
epoch_stat[metric_k] = epoch_metrics[metric_k]
if args.get_rationales:
epoch_stat['k_selection_loss'] = np.mean(k_selection_losses)
epoch_stat['k_continuity_loss'] = np.mean(k_continuity_losses)
return epoch_stat, step, losses, preds, golds, rationales
def get_loss(logit, y, args):
if args.objective == 'cross_entropy':
if args.use_as_tagger:
loss = F.cross_entropy(logit, y, reduce=False)
neg_loss = torch.sum(loss * (y == 0).float()) / torch.sum(y == 0).float()
pos_loss = torch.sum(loss * (y == 1).float()) / torch.sum(y == 1).float()
loss = args.tag_lambda * neg_loss + (1 - args.tag_lambda) * pos_loss
else:
loss = F.cross_entropy(logit, y)
elif args.objective == 'mse':
loss = F.mse_loss(logit, y.float())
else:
raise Exception(
"Objective {} not supported!".format(args.objective))
return loss
| 31.281481 | 113 | 0.608572 | 1,093 | 8,446 | 4.473925 | 0.182068 | 0.034765 | 0.018609 | 0.023517 | 0.229448 | 0.174029 | 0.111043 | 0.073415 | 0.060532 | 0.053579 | 0 | 0.006582 | 0.280488 | 8,446 | 269 | 114 | 31.39777 | 0.798091 | 0.077552 | 0 | 0.283505 | 0 | 0 | 0.039477 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020619 | false | 0 | 0.061856 | 0 | 0.103093 | 0.036082 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b25a56f147078a8ff6f3f272cfa50e8d0cfd8ed8 | 1,121 | py | Python | froide_govplan/urls.py | okfde/froide-govplan | 1ae085c39c25af7c7a74d90ce39580119942a328 | [
"MIT"
] | 2 | 2022-03-13T14:49:46.000Z | 2022-03-14T18:39:04.000Z | froide_govplan/urls.py | okfde/froide-govplan | 1ae085c39c25af7c7a74d90ce39580119942a328 | [
"MIT"
] | 3 | 2022-03-18T11:52:46.000Z | 2022-03-18T14:13:43.000Z | froide_govplan/urls.py | okfde/froide-govplan | 1ae085c39c25af7c7a74d90ce39580119942a328 | [
"MIT"
] | 1 | 2022-03-18T09:36:20.000Z | 2022-03-18T09:36:20.000Z | from django.urls import path
from django.utils.translation import pgettext_lazy
from .views import (
GovPlanDetailOGView,
GovPlanDetailView,
GovPlanProposeUpdateView,
GovPlanSectionDetailOGView,
GovPlanSectionDetailView,
search,
)
app_name = "govplan"
urlpatterns = [
path("search/", search, name="search"),
path(
pgettext_lazy("url part", "<slug:gov>/plan/<slug:plan>/"),
GovPlanDetailView.as_view(),
name="plan",
),
path(
pgettext_lazy("url part", "<slug:gov>/plan/<slug:plan>/_og/"),
GovPlanDetailOGView.as_view(),
name="plan_og",
),
path(
pgettext_lazy("url part", "<slug:gov>/plan/<slug:plan>/propose-update/"),
GovPlanProposeUpdateView.as_view(),
name="propose_planupdate",
),
path(
pgettext_lazy("url part", "<slug:gov>/<slug:section>/"),
GovPlanSectionDetailView.as_view(),
name="section",
),
path(
pgettext_lazy("url part", "<slug:gov>/<slug:section>/_og/"),
GovPlanSectionDetailOGView.as_view(),
name="section_og",
),
]
| 26.069767 | 81 | 0.619982 | 110 | 1,121 | 6.163636 | 0.290909 | 0.106195 | 0.117994 | 0.140118 | 0.306785 | 0.306785 | 0.306785 | 0.306785 | 0.306785 | 0.185841 | 0 | 0 | 0.227475 | 1,121 | 42 | 82 | 26.690476 | 0.78291 | 0 | 0 | 0.25641 | 0 | 0 | 0.236396 | 0.141838 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.076923 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b25a7804b55de2591227f675511d587e48568e7f | 2,379 | py | Python | networking_generic_switch/devices/netmiko_devices/brocade.py | ChameleonCloud/networking-generic-switch | 98ddec1f11eab5197f1443207b13a16f364e5f10 | [
"Apache-2.0"
] | null | null | null | networking_generic_switch/devices/netmiko_devices/brocade.py | ChameleonCloud/networking-generic-switch | 98ddec1f11eab5197f1443207b13a16f364e5f10 | [
"Apache-2.0"
] | 4 | 2018-11-21T17:54:37.000Z | 2021-10-04T14:40:40.000Z | networking_generic_switch/devices/netmiko_devices/brocade.py | ChameleonCloud/networking-generic-switch | 98ddec1f11eab5197f1443207b13a16f364e5f10 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Servers.com
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from oslo_log import log as logging
from networking_generic_switch.devices import netmiko_devices
LOG = logging.getLogger(__name__)
class BrocadeFastIron(netmiko_devices.NetmikoSwitch):
ADD_NETWORK = (
'vlan {segmentation_id} by port',
'name {network_id}',
)
DELETE_NETWORK = (
'no vlan {segmentation_id}',
)
PLUG_PORT_TO_NETWORK = (
'vlan {segmentation_id} by port',
'untagged ether {port}',
)
DELETE_PORT = (
'vlan {segmentation_id} by port',
'no untagged ether {port}',
)
QUERY_PORT = (
'show interfaces ether {port} | include VLAN',
)
@staticmethod
def _process_raw_output(raw_output):
PATTERN = "Member of L2 VLAN ID (\\d+), port is untagged"
match = re.search(PATTERN, raw_output)
if not match:
return None
return match.group(1) # vlan_id
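# Hedged illustration (actual FastIron output may differ slightly): a raw
# output line such as "Member of L2 VLAN ID 42, port is untagged" matches
# PATTERN above, so _process_raw_output would return "42".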
def get_wrong_vlan(self, port):
raw_output = self.send_commands_to_device(
self._format_commands(self.QUERY_PORT, port=port)
)
return self._process_raw_output(str(raw_output))
def clean_port_vlan_if_necessary(self, port):
wrong_vlan = self.get_wrong_vlan(port)
if not wrong_vlan:
return
if str(wrong_vlan) == '1':
return
LOG.warning(
'Port %s is used in a wrong vlan %s, clean it',
port,
str(wrong_vlan)
)
self.delete_port(port, wrong_vlan)
@netmiko_devices.check_output('plug port')
def plug_port_to_network(self, port, segmentation_id):
self.clean_port_vlan_if_necessary(port)
return super(BrocadeFastIron, self).plug_port_to_network(
port, segmentation_id)
| 29.012195 | 78 | 0.649012 | 308 | 2,379 | 4.798701 | 0.409091 | 0.048714 | 0.048714 | 0.040595 | 0.090663 | 0.041949 | 0 | 0 | 0 | 0 | 0 | 0.006315 | 0.26776 | 2,379 | 81 | 79 | 29.37037 | 0.842135 | 0.245481 | 0 | 0.096154 | 0 | 0 | 0.179213 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.057692 | 0 | 0.365385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b25a9cc4cfb580bca0c7aa60b5180bfd5845e5a7 | 1,340 | py | Python | 2015/2015-day06/lights.py | bennettp123/advent-of-code | 07b2ada43ad16a842b010c852f456c3ed44b1562 | [
"MIT"
] | null | null | null | 2015/2015-day06/lights.py | bennettp123/advent-of-code | 07b2ada43ad16a842b010c852f456c3ed44b1562 | [
"MIT"
] | null | null | null | 2015/2015-day06/lights.py | bennettp123/advent-of-code | 07b2ada43ad16a842b010c852f456c3ed44b1562 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import re
def turn_on(lights, a, b, c, d):
for row in range(a, c+1):
for col in range(b, d+1):
lights[row][col] += 1
return lights
def turn_off(lights, a, b, c, d):
for row in range(a, c+1):
for col in range(b, d+1):
lights[row][col] = max(0, lights[row][col] - 1)
return lights
def toggle(lights, a, b, c, d):
for row in range(a, c+1):
for col in range(b, d+1):
lights[row][col] += 2
return lights
if __name__ == '__main__':
lights = [[0 for row in range(1000)] for col in range(1000)]
with open('input', 'r') as f:
for line in f:
op = re.match('(turn on|turn off|toggle)',
line).groups()[0]
a, b, c, d = [int(x) for x in
re.search('(\d*),(\d*) through (\d*),(\d*)', line).groups()]
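# Illustrative parse, assuming the 2015 Advent of Code day 6 input format:
# a line like "turn off 499,499 through 500,500" yields op == 'turn off'
# and (a, b, c, d) == (499, 499, 500, 500).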
if op == 'turn on':
lights = turn_on(lights, a, b, c, d)
elif op == 'turn off':
lights = turn_off(lights, a, b, c, d)
elif op == 'toggle':
lights = toggle(lights, a, b, c, d)
else:
raise Exception('bad op! {0}'.format(op))
sum_lit = sum([sum(lights[r]) for r in range(1000)])
print('part 2: total brightness is {0}'.format(sum_lit))
| 27.346939 | 80 | 0.48806 | 211 | 1,340 | 3.033175 | 0.274882 | 0.098438 | 0.032813 | 0.04375 | 0.446875 | 0.446875 | 0.4125 | 0.253125 | 0.253125 | 0.253125 | 0 | 0.029647 | 0.345522 | 1,340 | 48 | 81 | 27.916667 | 0.700114 | 0.014925 | 0 | 0.264706 | 0 | 0 | 0.10091 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088235 | false | 0 | 0.029412 | 0 | 0.205882 | 0.029412 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b25b5709fcc179cf1d0085cb7347b53fc47934c7 | 2,993 | py | Python | notifier.py | kondekarshubham123/covid19-notifier | a003699e345df21f713638807e9dc93ccb142ba0 | [
"MIT"
] | null | null | null | notifier.py | kondekarshubham123/covid19-notifier | a003699e345df21f713638807e9dc93ccb142ba0 | [
"MIT"
] | null | null | null | notifier.py | kondekarshubham123/covid19-notifier | a003699e345df21f713638807e9dc93ccb142ba0 | [
"MIT"
] | 1 | 2021-05-11T15:12:00.000Z | 2021-05-11T15:12:00.000Z | import os
import smtplib
from credential import SendMain
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import datetime
def emailsend(user, data):
mail_content = '''Hello,
Below are the list of covid centers.\n
'''
# print(mail_content)
# print(user,data)
lis = '\n----------------------\n'
for hos in data['sessions']:
# print(hos['name'],hos['address'],hos['state_name'],hos['pincode'],hos['fee_type'],hos['date'],hos['available_capacity'],
# hos['fee'],hos['min_age_limit'],hos['vaccine'])
lis += "Hospital name : " + hos['name'] + '\n'
lis += "Hospital Pincode : " + str(hos['pincode']) + '\n'
lis += "Fee type : " + str(hos['fee_type']) + '\n'
lis += "Available Capacity : " + \
str(hos['available_capacity']) + '\n'
lis += "Age Limit : " + str(hos['min_age_limit']) + '\n'
lis += "Vaccine type : " + hos['vaccine'] + '\n'
lis += '----------------------\n'
mail_content += lis
# The mail addresses and password
sender_address = SendMain.get_email()
sender_pass = SendMain.get_senderpwd()
receiver_address = user.email
# Setup the MIME
message = MIMEMultipart()
message['From'] = sender_address
message['To'] = receiver_address
message['Subject'] = 'Vaccine Update !!' # The subject line
# The body and the attachments for the mail
message.attach(MIMEText(mail_content, 'plain'))
# Create SMTP session for sending the mail
session = smtplib.SMTP('smtp.gmail.com', 587) # use gmail with port
session.starttls() # enable security
# login with mail_id and password
session.login(sender_address, sender_pass)
text = message.as_string()
session.sendmail(sender_address, receiver_address, text)
session.quit()
print('Mail Sent', user.email)
def dummySend(user, data):
mail_content = '''Hello,
Below are the list of covid centers.\n
'''
# print(mail_content)
# print(user,data)
lis = '\n----------------------\n'
for hos in data['sessions']:
# print(hos['name'],hos['address'],hos['state_name'],hos['pincode'],hos['fee_type'],hos['date'],hos['available_capacity'],
# hos['fee'],hos['min_age_limit'],hos['vaccine'])
lis += "Hospital name : " + hos['name'] + '\n'
lis += "Hospital Pincode : " + str(hos['pincode']) + '\n'
lis += "Fee type : " + str(hos['fee_type']) + '\n'
lis += "Available Capacity : " + \
str(hos['available_capacity']) + '\n'
lis += "Age Limit : " + str(hos['min_age_limit']) + '\n'
lis += "Vaccine type : " + hos['vaccine'] + '\n'
lis += '----------------------\n'
print(mail_content+lis, datetime.datetime.now())
def songNotify(user, data):
# Play a local alert sound; assumes the mpg123 CLI player is installed.
os.system('mpg123 src/iphone.mp3')
| 33.255556 | 130 | 0.560307 | 350 | 2,993 | 4.682857 | 0.274286 | 0.029286 | 0.024405 | 0.034167 | 0.488103 | 0.488103 | 0.488103 | 0.488103 | 0.488103 | 0.488103 | 0 | 0.003129 | 0.252589 | 2,993 | 89 | 131 | 33.629213 | 0.729549 | 0.211828 | 0 | 0.490566 | 0 | 0 | 0.315677 | 0.042717 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056604 | false | 0.037736 | 0.113208 | 0 | 0.169811 | 0.037736 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b25be4c13e25eeac90de65264819ef91f2cd9201 | 4,999 | py | Python | stock_exchange/users/views.py | andela-Taiwo/Stock-Exchange-Monitoring-App | 10851f871cb05bf5d892c776d4f4a63df02648d1 | [
"MIT"
] | null | null | null | stock_exchange/users/views.py | andela-Taiwo/Stock-Exchange-Monitoring-App | 10851f871cb05bf5d892c776d4f4a63df02648d1 | [
"MIT"
] | 1 | 2021-06-10T22:43:31.000Z | 2021-06-10T22:43:31.000Z | stock_exchange/users/views.py | andela-Taiwo/Stock-Exchange-Monitoring-App | 10851f871cb05bf5d892c776d4f4a63df02648d1 | [
"MIT"
] | null | null | null | from allauth.account.models import EmailConfirmation, EmailConfirmationHMAC
from rest_auth.serializers import PasswordResetConfirmSerializer
from django.contrib.auth import get_user_model
from rest_framework import generics
from django.utils.translation import ugettext_lazy as _
from rest_auth.registration.serializers import VerifyEmailSerializer
from rest_framework import status
from rest_framework.decorators import api_view, APIView
from rest_framework.permissions import IsAdminUser, AllowAny
from rest_framework.response import Response
from rest_framework import exceptions
from rest_auth.views import LoginView
from rest_framework import (
viewsets,
decorators
)
import users.services as user_service
from users.serializers import (
UserSerializer,
ProfileSerializer,
FileUploadSerializer,
ViewProfileSerializer
)
from rest_framework import authentication, permissions
from api.response import NSEMonitoringAPIResponse
# Create your views here.
@api_view()
def django_rest_auth_null(request):
return Response(status=status.HTTP_400_BAD_REQUEST)
@api_view()
def complete_view(request):
return Response("Email account is activated")
class UserViewSet(viewsets.ViewSet):
''' User Profile views '''
def retrieve(self, request, *args, **kwargs):
profile = user_service.retrieve_profile(
requestor=request.user,
profile_id=kwargs.get('pk')
)
return NSEMonitoringAPIResponse(ViewProfileSerializer(profile).data)
class UploadViewSet(viewsets.ViewSet):
def create(self, request):
try:
profile_picture = request.FILES['picture']
except KeyError:
raise exceptions.NotAcceptable(detail='Please select picture to upload')
profile = user_service.upload_profile_picture(
data=request.data,
requestor=request.user,
file=profile_picture
)
return NSEMonitoringAPIResponse(FileUploadSerializer(profile).data)
def update(self, request, **kwargs):
try:
profile_picture = request.FILES['picture']
except KeyError:
raise exceptions.NotAcceptable(detail='Please select picture to upload')
profile = user_service.update_profile_picture(
requestor=request.user,
file=profile_picture,
upload_id=kwargs.get('pk')
)
return NSEMonitoringAPIResponse(FileUploadSerializer(profile).data)
def delete(self, request, **kwargs):
try:
profile_picture = request.FILES['picture']
except KeyError:
profile_picture = None
profile = user_service.delete_profile_picture(
requestor=request.user,
file=profile_picture,
upload_id=kwargs.get('pk')
)
return NSEMonitoringAPIResponse(FileUploadSerializer(profile).data)
class CustomLoginView(LoginView):
def get_response(self):
orginal_response = super().get_response()
mydata = {"message": "You have successfully logged in", "status": "success"}
orginal_response.data.update(mydata)
return orginal_response
class RolesViewSet(viewsets.ViewSet):
def list(self, request, **kwargs):
roles = user_service.list_roles(request.user)
return NSEMonitoringAPIResponse(user_service.serialize_roles(request.user, roles, many=True, compact=True))
def retrieve(self, request, **kwargs):
role = user_service.retrieve_role(request.user, role_pk=kwargs.get('pk'))
return NSEMonitoringAPIResponse(user_service.serialize_roles(request.user, role))
def update(self, request, **kwargs):
role = user_service.update_role(request.user, role_pk=kwargs.get('pk'), data=request.data)
return NSEMonitoringAPIResponse(user_service.serialize_roles(request.user, role))
@decorators.action(methods=['GET'], detail=False, url_path='init')
def init(self, request, **kwargs):
role = user_service.init_role(request.user)
return NSEMonitoringAPIResponse(user_service.serialize_roles(request.user, role))
def create(self, request, **kwargs):
role = user_service.create_role(request.user, data=request.data)
return NSEMonitoringAPIResponse(user_service.serialize_roles(request.user, role))
class UserRolesViewSet(viewsets.ViewSet):
@decorators.action(methods=['GET'], detail=True, url_path='list')
def list_user_roles(self, request, **kwargs):
roles = user_service.list_user_roles(request.user, profile_pk=kwargs.get('pk'))
return NSEMonitoringAPIResponse(user_service.serialize_user_roles(request.user, roles))
@decorators.action(methods=['PUT'], detail=True, url_path='update')
def update_user_roles(self, request, **kwargs):
roles = user_service.update_user_roles(request.user, profile_pk=kwargs.get('pk'), data=request.data)
return NSEMonitoringAPIResponse(user_service.serialize_user_roles(request.user, roles))
| 38.751938 | 115 | 0.718944 | 547 | 4,999 | 6.396709 | 0.212066 | 0.059731 | 0.045727 | 0.082023 | 0.50643 | 0.48471 | 0.40583 | 0.394113 | 0.359246 | 0.336382 | 0 | 0.00074 | 0.189238 | 4,999 | 128 | 116 | 39.054688 | 0.862571 | 0.008802 | 0 | 0.307692 | 0 | 0 | 0.039814 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.134615 | false | 0.009615 | 0.163462 | 0.019231 | 0.480769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b25d35b45dfa93eb6db5d1480e4337817da42e96 | 59,575 | py | Python | src/natcap/invest/urban_cooling_model.py | ForestFort/invest | ea55638335e97264e09d359484b66b1b8553542d | [
"BSD-3-Clause"
] | null | null | null | src/natcap/invest/urban_cooling_model.py | ForestFort/invest | ea55638335e97264e09d359484b66b1b8553542d | [
"BSD-3-Clause"
] | null | null | null | src/natcap/invest/urban_cooling_model.py | ForestFort/invest | ea55638335e97264e09d359484b66b1b8553542d | [
"BSD-3-Clause"
] | null | null | null | """Urban Cooling Model."""
import shutil
import tempfile
import math
import logging
import os
import pickle
import time
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
import pygeoprocessing
import taskgraph
import numpy
import shapely.wkb
import shapely.prepared
import rtree
from . import validation
from . import utils
LOGGER = logging.getLogger(__name__)
TARGET_NODATA = -1
_LOGGING_PERIOD = 5.0
ARGS_SPEC = {
'model_name': 'Urban Cooling Model',
'module': __name__,
'userguide_html': 'urban_cooling_model.html',
"args_with_spatial_overlap": {
"spatial_keys": ["lulc_raster_path", "ref_eto_raster_path",
"aoi_vector_path", "building_vector_path"],
"different_projections_ok": True,
},
"args": {
"workspace_dir": validation.WORKSPACE_SPEC,
"results_suffix": validation.SUFFIX_SPEC,
"n_workers": validation.N_WORKERS_SPEC,
"lulc_raster_path": {
"name": "Land Use / Land Cover Raster",
"type": "raster",
"required": True,
"validation_options": {
"projected": True,
"projection_units": "m",
},
"about": (
"A GDAL-supported raster file containing integer values "
"representing the LULC code for each cell. The LULC code "
"should be an integer. The model will use the resolution of "
"this layer to resample all outputs. The resolution should "
"be small enough to capture the effect of green areas in the "
"landscape, although LULC categories can comprise a mix of "
"vegetated and non-vegetated covers (e.g. 'residential', "
"which may have 30% canopy cover."
)
},
"ref_eto_raster_path": {
"name": "Reference Evapotranspiration Raster",
"type": "raster",
"required": True,
"about": (
"A GDAL-supported raster file containing numeric values "
"representing the evapotranspiration (in mm) for the period "
"of interest."
)
},
"aoi_vector_path": {
"name": "Area of Interest Vector",
"type": "vector",
"required": True,
"about": (
"A GDAL-compatible vector delineating areas of interest "
"(city or neighborhood boundaries). Results will be "
"aggregated within each feature in this vector."
)
},
"biophysical_table_path": {
"name": "Biophysical Table",
"type": "csv",
"required": True,
"validation_options": {
"required_fields": ["lucode", "kc", "green_area"],
},
"about": (
"A CSV table containing model information corresponding "
"to each of the land use classes in the LULC. All classes "
"in the land cover raster MUST have corresponding values "
"in this table. Each row is a land use/land cover class."
),
},
"green_area_cooling_distance": {
"name": "Green area max cooling distance effect (m)",
"type": "number",
"required": True,
"validation_options": {
"expression": "value > 0",
},
"about": (
"Distance (in m) over which large green areas (> 2 ha) "
"will have a cooling effect."
),
},
"t_air_average_radius": {
"name": "T_air moving average radius (m)",
"type": "number",
"required": True,
"validation_options": {
"expression": "value > 0"
},
"about": (
"Radius of the averaging filter for turning T_air_nomix "
"into T_air")
},
"t_ref": {
"name": "Reference Air Temperature",
"type": "number",
"required": True,
"about": (
"Rural reference temperature (where the urban heat island"
"effect is not observed) for the period of interest. This "
"could be nighttime or daytime temperature, for a specific "
"date or an average over several days. The results will be "
"given for the same period of interest)"),
},
"uhi_max": {
"name": "Magnitude of the UHI effect",
"type": "number",
"required": True,
"about": (
"The magnitude of the urban heat island effect, in degrees "
"C. Example: the difference between the rural reference "
"temperature and the maximum temperature observed in the "
"city."
),
},
"do_valuation": {
"name": "Run Valuation Model",
"type": "boolean",
"required": True,
"about": "Select to run the valuation model."
},
"avg_rel_humidity": {
"name": "Average relative humidity (0-100%)",
"type": "number",
"required": "do_valuation",
"validation_options": {
"expression": "(value >= 0) and (value <= 100)",
},
"about": (
"The average relative humidity (0-100%) over the time period "
"of interest."
),
},
"building_vector_path": {
"name": "Buildings vector",
"type": "vector",
"required": "do_valuation",
"validation_options": {
"required_fields": ["type"],
},
"about": (
"A GDAL-compatible vector with built infrastructure "
"footprints. The attribute table must contain the column "
"'type', with integers referencing the building type "
"(e.g. 1=residential, 2=office, etc.) that match types in "
"the energy consumption table."
),
},
"energy_consumption_table_path": {
"name": "Energy consumption table",
"type": "csv",
"required": "do_valuation",
"validation_options": {
"required_fields": ["type", "consumption"],
},
"about": (
"A CSV table containing information on energy consumption "
"for various types of buildings, in kW/degC."
),
},
"cc_method": {
"name": "Cooling capacity calculation method",
"type": "option_string",
"required": True,
"validation_options": {
"options": ['factors', 'intensity'],
},
"about": (
'The method selected here determines the predictor used for '
'air temperature. If <b>"Weighted Factors"</b> is '
'selected, the Cooling Capacity calculations will use the '
'weighted factors for shade, albedo and ETI as a predictor '
'for daytime temperatures. <br/>'
'Alternatively, if <b>"Building Intensity"</b> is selected, '
'building intensity will be used as a predictor for nighttime '
'temperature instead of shade, albedo and ETI.'
),
},
"cc_weight_shade": {
"name": "Cooling capacity: adjust shade weight",
"type": "number",
"required": False,
"validation_options": {
"expression": "value > 0",
},
"about": (
"The relative weight to apply to shade when calculating the "
"cooling index. Default: 0.6"
),
},
"cc_weight_albedo": {
"name": "Cooling capacity: adjust albedo weight",
"type": "number",
"required": False,
"validation_options": {
"expression": "value > 0",
},
"about": (
"The relative weight to apply to albedo when calculating the "
"cooling index. Default: 0.2"
),
},
"cc_weight_eti": {
"name": "Cooling capacity: adjust evapotranspiration weight",
"type": "number",
"required": False,
"validation_options": {
"expression": "value > 0",
},
"about": (
"The relative weight to apply to ETI when calculating the "
"cooling index. Default: 0.2"
)
},
}
}
def execute(args):
"""Urban Cooling Model.
Args:
args['workspace_dir'] (str): path to target output directory.
args['results_suffix'] (string): (optional) string to append to any
output file names
args['t_ref'] (str/float): reference air temperature.
args['lulc_raster_path'] (str): path to landcover raster.
args['ref_eto_raster_path'] (str): path to evapotranspiration raster.
args['aoi_vector_path'] (str): path to desired AOI.
args['biophysical_table_path'] (str): table to map landcover codes to
Shade, Kc, and Albedo values. Must contain the fields 'lucode',
'kc', and 'green_area'. If ``args['cc_method'] == 'factors'``,
then this table must also contain the fields 'shade' and
'albedo'. If ``args['cc_method'] == 'intensity'``, then this
table must also contain the field 'building_intensity'.
args['green_area_cooling_distance'] (float): Distance (in m) over
which large green areas (> 2 ha) will have a cooling effect.
args['t_air_average_radius'] (float): radius of the averaging filter
for turning T_air_nomix into T_air.
args['uhi_max'] (float): Magnitude of the UHI effect.
args['do_valuation'] (bool): if True, consider the valuation
parameters for buildings.
args['avg_rel_humidity'] (float): (optional, depends on
'do_valuation') Average relative humidity (0-100%).
args['building_vector_path']: (str) (optional, depends on
'do_valuation') path to a vector of building footprints that
contains at least the field 'type'.
args['energy_consumption_table_path'] (str): (optional, depends on
'do_valuation') path to a table that maps building types to
energy consumption. Must contain at least the fields 'type' and
'consumption'.
args['cc_method'] (str): Either "intensity" or "factors". If
"intensity", then the "building_intensity" column must be
present in the biophysical table. If "factors", then
``args['cc_weight_shade']``, ``args['cc_weight_albedo']``,
``args['cc_weight_eti']`` may be set to alternative weights
if desired.
args['cc_weight_shade'] (str/float): floating point number
representing the relative weight to apply to shade when
calculating the cooling index. Default: 0.6
args['cc_weight_albedo'] (str/float): floating point number
representing the relative weight to apply to albedo when
calculating the cooling index. Default: 0.2
args['cc_weight_eti'] (str/float): floating point number
representing the relative weight to apply to ETI when
calculating the cooling index. Default: 0.2
Returns:
None.
"""
LOGGER.info('Starting Urban Cooling Model')
file_suffix = utils.make_suffix_string(args, 'results_suffix')
intermediate_dir = os.path.join(
args['workspace_dir'], 'intermediate')
utils.make_directories([args['workspace_dir'], intermediate_dir])
biophysical_lucode_map = utils.build_lookup_from_csv(
args['biophysical_table_path'], 'lucode', to_lower=True,
warn_if_missing=True)
# cast to float and calculate relative weights
# Use default weights for shade, albedo, eti if the user didn't provide
# weights.
cc_weight_shade_raw = float(args.get('cc_weight_shade', 0.6))
cc_weight_albedo_raw = float(args.get('cc_weight_albedo', 0.2))
cc_weight_eti_raw = float(args.get('cc_weight_eti', 0.2))
t_ref_raw = float(args['t_ref'])
uhi_max_raw = float(args['uhi_max'])
cc_weight_sum = sum(
(cc_weight_shade_raw, cc_weight_albedo_raw, cc_weight_eti_raw))
cc_weight_shade = cc_weight_shade_raw / cc_weight_sum
cc_weight_albedo = cc_weight_albedo_raw / cc_weight_sum
cc_weight_eti = cc_weight_eti_raw / cc_weight_sum
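# The three cooling-capacity weights are normalized to sum to 1. With the
# documented defaults (0.6, 0.2, 0.2) they pass through unchanged; as an
# assumed example, user-supplied weights of 3, 1 and 1 would also normalize
# to 0.6, 0.2 and 0.2.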
# Cast to a float upfront in case of casting errors.
t_air_average_radius_raw = float(args['t_air_average_radius'])
try:
n_workers = int(args['n_workers'])
except (KeyError, ValueError, TypeError):
# KeyError when n_workers is not present in args.
# ValueError when n_workers is an empty string.
# TypeError when n_workers is None.
n_workers = -1 # Synchronous mode.
task_graph = taskgraph.TaskGraph(
os.path.join(intermediate_dir, '_taskgraph_working_dir'), n_workers)
# align all the input rasters.
aligned_lulc_raster_path = os.path.join(
intermediate_dir, 'lulc%s.tif' % file_suffix)
aligned_ref_eto_raster_path = os.path.join(
intermediate_dir, 'ref_eto%s.tif' % file_suffix)
lulc_raster_info = pygeoprocessing.get_raster_info(
args['lulc_raster_path'])
# ensure raster has square pixels by picking the smallest dimension
cell_size = numpy.min(numpy.abs(lulc_raster_info['pixel_size']))
# Reproject and align inputs to the intersection of the AOI, ETO and LULC,
# with target raster sizes matching those of the LULC.
aligned_raster_path_list = [
aligned_lulc_raster_path, aligned_ref_eto_raster_path]
align_task = task_graph.add_task(
func=pygeoprocessing.align_and_resize_raster_stack,
args=([args['lulc_raster_path'], args['ref_eto_raster_path']],
aligned_raster_path_list,
['mode', 'cubicspline'],
(cell_size, -cell_size),
'intersection'),
kwargs={
'base_vector_path_list': [args['aoi_vector_path']],
'raster_align_index': 1,
'target_sr_wkt': lulc_raster_info['projection']},
target_path_list=aligned_raster_path_list,
task_name='align rasters')
task_path_prop_map = {}
reclassification_props = ('kc', 'green_area')
if args['cc_method'] == 'factors':
reclassification_props += ('shade', 'albedo')
else:
reclassification_props += ('building_intensity',)
for prop in reclassification_props:
prop_map = dict(
(lucode, x[prop])
for lucode, x in biophysical_lucode_map.items())
prop_raster_path = os.path.join(
intermediate_dir, '%s%s.tif' % (prop, file_suffix))
prop_task = task_graph.add_task(
func=pygeoprocessing.reclassify_raster,
args=(
(aligned_lulc_raster_path, 1), prop_map, prop_raster_path,
gdal.GDT_Float32, TARGET_NODATA),
kwargs={'values_required': True},
target_path_list=[prop_raster_path],
dependent_task_list=[align_task],
task_name='reclassify to %s' % prop)
task_path_prop_map[prop] = (prop_task, prop_raster_path)
green_area_decay_kernel_distance = int(numpy.round(
float(args['green_area_cooling_distance']) / cell_size))
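# The cooling distance is converted from meters into a whole number of
# pixels. As an assumed example, a 450 m green-area cooling distance on a
# 30 m LULC grid gives a kernel radius of 450 / 30 = 15 pixels.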
cc_park_raster_path = os.path.join(
intermediate_dir, 'cc_park%s.tif' % file_suffix)
cc_park_task = task_graph.add_task(
func=convolve_2d_by_exponential,
args=(
green_area_decay_kernel_distance,
task_path_prop_map['green_area'][1],
cc_park_raster_path),
target_path_list=[cc_park_raster_path],
dependent_task_list=[
task_path_prop_map['green_area'][0]],
task_name='calculate T air')
# Calculate the area of greenspace within a search radius of each pixel.
area_kernel_path = os.path.join(
intermediate_dir, 'area_kernel%s.tif' % file_suffix)
area_kernel_task = task_graph.add_task(
func=flat_disk_kernel,
args=(green_area_decay_kernel_distance, area_kernel_path),
target_path_list=[area_kernel_path],
task_name='area kernel')
green_area_sum_raster_path = os.path.join(
intermediate_dir, 'green_area_sum%s.tif' % file_suffix)
green_area_sum_task = task_graph.add_task(
func=pygeoprocessing.convolve_2d,
args=(
(task_path_prop_map['green_area'][1], 1), # green area path
(area_kernel_path, 1),
green_area_sum_raster_path),
kwargs={
'working_dir': intermediate_dir,
'ignore_nodata': True},
target_path_list=[green_area_sum_raster_path],
dependent_task_list=[
task_path_prop_map['green_area'][0], # reclassed green area task
area_kernel_task],
task_name='calculate green area')
align_task.join()
cc_raster_path = os.path.join(
intermediate_dir, 'cc%s.tif' % file_suffix)
if args['cc_method'] == 'factors':
LOGGER.info('Calculating Cooling Coefficient from factors')
# Evapotranspiration index (Equation #1)
ref_eto_raster = gdal.OpenEx(aligned_ref_eto_raster_path,
gdal.OF_RASTER)
ref_eto_band = ref_eto_raster.GetRasterBand(1)
_, ref_eto_max, _, _ = ref_eto_band.GetStatistics(0, 1)
ref_eto_max = numpy.round(ref_eto_max, decimals=9)
ref_eto_band = None
ref_eto_raster = None
eto_nodata = pygeoprocessing.get_raster_info(
args['ref_eto_raster_path'])['nodata'][0]
eti_raster_path = os.path.join(
intermediate_dir, 'eti%s.tif' % file_suffix)
eti_task = task_graph.add_task(
func=pygeoprocessing.raster_calculator,
args=(
[(task_path_prop_map['kc'][1], 1), (TARGET_NODATA, 'raw'),
(aligned_ref_eto_raster_path, 1), (eto_nodata, 'raw'),
(ref_eto_max, 'raw'), (TARGET_NODATA, 'raw')],
calc_eti_op, eti_raster_path, gdal.GDT_Float32, TARGET_NODATA),
target_path_list=[eti_raster_path],
dependent_task_list=[task_path_prop_map['kc'][0]],
task_name='calculate eti')
# Cooling Capacity calculations (Equation #2)
cc_task = task_graph.add_task(
func=pygeoprocessing.raster_calculator,
args=([(task_path_prop_map['shade'][1], 1),
(task_path_prop_map['albedo'][1], 1),
(eti_raster_path, 1),
(cc_weight_shade, 'raw'),
(cc_weight_albedo, 'raw'),
(cc_weight_eti, 'raw')],
calc_cc_op_factors, cc_raster_path,
gdal.GDT_Float32, TARGET_NODATA),
target_path_list=[cc_raster_path],
dependent_task_list=[
task_path_prop_map['shade'][0],
task_path_prop_map['albedo'][0],
eti_task],
task_name='calculate cc index (weighted factors)')
else:
# args['cc_method'] must be 'intensity', so we use a modified CC
# function.
LOGGER.info('Calculating Cooling Coefficient using '
'building intensity')
cc_task = task_graph.add_task(
func=pygeoprocessing.raster_calculator,
args=([(task_path_prop_map['building_intensity'][1], 1)],
calc_cc_op_intensity, cc_raster_path,
gdal.GDT_Float32, TARGET_NODATA),
target_path_list=[cc_raster_path],
dependent_task_list=[
task_path_prop_map['building_intensity'][0]],
task_name='calculate cc index (intensity)')
# Compute Heat Mitigation (HM) index.
#
# convert 2 hectares to number of pixels
green_area_threshold = 2e4 / cell_size**2
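# For example (assuming a 30 m cell size), 2 ha = 20,000 m^2 and each pixel
# covers 900 m^2, so the threshold is 20000 / 900 ~= 22.2 pixels of green
# area within the search radius.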
hm_raster_path = os.path.join(
args['workspace_dir'], 'hm%s.tif' % file_suffix)
hm_task = task_graph.add_task(
func=pygeoprocessing.raster_calculator,
args=([
(cc_raster_path, 1),
(green_area_sum_raster_path, 1),
(cc_park_raster_path, 1),
(green_area_threshold, 'raw'),
], hm_op, hm_raster_path, gdal.GDT_Float32, TARGET_NODATA),
target_path_list=[hm_raster_path],
dependent_task_list=[cc_task, green_area_sum_task, cc_park_task],
task_name='calculate HM index')
t_air_nomix_raster_path = os.path.join(
intermediate_dir, 'T_air_nomix%s.tif' % file_suffix)
t_air_nomix_task = task_graph.add_task(
func=pygeoprocessing.raster_calculator,
args=([(t_ref_raw, 'raw'),
(hm_raster_path, 1),
(uhi_max_raw, 'raw')],
calc_t_air_nomix_op, t_air_nomix_raster_path, gdal.GDT_Float32,
TARGET_NODATA),
target_path_list=[t_air_nomix_raster_path],
dependent_task_list=[hm_task, align_task],
task_name='calculate T air nomix')
decay_kernel_distance = int(numpy.round(
t_air_average_radius_raw / cell_size))
t_air_raster_path = os.path.join(
intermediate_dir, 'T_air%s.tif' % file_suffix)
t_air_task = task_graph.add_task(
func=convolve_2d_by_exponential,
args=(
decay_kernel_distance,
t_air_nomix_raster_path,
t_air_raster_path),
target_path_list=[t_air_raster_path],
dependent_task_list=[t_air_nomix_task],
task_name='calculate T air')
intermediate_aoi_vector_path = os.path.join(
intermediate_dir, 'reprojected_aoi%s.shp' % file_suffix)
intermediate_uhi_result_vector_task = task_graph.add_task(
func=pygeoprocessing.reproject_vector,
args=(
args['aoi_vector_path'], lulc_raster_info['projection'],
intermediate_aoi_vector_path),
kwargs={'driver_name': 'ESRI Shapefile'},
target_path_list=[intermediate_aoi_vector_path],
task_name='reproject and label aoi')
cc_aoi_stats_pickle_path = os.path.join(
intermediate_dir, 'cc_ref_aoi_stats.pickle')
_ = task_graph.add_task(
func=pickle_zonal_stats,
args=(
intermediate_aoi_vector_path,
cc_raster_path, cc_aoi_stats_pickle_path),
target_path_list=[cc_aoi_stats_pickle_path],
dependent_task_list=[cc_task, intermediate_uhi_result_vector_task],
task_name='pickle cc ref stats')
t_air_aoi_stats_pickle_path = os.path.join(
intermediate_dir, 't_air_aoi_stats.pickle')
_ = task_graph.add_task(
func=pickle_zonal_stats,
args=(
intermediate_aoi_vector_path,
t_air_raster_path, t_air_aoi_stats_pickle_path),
target_path_list=[t_air_aoi_stats_pickle_path],
dependent_task_list=[t_air_task, intermediate_uhi_result_vector_task],
task_name='pickle t-air over stats over AOI')
wbgt_stats_pickle_path = None
light_loss_stats_pickle_path = None
heavy_loss_stats_pickle_path = None
energy_consumption_vector_path = None
if bool(args['do_valuation']):
LOGGER.info('Starting valuation')
# work productivity
wbgt_raster_path = os.path.join(
intermediate_dir, 'wbgt%s.tif' % file_suffix)
wbgt_task = task_graph.add_task(
func=calculate_wbgt,
args=(
float(args['avg_rel_humidity']), t_air_raster_path,
wbgt_raster_path),
target_path_list=[wbgt_raster_path],
dependent_task_list=[t_air_task],
task_name='vapor pressure')
light_work_loss_raster_path = os.path.join(
intermediate_dir,
'light_work_loss_percent%s.tif' % file_suffix)
heavy_work_loss_raster_path = os.path.join(
intermediate_dir,
'heavy_work_loss_percent%s.tif' % file_suffix)
loss_task_path_map = {}
for loss_type, temp_map, loss_raster_path in [
# Breaks here are described in the UG chapter and are the
# result of a literature review.
('light', [31.5, 32.0, 32.5], light_work_loss_raster_path),
('heavy', [27.5, 29.5, 31.5], heavy_work_loss_raster_path)]:
work_loss_task = task_graph.add_task(
func=map_work_loss,
args=(temp_map, wbgt_raster_path, loss_raster_path),
target_path_list=[loss_raster_path],
dependent_task_list=[wbgt_task],
task_name='work loss: %s' % os.path.basename(loss_raster_path))
loss_task_path_map[loss_type] = (work_loss_task, loss_raster_path)
intermediate_building_vector_path = os.path.join(
intermediate_dir,
'reprojected_buildings%s.shp' % file_suffix)
intermediate_building_vector_task = task_graph.add_task(
func=pygeoprocessing.reproject_vector,
args=(
args['building_vector_path'], lulc_raster_info['projection'],
intermediate_building_vector_path),
kwargs={'driver_name': 'ESRI Shapefile'},
target_path_list=[intermediate_building_vector_path],
task_name='reproject building vector')
# zonal stats over buildings for t_air
t_air_stats_pickle_path = os.path.join(
intermediate_dir, 't_air_stats.pickle')
pickle_t_air_task = task_graph.add_task(
func=pickle_zonal_stats,
args=(
intermediate_building_vector_path,
t_air_raster_path, t_air_stats_pickle_path),
target_path_list=[t_air_stats_pickle_path],
dependent_task_list=[
t_air_task, intermediate_building_vector_task],
task_name='pickle t-air stats over buildings')
energy_consumption_vector_path = os.path.join(
args['workspace_dir'], 'buildings_with_stats%s.shp' % file_suffix)
_ = task_graph.add_task(
func=calculate_energy_savings,
args=(
t_air_stats_pickle_path, t_ref_raw,
uhi_max_raw, args['energy_consumption_table_path'],
intermediate_building_vector_path,
energy_consumption_vector_path),
target_path_list=[energy_consumption_vector_path],
dependent_task_list=[
pickle_t_air_task, intermediate_building_vector_task],
task_name='calculate energy savings task')
# pickle WBGT
wbgt_stats_pickle_path = os.path.join(
intermediate_dir, 'wbgt_stats.pickle')
_ = task_graph.add_task(
func=pickle_zonal_stats,
args=(
intermediate_aoi_vector_path,
wbgt_raster_path, wbgt_stats_pickle_path),
target_path_list=[wbgt_stats_pickle_path],
dependent_task_list=[
wbgt_task, intermediate_uhi_result_vector_task],
task_name='pickle WBgt stats')
# pickle light loss
light_loss_stats_pickle_path = os.path.join(
intermediate_dir, 'light_loss_stats.pickle')
_ = task_graph.add_task(
func=pickle_zonal_stats,
args=(
intermediate_aoi_vector_path,
loss_task_path_map['light'][1], light_loss_stats_pickle_path),
target_path_list=[light_loss_stats_pickle_path],
dependent_task_list=[
loss_task_path_map['light'][0],
intermediate_uhi_result_vector_task],
task_name='pickle light_loss stats')
heavy_loss_stats_pickle_path = os.path.join(
intermediate_dir, 'heavy_loss_stats.pickle')
_ = task_graph.add_task(
func=pickle_zonal_stats,
args=(
intermediate_aoi_vector_path,
loss_task_path_map['heavy'][1], heavy_loss_stats_pickle_path),
target_path_list=[heavy_loss_stats_pickle_path],
dependent_task_list=[
loss_task_path_map['heavy'][0],
intermediate_uhi_result_vector_task],
task_name='pickle heavy_loss stats')
# final reporting can't be done until everything else is complete so
# stop here
task_graph.join()
target_uhi_vector_path = os.path.join(
args['workspace_dir'], 'uhi_results%s.shp' % file_suffix)
_ = task_graph.add_task(
func=calculate_uhi_result_vector,
args=(
intermediate_aoi_vector_path,
t_ref_raw, t_air_aoi_stats_pickle_path,
cc_aoi_stats_pickle_path,
wbgt_stats_pickle_path,
light_loss_stats_pickle_path,
heavy_loss_stats_pickle_path,
energy_consumption_vector_path,
target_uhi_vector_path),
target_path_list=[target_uhi_vector_path],
task_name='calculate uhi results')
task_graph.close()
task_graph.join()
LOGGER.info('Urban Cooling Model complete.')
def calculate_uhi_result_vector(
base_aoi_path, t_ref_val, t_air_stats_pickle_path,
cc_stats_pickle_path,
wbgt_stats_pickle_path,
light_loss_stats_pickle_path,
heavy_loss_stats_pickle_path,
energy_consumption_vector_path, target_uhi_vector_path):
"""Summarize UHI results.
Output vector will have fields with attributes summarizing:
* average cc value
* average temperature value
* average temperature anomaly
* avoided energy consumption
Args:
base_aoi_path (str): path to AOI vector.
t_ref_val (float): reference temperature.
t_air_stats_pickle_path (str): path to pickled zonal stats for T_air,
    indexed by feature ID.
cc_stats_pickle_path (str): path to pickled zonal stats for the cooling
    capacity index, indexed by feature ID.
wbgt_stats_pickle_path (str): path to pickled zonal stats for wbgt.
Can be None if no valuation occurred.
light_loss_stats_pickle_path (str): path to pickled zonal stats for
light work loss. Can be None if no valuation occurred.
heavy_loss_stats_pickle_path (str): path to pickled zonal stats for
heavy work loss. Can be None if no valuation occurred.
energy_consumption_vector_path (str): path to vector that contains
building footprints with the field 'energy_sav'. Can be None
if no valuation occurred.
target_uhi_vector_path (str): path to UHI vector created for result.
Will contain the fields:
* avg_cc (average cooling capacity)
* avg_tmp_v (average temperature value)
* avg_tmp_an (average temperature anomaly)
* avd_eng_cn (avoided energy consumption)
* avg_wbgt_v (average wet bulb globe temperature)
* avg_ltls_v (average light work loss)
* avg_hvls_v (average heavy work loss)
Returns:
None.
"""
LOGGER.info(
"Calculate UHI summary results %s", os.path.basename(
target_uhi_vector_path))
LOGGER.info("Loading t_air_stats")
with open(t_air_stats_pickle_path, 'rb') as t_air_stats_pickle_file:
t_air_stats = pickle.load(t_air_stats_pickle_file)
LOGGER.info("Loading cc_stats")
with open(cc_stats_pickle_path, 'rb') as cc_stats_pickle_file:
cc_stats = pickle.load(cc_stats_pickle_file)
wbgt_stats = None
if wbgt_stats_pickle_path:
LOGGER.info("Loading wbgt_stats")
with open(wbgt_stats_pickle_path, 'rb') as wbgt_stats_pickle_file:
wbgt_stats = pickle.load(wbgt_stats_pickle_file)
light_loss_stats = None
if light_loss_stats_pickle_path:
LOGGER.info("Loading light_loss_stats")
with open(light_loss_stats_pickle_path, 'rb') as (
light_loss_stats_pickle_file):
light_loss_stats = pickle.load(light_loss_stats_pickle_file)
heavy_loss_stats = None
if heavy_loss_stats_pickle_path:
LOGGER.info("Loading heavy_loss_stats")
with open(heavy_loss_stats_pickle_path, 'rb') as (
heavy_loss_stats_pickle_file):
heavy_loss_stats = pickle.load(heavy_loss_stats_pickle_file)
base_aoi_vector = gdal.OpenEx(base_aoi_path, gdal.OF_VECTOR)
shapefile_driver = gdal.GetDriverByName('ESRI Shapefile')
try:
# Can't make a shapefile on top of an existing one
os.remove(target_uhi_vector_path)
except FileNotFoundError:
pass
LOGGER.info("Creating %s", os.path.basename(target_uhi_vector_path))
shapefile_driver.CreateCopy(
target_uhi_vector_path, base_aoi_vector)
base_aoi_vector = None
target_uhi_vector = gdal.OpenEx(
target_uhi_vector_path, gdal.OF_VECTOR | gdal.GA_Update)
target_uhi_layer = target_uhi_vector.GetLayer()
for field_id in [
'avg_cc', 'avg_tmp_v', 'avg_tmp_an', 'avd_eng_cn', 'avg_wbgt_v',
'avg_ltls_v', 'avg_hvls_v']:
target_uhi_layer.CreateField(ogr.FieldDefn(field_id, ogr.OFTReal))
# I don't really like having two of the same conditions (one here and one
# in the for feature in target_uhi_layer loop), but if the user has
# multiple AOI features, we shouldn't have to rebuild the buildings spatial
# index every time.
if energy_consumption_vector_path:
energy_consumption_vector = gdal.OpenEx(
energy_consumption_vector_path, gdal.OF_VECTOR)
energy_consumption_layer = energy_consumption_vector.GetLayer()
LOGGER.info('Parsing building footprint geometry')
building_shapely_polygon_lookup = dict(
(poly_feat.GetFID(),
shapely.wkb.loads(poly_feat.GetGeometryRef().ExportToWkb()))
for poly_feat in energy_consumption_layer)
LOGGER.info("Constructing building footprint spatial index")
poly_rtree_index = rtree.index.Index(
[(poly_fid, poly.bounds, None)
for poly_fid, poly in
building_shapely_polygon_lookup.items()])
target_uhi_layer.StartTransaction()
for feature in target_uhi_layer:
feature_id = feature.GetFID()
if feature_id in cc_stats and cc_stats[feature_id]['count'] > 0:
mean_cc = (
cc_stats[feature_id]['sum'] / cc_stats[feature_id]['count'])
feature.SetField('avg_cc', mean_cc)
mean_t_air = None
if feature_id in t_air_stats and t_air_stats[feature_id]['count'] > 0:
mean_t_air = (
t_air_stats[feature_id]['sum'] /
t_air_stats[feature_id]['count'])
feature.SetField('avg_tmp_v', mean_t_air)
if mean_t_air:
feature.SetField(
'avg_tmp_an', mean_t_air-t_ref_val)
if wbgt_stats and feature_id in wbgt_stats and (
wbgt_stats[feature_id]['count'] > 0):
wbgt = (
wbgt_stats[feature_id]['sum'] /
wbgt_stats[feature_id]['count'])
feature.SetField('avg_wbgt_v', wbgt)
if light_loss_stats and feature_id in light_loss_stats and (
light_loss_stats[feature_id]['count'] > 0):
light_loss = (
light_loss_stats[feature_id]['sum'] /
light_loss_stats[feature_id]['count'])
LOGGER.debug("Average light loss: %s", light_loss)
feature.SetField('avg_ltls_v', float(light_loss))
if heavy_loss_stats and feature_id in heavy_loss_stats and (
heavy_loss_stats[feature_id]['count'] > 0):
heavy_loss = (
heavy_loss_stats[feature_id]['sum'] /
heavy_loss_stats[feature_id]['count'])
LOGGER.debug("Average heavy loss: %s", heavy_loss)
feature.SetField('avg_hvls_v', float(heavy_loss))
if energy_consumption_vector_path:
aoi_geometry = feature.GetGeometryRef()
aoi_shapely_geometry = shapely.wkb.loads(
aoi_geometry.ExportToWkb())
aoi_shapely_geometry_prep = shapely.prepared.prep(
aoi_shapely_geometry)
avd_eng_cn = 0.0
for building_id in poly_rtree_index.intersection(
aoi_shapely_geometry.bounds):
if aoi_shapely_geometry_prep.intersects(
building_shapely_polygon_lookup[building_id]):
energy_consumption_value = (
energy_consumption_layer.GetFeature(
building_id).GetField('energy_sav'))
if energy_consumption_value:
# this step lets us skip values that might be in
# nodata ranges that we can't help.
avd_eng_cn += float(
energy_consumption_value)
feature.SetField('avd_eng_cn', avd_eng_cn)
target_uhi_layer.SetFeature(feature)
target_uhi_layer.CommitTransaction()
def calculate_energy_savings(
t_air_stats_pickle_path, t_ref_raw, uhi_max,
energy_consumption_table_path, base_building_vector_path,
target_building_vector_path):
"""Calculate energy savings.
Energy savings is calculated from equations 8 or 9 in the User's Guide
(depending on whether a cost has been provided in the energy consumption
table).
Args:
t_air_stats_pickle_path (str): path to t_air zonal stats indexed by
FID.
t_ref_raw (float): single value for Tref.
uhi_max (float): UHI max parameter from documentation.
energy_consumption_table_path (str): path to energy consumption table
that contains at least the columns 'type', and 'consumption'. If
the table also contains a 'cost' column, the output energy
savings field will be multiplied by the floating-point cost
provided in the 'cost' column.
base_building_vector_path (str): path to existing vector to copy for
the target vector that contains at least the field 'type'.
target_building_vector_path (str): path to target vector that
will contain the additional field 'energy_sav' calculated as
``consumption.increase(b) * (T_(air,MAX) - T_(air,i))``.
Return:
None.
"""
LOGGER.info(
"Calculate energy savings for %s", target_building_vector_path)
LOGGER.info("Loading t_air_stats")
with open(t_air_stats_pickle_path, 'rb') as t_air_stats_pickle_file:
t_air_stats = pickle.load(t_air_stats_pickle_file)
base_building_vector = gdal.OpenEx(
base_building_vector_path, gdal.OF_VECTOR)
shapefile_driver = gdal.GetDriverByName('ESRI Shapefile')
LOGGER.info("Creating %s", os.path.basename(target_building_vector_path))
try:
# can't make a shapefile on top of an existing one
os.remove(target_building_vector_path)
except OSError:
pass
shapefile_driver.CreateCopy(
target_building_vector_path, base_building_vector)
base_building_vector = None
target_building_vector = gdal.OpenEx(
target_building_vector_path, gdal.OF_VECTOR | gdal.GA_Update)
target_building_layer = target_building_vector.GetLayer()
target_building_layer.CreateField(
ogr.FieldDefn('energy_sav', ogr.OFTReal))
target_building_layer.CreateField(
ogr.FieldDefn('mean_t_air', ogr.OFTReal))
# Find the index of the 'type' column in a case-insensitive way.
# We can assume that the field exists because we're checking for it in
# validation as defined in ARGS_SPEC.
fieldnames = [field.GetName().lower()
for field in target_building_layer.schema]
type_field_index = fieldnames.index('type')
energy_consumption_table = utils.build_lookup_from_csv(
energy_consumption_table_path, 'type', to_lower=True,
warn_if_missing=True)
target_building_layer.StartTransaction()
last_time = time.time()
for target_index, target_feature in enumerate(target_building_layer):
last_time = _invoke_timed_callback(
last_time, lambda: LOGGER.info(
"energy savings approximately %.1f%% complete ",
100.0 * float(target_index + 1) /
target_building_layer.GetFeatureCount()),
_LOGGING_PERIOD)
feature_id = target_feature.GetFID()
t_air_mean = None
if feature_id in t_air_stats:
pixel_count = float(t_air_stats[feature_id]['count'])
if pixel_count > 0:
t_air_mean = float(
t_air_stats[feature_id]['sum'] / pixel_count)
target_feature.SetField('mean_t_air', t_air_mean)
# Building type should be an integer and has to match the building
# types in the energy consumption table.
target_type = target_feature.GetField(int(type_field_index))
if target_type not in energy_consumption_table:
target_building_layer.CommitTransaction()
target_building_layer = None
target_building_vector = None
raise ValueError(
"Encountered a building 'type' of: '%s' in "
"FID: %d in the building vector layer that has no "
"corresponding entry in the energy consumption table "
"at %s" % (
target_type, target_feature.GetFID(),
energy_consumption_table_path))
consumption_increase = float(
energy_consumption_table[target_type]['consumption'])
# Load building cost if we can, but don't adjust the value if the cost
# column is not there.
# NOTE: if the user has an empty column value but the 'cost' column
# exists, this will raise an error.
try:
building_cost = float(
energy_consumption_table[target_type]['cost'])
except KeyError:
# KeyError when cost column not present.
building_cost = 1.0
# Calculate Equation 7: Energy Savings.
# We'll only calculate energy savings if there were polygons with valid
# stats that could be aggregated from t_air_mean.
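# Illustrative (assumed) numbers: with consumption_increase = 3.5 kW/degC,
# t_ref_raw = 21.5, t_air_mean = 23.0, uhi_max = 3.5 and building_cost = 1.0,
# savings = 3.5 * (21.5 - 23.0 + 3.5) * 1.0 = 7.0.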
if t_air_mean:
savings = (
consumption_increase * (
t_ref_raw - t_air_mean + uhi_max) * building_cost)
target_feature.SetField('energy_sav', savings)
target_building_layer.SetFeature(target_feature)
target_building_layer.CommitTransaction()
target_building_layer.SyncToDisk()
def pickle_zonal_stats(
base_vector_path, base_raster_path, target_pickle_path):
"""Calculate Zonal Stats for a vector/raster pair and pickle result.
Args:
base_vector_path (str): path to vector file
base_raster_path (str): path to raster file to aggregate over.
target_pickle_path (str): path to desired target pickle file that will
be a pickle of the pygeoprocessing.zonal_stats function.
Returns:
None.
"""
LOGGER.info('Taking zonal statistics of %s over %s',
base_vector_path, base_raster_path)
zonal_stats = pygeoprocessing.zonal_statistics(
(base_raster_path, 1), base_vector_path,
polygons_might_overlap=True)
with open(target_pickle_path, 'wb') as pickle_file:
pickle.dump(zonal_stats, pickle_file)
def calc_t_air_nomix_op(t_ref_val, hm_array, uhi_max):
"""Calculate air temperature T_(air,i)=T_ref+(1-HM_i)*UHI_max.
Args:
t_ref_val (float): The user-defined reference air temperature in
degrees Celsius.
hm_array (numpy.ndarray): The calculated Heat Mitigation index from
equation 5 in the User's Guide.
uhi_max (float): The user-defined maximum UHI magnitude.
Returns:
A numpy array with the same dimensions as ``hm_array`` with the
calculated T_air_nomix values.
"""
result = numpy.empty(hm_array.shape, dtype=numpy.float32)
result[:] = TARGET_NODATA
valid_mask = ~numpy.isclose(hm_array, TARGET_NODATA)
result[valid_mask] = t_ref_val + (1-hm_array[valid_mask]) * uhi_max
return result
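# Worked example for the equation above (assumed values): with t_ref_val =
# 21.5, uhi_max = 3.5 and a pixel where HM = 0.75,
# T_air_nomix = 21.5 + (1 - 0.75) * 3.5 = 22.375 degrees C.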
def calc_cc_op_factors(
shade_array, albedo_array, eti_array, cc_weight_shade,
cc_weight_albedo, cc_weight_eti):
"""Calculate the cooling capacity index using weighted factors.
Args:
shade_array (numpy.ndarray): array of shade index values 0..1
albedo_array (numpy.ndarray): array of albedo index values 0..1
eti_array (numpy.ndarray): array of evapotransipration index values
0..1
cc_weight_shade (float): 0..1 weight to apply to shade
cc_weight_albedo (float): 0..1 weight to apply to albedo
cc_weight_eti (float): 0..1 weight to apply to eti
Returns:
CC_i = ((cc_weight_shade * shade) +
(cc_weight_albedo * albedo) +
(cc_weight_eti * ETI))
"""
result = numpy.empty(shade_array.shape, dtype=numpy.float32)
result[:] = TARGET_NODATA
valid_mask = ~(
numpy.isclose(shade_array, TARGET_NODATA) |
numpy.isclose(albedo_array, TARGET_NODATA) |
numpy.isclose(eti_array, TARGET_NODATA))
result[valid_mask] = (
cc_weight_shade*shade_array[valid_mask] +
cc_weight_albedo*albedo_array[valid_mask] +
cc_weight_eti*eti_array[valid_mask])
return result
def calc_cc_op_intensity(intensity_array):
"""Calculate the cooling capacity index using building intensity.
Args:
intensity_array (numpy.ndarray): array of intensity values.
Returns:
A numpy array of ``1 - intensity_array``.
"""
result = numpy.empty(intensity_array.shape, dtype=numpy.float32)
result[:] = TARGET_NODATA
valid_mask = ~numpy.isclose(intensity_array, TARGET_NODATA)
result[valid_mask] = 1.0 - intensity_array[valid_mask]
return result
def calc_eti_op(
kc_array, kc_nodata, et0_array, et0_nodata, et_max, target_nodata):
"""Calculate ETI = (K_c * ET_0) / ET_max."""
result = numpy.empty(kc_array.shape, dtype=numpy.float32)
result[:] = target_nodata
valid_mask = ~(
numpy.isclose(kc_array, kc_nodata) |
numpy.isclose(et0_array, et0_nodata))
result[valid_mask] = (
kc_array[valid_mask] * et0_array[valid_mask] / et_max)
return result
def calculate_wbgt(
avg_rel_humidity, t_air_raster_path, target_vapor_pressure_path):
"""Raster calculator op to calculate wet bulb globe temperature.
Args:
avg_rel_humidity (float): number between 0-100.
t_air_raster_path (string): path to T air raster.
        target_vapor_pressure_path (string): path to the target raster; the
            computed WBGT values are written to this path.
Returns:
WBGT_i = 0.567 * T_(air,i) + 0.393 * e_i + 3.94
where e_i:
e_i = RH/100*6.105*exp(17.27*T_air/(237.7+T_air))
"""
LOGGER.info('Calculating WBGT')
t_air_nodata = pygeoprocessing.get_raster_info(
t_air_raster_path)['nodata'][0]
def wbgt_op(avg_rel_humidity, t_air_array):
wbgt = numpy.empty(t_air_array.shape, dtype=numpy.float32)
valid_mask = ~numpy.isclose(t_air_array, t_air_nodata)
wbgt[:] = TARGET_NODATA
t_air_valid = t_air_array[valid_mask]
e_i = (
(avg_rel_humidity / 100.0) * 6.105 * numpy.exp(
17.27 * (t_air_valid / (237.7 + t_air_valid))))
wbgt[valid_mask] = 0.567 * t_air_valid + 0.393 * e_i + 3.94
return wbgt
pygeoprocessing.raster_calculator(
[(avg_rel_humidity, 'raw'), (t_air_raster_path, 1)],
wbgt_op, target_vapor_pressure_path, gdal.GDT_Float32,
TARGET_NODATA)
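# Illustrative check of the WBGT formula above (not from the original module):
# at T_air = 30 degrees C and 60% relative humidity,
#   e_i = 0.6 * 6.105 * exp(17.27 * 30 / 267.7) ~= 25.4
#   WBGT ~= 0.567 * 30 + 0.393 * 25.4 + 3.94 ~= 30.9 degrees C.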
def flat_disk_kernel(max_distance, kernel_filepath):
"""Create a flat disk kernel.
The raster created will be a tiled GeoTiff, with 256x256 memory blocks.
Args:
        max_distance (int): The radius of the kernel, in pixels.
kernel_filepath (string): The path to the file on disk where this
kernel should be stored. If this file exists, it will be
overwritten.
Returns:
None
"""
LOGGER.info('Creating a disk kernel of distance %s at %s',
max_distance, kernel_filepath)
kernel_size = int(numpy.round(max_distance * 2 + 1))
driver = gdal.GetDriverByName('GTiff')
kernel_dataset = driver.Create(
kernel_filepath.encode('utf-8'), kernel_size, kernel_size, 1,
gdal.GDT_Byte, options=[
'BIGTIFF=IF_SAFER', 'TILED=YES', 'BLOCKXSIZE=256',
'BLOCKYSIZE=256'])
# Make some kind of geotransform and SRS. It doesn't matter what, but
# having one will make GIS libraries behave better if it's all defined
kernel_dataset.SetGeoTransform([0, 1, 0, 0, 0, -1])
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS('WGS84')
kernel_dataset.SetProjection(srs.ExportToWkt())
kernel_band = kernel_dataset.GetRasterBand(1)
kernel_band.SetNoDataValue(255)
cols_per_block, rows_per_block = kernel_band.GetBlockSize()
n_cols = kernel_dataset.RasterXSize
n_rows = kernel_dataset.RasterYSize
n_col_blocks = int(math.ceil(n_cols / float(cols_per_block)))
n_row_blocks = int(math.ceil(n_rows / float(rows_per_block)))
for row_block_index in range(n_row_blocks):
row_offset = row_block_index * rows_per_block
row_block_width = n_rows - row_offset
if row_block_width > rows_per_block:
row_block_width = rows_per_block
for col_block_index in range(n_col_blocks):
col_offset = col_block_index * cols_per_block
col_block_width = n_cols - col_offset
if col_block_width > cols_per_block:
col_block_width = cols_per_block
# Numpy creates index rasters as ints by default, which sometimes
# creates problems on 32-bit builds when we try to add Int32
# matrices to float64 matrices.
            # numpy.float was deprecated and later removed from NumPy, so use
            # the explicit float64 alias here.
            row_indices, col_indices = numpy.indices((row_block_width,
                                                      col_block_width),
                                                     dtype=numpy.float64)
            row_indices += numpy.float64(row_offset - max_distance)
            col_indices += numpy.float64(col_offset - max_distance)
kernel_index_distances = numpy.hypot(
row_indices, col_indices)
kernel = kernel_index_distances < max_distance
kernel_band.WriteArray(kernel, xoff=col_offset,
yoff=row_offset)
    # Need to flush the kernel's cache to disk before opening up a new Dataset
    # object in iterblocks()
kernel_dataset.FlushCache()
def hm_op(cc_array, green_area_sum, cc_park_array, green_area_threshold):
"""Calculate HM.
    Args:
        cc_array (numpy.ndarray): the raw cooling index mapped from
            landcover values.
        green_area_sum (numpy.ndarray): the sum of green space pixels
            within the user-defined area for green space.
        cc_park_array (numpy.ndarray): the exponentially decayed
            cooling index due to proximity of green space.
        green_area_threshold (float): a value used to determine how much
            area is required to trigger a green area overwrite.
    Returns:
        ``cc_array`` where ``green_area_sum < green_area_threshold`` or
        ``cc_array >= cc_park_array``; otherwise ``cc_park_array``.
"""
result = numpy.empty(cc_array.shape, dtype=numpy.float32)
result[:] = TARGET_NODATA
valid_mask = ~(numpy.isclose(cc_array, TARGET_NODATA) &
numpy.isclose(cc_park_array, TARGET_NODATA))
cc_mask = ((cc_array >= cc_park_array) |
(green_area_sum < green_area_threshold))
result[cc_mask & valid_mask] = cc_array[cc_mask & valid_mask]
result[~cc_mask & valid_mask] = cc_park_array[~cc_mask & valid_mask]
return result
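# Illustrative worked example (not from the original module): for a pixel with
# cc = 0.4 and cc_park = 0.6, the park value 0.6 is kept when green_area_sum
# meets the threshold; if green_area_sum is below the threshold, or if
# cc >= cc_park, the raw cc value is kept instead.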
def map_work_loss(
work_temp_threshold_array, temperature_raster_path,
work_loss_raster_path):
"""Map work loss due to temperature.
Args:
work_temp_threshold_array (list): list of 3 sorted floats indicating
the thresholds for 25, 50, and 75% work loss.
temperature_raster_path (string): path to temperature raster in the
same units as `work_temp_threshold_array`.
work_loss_raster_path (string): path to target raster that maps per
pixel work loss percent.
Returns:
None.
"""
LOGGER.info('Calculating work loss using thresholds: %s',
work_temp_threshold_array)
byte_target_nodata = 255
def classify_to_percent_op(temperature_array):
result = numpy.empty(temperature_array.shape)
result[:] = byte_target_nodata
valid_mask = ~numpy.isclose(temperature_array, TARGET_NODATA)
result[
valid_mask &
(temperature_array < work_temp_threshold_array[0])] = 0
result[
valid_mask &
(temperature_array >= work_temp_threshold_array[0]) &
(temperature_array < work_temp_threshold_array[1])] = 25
result[
valid_mask &
(temperature_array >= work_temp_threshold_array[1]) &
(temperature_array < work_temp_threshold_array[2])] = 50
result[
valid_mask &
(temperature_array >= work_temp_threshold_array[2])] = 75
return result
pygeoprocessing.raster_calculator(
[(temperature_raster_path, 1)], classify_to_percent_op,
work_loss_raster_path, gdal.GDT_Byte,
nodata_target=byte_target_nodata)
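# Illustrative example (not from the original module; the threshold values are
# assumptions): with thresholds [31.5, 32.0, 32.5] degrees C, a pixel at 31.8
# is classified as 25% work loss, one at 32.2 as 50%, and anything at or above
# 32.5 as 75%.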
def _invoke_timed_callback(
reference_time, callback_lambda, callback_period):
"""Invoke callback if a certain amount of time has passed.
This is a convenience function to standardize update callbacks from the
module.
Args:
reference_time (float): time to base `callback_period` length from.
callback_lambda (lambda): function to invoke if difference between
current time and `reference_time` has exceeded `callback_period`.
callback_period (float): time in seconds to pass until
`callback_lambda` is invoked.
Returns:
`reference_time` if `callback_lambda` not invoked, otherwise the time
when `callback_lambda` was invoked.
"""
current_time = time.time()
if current_time - reference_time > callback_period:
callback_lambda()
return current_time
return reference_time
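# Illustrative usage sketch (not from the original module): callers typically
# re-assign the return value so the timer only resets when the callback fires.
# ``work_items`` and ``do_work`` are hypothetical placeholders.
#   last_report_time = time.time()
#   for item in work_items:
#       do_work(item)
#       last_report_time = _invoke_timed_callback(
#           last_report_time, lambda: LOGGER.info('still working...'), 5.0)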
def convolve_2d_by_exponential(
decay_kernel_distance, signal_raster_path,
target_convolve_raster_path):
"""Convolve signal by an exponential decay of a given radius.
Args:
decay_kernel_distance (float): radius of 1/e cutoff of decay kernel
raster in pixels.
        signal_raster_path (str): path to single band signal raster.
target_convolve_raster_path (str): path to convolved raster.
Returns:
None.
"""
LOGGER.info("Starting a convolution over %s with a decay "
"distance of %s", signal_raster_path, decay_kernel_distance)
temporary_working_dir = tempfile.mkdtemp(
dir=os.path.dirname(target_convolve_raster_path))
exponential_kernel_path = os.path.join(
temporary_working_dir, 'exponential_decay_kernel.tif')
utils.exponential_decay_kernel_raster(
decay_kernel_distance, exponential_kernel_path)
pygeoprocessing.convolve_2d(
(signal_raster_path, 1), (exponential_kernel_path, 1),
target_convolve_raster_path, working_dir=temporary_working_dir,
ignore_nodata=True)
shutil.rmtree(temporary_working_dir)
@validation.invest_validator
def validate(args, limit_to=None):
"""Validate args to ensure they conform to `execute`'s contract.
Args:
args (dict): dictionary of key(str)/value pairs where keys and
values are specified in `execute` docstring.
limit_to (str): (optional) if not None indicates that validation
            should only occur on the args[limit_to] value. The intent is that
individual key validation could be significantly less expensive
than validating the entire `args` dictionary.
Returns:
        list of ([invalid_key_a, invalid_key_b, ...], 'warning/error message')
            tuples, where each entry indicates that the listed invalid keys
            caused the error message in the second part of the tuple. This
            list is empty if validation succeeds.
"""
validation_warnings = validation.validate(
args, ARGS_SPEC['args'], ARGS_SPEC['args_with_spatial_overlap'])
invalid_keys = validation.get_invalid_keys(validation_warnings)
if ('biophysical_table_path' not in invalid_keys and
'cc_method' not in invalid_keys):
if args['cc_method'] == 'factors':
extra_biophysical_keys = ['shade', 'albedo']
else:
# args['cc_method'] must be 'intensity'.
# If args['cc_method'] isn't one of these two allowed values
# ('intensity' or 'factors'), it'll be caught by
# validation.validate due to the allowed values stated in
# ARGS_SPEC.
extra_biophysical_keys = ['building_intensity']
required_keys = (
extra_biophysical_keys +
ARGS_SPEC['args']['biophysical_table_path'][
'validation_options']['required_fields'][:])
error_msg = validation.check_csv(
args['biophysical_table_path'], required_fields=required_keys)
if error_msg:
validation_warnings.append((['biophysical_table_path'], error_msg))
return validation_warnings
| 42.013399 | 80 | 0.614436 | 7,137 | 59,575 | 4.802578 | 0.108729 | 0.012137 | 0.020131 | 0.010211 | 0.411658 | 0.308467 | 0.241481 | 0.184211 | 0.162067 | 0.135897 | 0 | 0.007285 | 0.301804 | 59,575 | 1,417 | 81 | 42.043049 | 0.816757 | 0.214838 | 0 | 0.216495 | 0 | 0 | 0.177774 | 0.014339 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017526 | false | 0.002062 | 0.018557 | 0 | 0.046392 | 0.003093 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b25d8ee513e7076b5c80b126f29c14314d5b7651 | 1,249 | py | Python | Week 2/id_195/LeetCode_94_195.py | theshaodi/algorithm004-05 | cac0cd3bb1211d50936234c08f6ece38677e55cf | [
"Apache-2.0"
] | 1 | 2019-10-12T06:48:45.000Z | 2019-10-12T06:48:45.000Z | Week 2/id_195/LeetCode_94_195.py | theshaodi/algorithm004-05 | cac0cd3bb1211d50936234c08f6ece38677e55cf | [
"Apache-2.0"
] | 1 | 2019-12-01T10:02:03.000Z | 2019-12-01T10:02:03.000Z | Week 2/id_195/LeetCode_94_195.py | theshaodi/algorithm004-05 | cac0cd3bb1211d50936234c08f6ece38677e55cf | [
"Apache-2.0"
] | null | null | null | class Solution:
    # Recursive solution, O(n). Note that a closure is used as the recursive helper.
def inorderTraversal_1(self, root):
treeVal = []
        def helper(root):
if root:
helper(root.left)
treeVal.append(root.val)
helper(root.right)
helper(root)
return treeVal
    # Iterative approach with an explicit stack, O(n): keep pushing left children
    # onto the stack; when the leftmost node is reached, pop it, record its value,
    # then move to the popped node's right subtree and repeat the same procedure.
def inorderTraversal_2(self, root):
treeVal = []
stack = []
p = root
while p or stack:
while p:
stack.append(p)
p = p.left
p = stack.pop()
treeVal.append(p.val)
p = p.right
return treeVal
    # Color-marking method: unvisited nodes are marked white (0) and visited
    # nodes black (1); this approach was reported to exceed the time limit
    # when tested.
def inorderTraversal_3(self, root):
treeVal = []
stack = [(0, root)]
while stack:
color, node = stack.pop()
if node is None: continue
if color == 0:
                # push in right-node-left order so the left subtree is
                # processed first when popping
                stack.append((0, node.right))
                stack.append((1, node))
                stack.append((0, node.left))
else:
treeVal.append(node.val)
return treeVal
if __name__ == '__main__':
testCase = [1, 2, 3]
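    # Illustrative sketch (not part of the original solution): LeetCode
    # normally supplies TreeNode, so a minimal stand-in is defined here to
    # make the three methods runnable locally.
    class TreeNode:
        def __init__(self, val=0, left=None, right=None):
            self.val = val
            self.left = left
            self.right = right

    # Build the tree 1 / left 2 / right 3 from testCase; inorder is [2, 1, 3].
    root = TreeNode(testCase[0], TreeNode(testCase[1]), TreeNode(testCase[2]))
    solution = Solution()
    print(solution.inorderTraversal_1(root))  # [2, 1, 3]
    print(solution.inorderTraversal_2(root))  # [2, 1, 3]
    print(solution.inorderTraversal_3(root))  # [2, 1, 3]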
| 28.386364 | 82 | 0.498799 | 133 | 1,249 | 4.601504 | 0.37594 | 0.071895 | 0.073529 | 0.065359 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017173 | 0.393915 | 1,249 | 43 | 83 | 29.046512 | 0.791281 | 0.123299 | 0 | 0.162162 | 0 | 0 | 0.007333 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108108 | false | 0 | 0 | 0 | 0.216216 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b25e4d8d3b1e49926e6c49d88545fa265089825a | 1,433 | py | Python | utils/backup.py | SparkleChen/MillionHero | 24f23c233192ae081903a6bab72b31dfaacce4a5 | [
"MIT"
] | 1 | 2018-01-24T03:21:12.000Z | 2018-01-24T03:21:12.000Z | utils/backup.py | 18482175910/MillionHero | 24f23c233192ae081903a6bab72b31dfaacce4a5 | [
"MIT"
] | 6 | 2021-03-18T20:18:49.000Z | 2022-03-11T23:14:47.000Z | utils/backup.py | 18482175910/MillionHero | 24f23c233192ae081903a6bab72b31dfaacce4a5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import requests
def save_question_answers_to_file(question, answers, directory=".", filename="QA.txt"):
"""
    back up the question and its candidate answers to a text file
    :param question: the question text
    :param answers: list of candidate answer strings
    :param directory: directory where the backup file is written
    :param filename: name of the backup file
    :return:
"""
with open(os.path.join(directory, filename), "at") as baker:
baker.write(";".join([question, ",".join(answers)]) + "\n")
def get_qa_list(source_file):
"""
upload all question and answer to cloud
:param period:
:return:
"""
try:
fp = open("screenshots/QA.txt", "rt")
qa_li = {}
for line in fp.readlines():
line = line.strip()
if not line:
continue
pair = line.split(";")
if len(pair) != 2:
continue
question, answers = pair[0], pair[1]
answers = answers.split(",")
qa_li[question] = answers
return qa_li
finally:
fp.close()
# The base_url below can be replaced with a server you deploy yourself.
def upload_to_cloud(qa_li):
"""
upload data to cloud
:param qa_li:
:return:
"""
data = []
for q, a in qa_li.items():
data.append({
"question": q,
"answers": a
})
base_url = "https://bob.36deep.com/v1/assistant/question/"
resp = requests.post(base_url, json=data, verify=False)
if resp.status_code // 100 not in (2, 3):
return False
return True
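# Illustrative usage sketch (not part of the original module); the example
# question is made up and upload_to_cloud performs a real network request.
if __name__ == "__main__":
    save_question_answers_to_file("1+1=?", ["2", "3", "11"], directory=".")
    qa_pairs = get_qa_list("QA.txt")
    if upload_to_cloud(qa_pairs):
        print("upload succeeded")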
| 23.112903 | 87 | 0.533147 | 170 | 1,433 | 4.388235 | 0.488235 | 0.032172 | 0.032172 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012474 | 0.328681 | 1,433 | 61 | 88 | 23.491803 | 0.762994 | 0.154222 | 0 | 0.058824 | 0 | 0 | 0.084145 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088235 | false | 0 | 0.058824 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b264b29bc0f1d6b0fa8758546f61106dbd795797 | 3,902 | py | Python | Hard Projects/Multilingual Online Translator/translator.py | ankit27kh/JetBrains-Python-Learning-Track | 2d8296f51a5ba4931b9a69cab280301eddeb318e | [
"MIT"
] | null | null | null | Hard Projects/Multilingual Online Translator/translator.py | ankit27kh/JetBrains-Python-Learning-Track | 2d8296f51a5ba4931b9a69cab280301eddeb318e | [
"MIT"
] | null | null | null | Hard Projects/Multilingual Online Translator/translator.py | ankit27kh/JetBrains-Python-Learning-Track | 2d8296f51a5ba4931b9a69cab280301eddeb318e | [
"MIT"
] | null | null | null | import sys
import requests
from bs4 import BeautifulSoup
import argparse
import lxml
arguments = argparse.ArgumentParser(description='provide from_lang to_lang word --limit')
arguments.add_argument('from_lang', type=str, help='from language')
arguments.add_argument('to_lang', type=str, help='to language type "all" for all')
arguments.add_argument('translate_word', type=str, help='word to be translated')
arguments.add_argument('--limit', type=int, help='number of examples')
args = arguments.parse_args()
from_lang = args.from_lang
to_lang = args.to_lang
translate_word = args.translate_word
if args.limit:
example_limit = args.limit
else:
example_limit = 2
language_dictionary = {1: "arabic",
2: "german",
3: "english",
4: "spanish",
5: "french",
6: "hebrew",
7: "japanese",
8: "dutch",
9: "polish",
10: "portuguese",
11: "romanian",
12: "russian",
13: "turkish"}
if from_lang not in list(language_dictionary.values()):
print(f"Sorry, the program doesn't support {from_lang}")
sys.exit()
if to_lang not in list(language_dictionary.values()) and to_lang != 'all':
print(f"Sorry, the program doesn't support {to_lang}")
sys.exit()
if to_lang == 'all':
to_langs = [language_dictionary[i] for i in range(1, 14)]
to_langs.remove(from_lang)
else:
to_langs = [to_lang]
session = requests.session()
with open(f'{translate_word}.txt', 'w', encoding='utf-8') as f:
for to_lang in to_langs:
headers = {'User-Agent': 'Mozilla/5.0'}
url = f"https://context.reverso.net/translation/{from_lang}-{to_lang}/{translate_word}"
try:
r = session.get(url, headers=headers)
if r:
soup = BeautifulSoup(r.content, 'lxml')
words = soup.find_all(class_='translation')
translations = soup.find_all(class_='example')
words = [word.text.strip() for word in words]
translations = [translation.text for translation in translations]
translations = [translation.rstrip().lstrip().split('\n') for translation in translations]
translations = [[translation[0], translation[-1].lstrip()] for translation in translations]
translations_list = []
for translation in translations:
translations_list.extend(translation)
print()
f.write('\n')
print(f"{to_lang.capitalize()} Translations:")
print(f"{to_lang.capitalize()} Translations:", file=f)
count = 0
for word in words[1:]:
print(word)
print(word, file=f)
count = count + 1
if count == example_limit:
break
count = 0
print()
f.write('\n')
print(f"{to_lang.capitalize()} Examples:")
print(f"{to_lang.capitalize()} Examples:", file=f)
for translation in translations:
print(translation[0])
print(translation[1])
print(translation[0], file=f)
print(translation[1], file=f)
count = count + 1
print()
f.write('\n')
if count == example_limit:
break
else:
if r.status_code == 404:
print(f'Sorry, unable to find {translate_word}')
        # requests raises its own ConnectionError, which does not inherit from
        # the builtin one, so catch the requests exception explicitly.
        except requests.exceptions.ConnectionError:
print("Something wrong with your internet connection")
| 38.254902 | 107 | 0.535623 | 423 | 3,902 | 4.815603 | 0.314421 | 0.044183 | 0.039273 | 0.068729 | 0.293569 | 0.254296 | 0.103093 | 0.066765 | 0.033382 | 0 | 0 | 0.015421 | 0.351871 | 3,902 | 101 | 108 | 38.633663 | 0.790036 | 0 | 0 | 0.230769 | 0 | 0 | 0.183496 | 0.022553 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.054945 | 0 | 0.054945 | 0.186813 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b264d9799a4fed10a9642c3a30840a7f6db3fefc | 1,792 | py | Python | ebel/web/api/ebel/v1/intact.py | e-bel/ebel | 778f135d73a62038caf9cf83f2259f19d806a3ca | [
"MIT"
] | 1 | 2022-02-24T13:15:33.000Z | 2022-02-24T13:15:33.000Z | ebel/web/api/ebel/v1/intact.py | e-bel/ebel | 778f135d73a62038caf9cf83f2259f19d806a3ca | [
"MIT"
] | 4 | 2021-09-16T07:20:22.000Z | 2022-01-14T10:27:30.000Z | ebel/web/api/ebel/v1/intact.py | e-bel/ebel | 778f135d73a62038caf9cf83f2259f19d806a3ca | [
"MIT"
] | null | null | null | """IntAct API methods."""
from sqlalchemy import or_
from flask.globals import request
from ebel.web.api import RDBMS
from ebel.manager.rdbms.models.intact import Intact
from ebel.manager.orientdb.odb_structure import intact_edges
from ebel.web.api.ebel.v1 import _get_data, _get_paginated_query_result, _get_paginated_ebel_query_result
def get_intact():
"""Get generic IntAct entry."""
return _get_data(Intact)
def get_by_uniprot():
"""Get IntAct entry by UniProt ID."""
ua = request.args.get('uniprot_accession')
if ua:
        # match the accession against either interaction partner
        a = Intact.int_a_uniprot_id
        b = Intact.int_b_uniprot_id
query = RDBMS.get_session().query(Intact).filter(or_(a == ua, b == ua))
return _get_paginated_query_result(query)
def get_ebel_relation():
"""Get IntAct related eBEL relations."""
has_ppi_ia_edge = [x for x in intact_edges if x.name == 'has_ppi_ia'][0]
conf = {x.prop_name: x.prop_name for x in has_ppi_ia_edge.props}
conf.update({
'relation_type': "@class",
'edge_id': "@rid.asString()",
'interactor_a_rid': "out.@rid.asString()",
'interactor_a_name': "out.name",
'interactor_a_namespace': "out.namespace",
'interactor_a_bel': "out.bel",
'interactor_b_rid': "in.@rid.asString()",
'interactor_b_namespace': "in.namespace",
'interactor_b_name': "in.name",
'interactor_b_bel': "in.bel",
})
sql = "SELECT "
sql += ', '.join([f"{v} as {k}" for k, v in conf.items()])
sql += " FROM has_ppi_ia"
ra = request.args
paras = {k: ra[k] for k in ra if k in conf}
if paras:
sql += " WHERE " + ' AND '.join([f'{conf[k].replace(".asString()","")} like "{v}"' for k, v in paras.items()])
return _get_paginated_ebel_query_result(sql)
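# Illustrative sketch (not from the original module): for a request such as
# ``?interactor_a_name=EGFR`` the assembled OrientDB query has roughly this
# shape (the column list is abbreviated):
#   SELECT @class as relation_type, @rid.asString() as edge_id,
#          out.name as interactor_a_name, ... FROM has_ppi_ia
#   WHERE out.name like "EGFR"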
| 34.461538 | 118 | 0.648438 | 264 | 1,792 | 4.136364 | 0.299242 | 0.029304 | 0.029304 | 0.025641 | 0.084249 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001403 | 0.204241 | 1,792 | 51 | 119 | 35.137255 | 0.764376 | 0.0625 | 0 | 0 | 0 | 0 | 0.23689 | 0.047619 | 0.026316 | 0 | 0 | 0 | 0 | 1 | 0.078947 | false | 0 | 0.157895 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |