hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0ea3ca7f07720c63bdf96544e12c0124d92cce84 | 5,057 | py | Python | examples/algorithms/DANN.py | bnestor/wilds | 886bb245d3b9bc496df25df20555b770a43f5b48 | [
"MIT"
] | null | null | null | examples/algorithms/DANN.py | bnestor/wilds | 886bb245d3b9bc496df25df20555b770a43f5b48 | [
"MIT"
] | null | null | null | examples/algorithms/DANN.py | bnestor/wilds | 886bb245d3b9bc496df25df20555b770a43f5b48 | [
"MIT"
] | null | null | null | from typing import Dict, List
import torch
from algorithms.single_model_algorithm import SingleModelAlgorithm
from models.domain_adversarial_network import DomainAdversarialNetwork
from models.initializer import initialize_model
from optimizer import initialize_optimizer_with_model_params
from losses import initialize_loss
from utils import concat_input
class DANN(SingleModelAlgorithm):
"""
Domain-adversarial training of neural networks.
Original paper:
@inproceedings{dann,
title={Domain-Adversarial Training of Neural Networks},
author={Ganin, Ustinova, Ajakan, Germain, Larochelle, Laviolette, Marchand and Lempitsky},
booktitle={Journal of Machine Learning Research 17},
year={2016}
}
"""
def __init__(
self,
config,
d_out,
grouper,
loss,
metric,
n_train_steps,
mixed_precision,
n_domains,
group_ids_to_domains,
):
# Initialize model
featurizer, classifier = initialize_model(
config, d_out=d_out, is_featurizer=True
)
model = DomainAdversarialNetwork(featurizer, classifier, n_domains)
parameters_to_optimize: List[Dict] = model.get_parameters_with_lr(
featurizer_lr=config.dann_featurizer_lr,
classifier_lr=config.dann_classifier_lr,
discriminator_lr=config.dann_discriminator_lr,
)
self.optimizer = initialize_optimizer_with_model_params(config, parameters_to_optimize)
self.domain_loss = initialize_loss('cross_entropy', config)
# Initialize module
super().__init__(
config=config,
model=model,
grouper=grouper,
loss=loss,
metric=metric,
n_train_steps=n_train_steps,
)
self.group_ids_to_domains = group_ids_to_domains
# Algorithm hyperparameters
self.penalty_weight = config.dann_penalty_weight
# Additional logging
self.logged_fields.append("classification_loss")
self.logged_fields.append("domain_classification_loss")
def process_batch(self, batch, unlabeled_batch=None):
"""
Overrides single_model_algorithm.process_batch().
Args:
- batch (tuple of Tensors): a batch of data yielded by data loaders
- unlabeled_batch (tuple of Tensors or None): a batch of data yielded by unlabeled data loader
Output:
- results (dictionary): information about the batch
- y_true (Tensor): ground truth labels for batch
- g (Tensor): groups for batch
- metadata (Tensor): metadata for batch
- y_pred (Tensor): model output for batch
- domains_true (Tensor): true domains for batch and unlabeled batch
- domains_pred (Tensor): predicted domains for batch and unlabeled batch
- unlabeled_features (Tensor): featurizer outputs for unlabeled_batch
"""
# Forward pass
x, y_true, metadata = batch
g = self.grouper.metadata_to_group(metadata).to(self.device)
domains_true = self.group_ids_to_domains[g]
if unlabeled_batch is not None:
unlabeled_x, unlabeled_metadata = unlabeled_batch
unlabeled_domains_true = self.group_ids_to_domains[
self.grouper.metadata_to_group(unlabeled_metadata)
]
# Concatenate examples and true domains
x_cat = concat_input(x, unlabeled_x)
domains_true = torch.cat([domains_true, unlabeled_domains_true])
else:
x_cat = x
x_cat = x_cat.to(self.device)
y_true = y_true.to(self.device)
domains_true = domains_true.to(self.device)
y_pred, domains_pred = self.model(x_cat)
# Ignore the predicted labels for the unlabeled data
y_pred = y_pred[: len(y_true)]
return {
"g": g,
"metadata": metadata,
"y_true": y_true,
"y_pred": y_pred,
"domains_true": domains_true,
"domains_pred": domains_pred,
}
def objective(self, results):
classification_loss = self.loss.compute(
results["y_pred"], results["y_true"], return_dict=False
)
if self.is_training:
domain_classification_loss = self.domain_loss.compute(
results.pop("domains_pred"),
results.pop("domains_true"),
return_dict=False,
)
else:
domain_classification_loss = 0.0
# Add to results for additional logging
self.save_metric_for_logging(
results, "classification_loss", classification_loss
)
self.save_metric_for_logging(
results, "domain_classification_loss", domain_classification_loss
)
return classification_loss + domain_classification_loss * self.penalty_weight
| 36.121429 | 106 | 0.635555 | 552 | 5,057 | 5.530797 | 0.266304 | 0.064854 | 0.047167 | 0.027841 | 0.201441 | 0.10285 | 0.020963 | 0 | 0 | 0 | 0 | 0.002245 | 0.295432 | 5,057 | 139 | 107 | 36.381295 | 0.854617 | 0.25173 | 0 | 0.044944 | 0 | 0 | 0.050913 | 0.014388 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033708 | false | 0 | 0.089888 | 0 | 0.157303 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ea425584ab12bf65f87b19ae0204776aef3a2f6 | 1,029 | py | Python | semanticparsing/wikidata/annotation_loader.py | UKPLab/emnlp2018-argmin-commonsense-knowledge | 9ce3693d67072cc0e6e0dc56bb79fa593d22e783 | [
"Apache-2.0"
] | 9 | 2018-11-14T12:52:58.000Z | 2021-07-16T08:41:36.000Z | semanticparsing/wikidata/annotation_loader.py | UKPLab/emnlp2018-argmin-commonsense-knowledge | 9ce3693d67072cc0e6e0dc56bb79fa593d22e783 | [
"Apache-2.0"
] | null | null | null | semanticparsing/wikidata/annotation_loader.py | UKPLab/emnlp2018-argmin-commonsense-knowledge | 9ce3693d67072cc0e6e0dc56bb79fa593d22e783 | [
"Apache-2.0"
] | 1 | 2019-04-17T15:31:19.000Z | 2019-04-17T15:31:19.000Z | import json
from typing import Dict, Tuple, List
def extract_indices(sent: Dict, entity_to_indices_map: Dict) -> List:
token_entity_map = {t: e['linkings'][0][0] for e in sent['entities'] for t in e['token_ids']
if len(e['linkings']) > 0 and e['drop_score'] < 0.75}
    # convert tokens to indices: linked entities map via entity_to_indices_map
    # (falling back to 1 for OOV entities); tokens without an entity link get 2
word_indices_list = [entity_to_indices_map.get(token_entity_map[i], 1) if i in token_entity_map else 2
for i in range(len(sent['tagged']))]
return word_indices_list
def load_single_file(file_name: str, arg_ids: List, entity_to_indices_map: Dict) -> Tuple:
with open(file_name) as f:
data_annotations = json.load(f)
data_annotations = [tuple([extract_indices(el, entity_to_indices_map) for el in data_annotations[arg_id]])
for arg_id in arg_ids]
warrant0_list, warrant1_list, reason_list, claim_list = tuple(zip(*data_annotations))
return warrant0_list, warrant1_list, reason_list, claim_list
| 41.16 | 110 | 0.684159 | 162 | 1,029 | 4.061728 | 0.382716 | 0.068389 | 0.091185 | 0.109422 | 0.237082 | 0.130699 | 0.130699 | 0.130699 | 0 | 0 | 0 | 0.016109 | 0.215743 | 1,029 | 24 | 111 | 42.875 | 0.799257 | 0.045675 | 0 | 0 | 0 | 0 | 0.05 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.133333 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ea5e5cb2c5f7c41673570cd737e85301d0ea93a | 3,709 | py | Python | protocol_builder/app.py | numaru/iofus | e71c7cd0f2d89378091e6ac6d3018146c446dca0 | [
"MIT"
] | 5 | 2017-08-05T14:09:11.000Z | 2019-04-01T15:25:23.000Z | protocol_builder/app.py | numaru/iofus | e71c7cd0f2d89378091e6ac6d3018146c446dca0 | [
"MIT"
] | null | null | null | protocol_builder/app.py | numaru/iofus | e71c7cd0f2d89378091e6ac6d3018146c446dca0 | [
"MIT"
] | null | null | null | import argparse
import glob
import os
from builder import EnumBuilder, MessageBuilder, TypeBuilder
import sys
class ProgressBar:
def __init__(self, max_=100, current=0, width=40):
self.max = max_
self.current = current
self.width = width
def print(self):
percent = self.current / self.max
activates = int(percent * self.width)
sys.stdout.write("\r[{0}{1}] {2}% {3}/{4}".format(
"+" * activates,
" " * (self.width - activates),
int(percent * 100),
self.current,
self.max
))
sys.stdout.flush()
def get_actionscript_files(path):
return list(glob.iglob(
os.path.join(path, "**", "*.as"),
recursive=True
))
def main():
print("\n".join([
r" ::::::::::: :::::::: :::::::::: ::: ::: :::::::: ",
r" :+: :+: :+: :+: :+: :+: :+: :+: ",
r" +:+ +:+ +:+ +:+ +:+ +:+ +:+ ",
r" +#+ +#+ +:+ :#::+::# +#+ +:+ +#++:++#++ ",
r" +#+ +#+ +#+ +#+ +#+ +#+ +#+ ",
r" #+# #+# #+# #+# #+# #+# #+# #+# ",
r"########### ######## ### ######## ######## ",
r" _ _ _ _ _ _ ",
r" ___ ___ ___| |_ ___ ___ ___| | | |_ _ _|_| |_| |___ ___ ",
r"| . | _| . | _| . | _| . | | | . | | | | | . | -_| _|",
r"| _|_| |___|_| |___|___|___|_| |___|___|_|_|___|___|_| ",
r"|_| ",
r"------------------------------------------------------------"
]))
current_dir = os.path.dirname(os.path.realpath(__file__))
parser = argparse.ArgumentParser(description="Build protocol for the iofus projet.")
parser.add_argument(
"-i",
dest="input_dir",
help="path to the input dir",
default=os.path.join(current_dir, "input")
)
parser.add_argument(
"-o",
dest="output_dir",
help="path to the output dir",
default=os.path.join(current_dir, "output")
)
args = parser.parse_args()
enums = get_actionscript_files(os.path.join(args.input_dir, "enums"))
enums.append(os.path.join(args.input_dir, "Metadata.as"))
print("{0} enum files found".format(len(enums)))
messages = get_actionscript_files(os.path.join(args.input_dir, "messages"))
print("{0} message files found".format(len(messages)))
types = get_actionscript_files(os.path.join(args.input_dir, "types"))
print("{0} type files found".format(len(types)))
elapsed = 0
print("\nBuilding enum files:")
progress = ProgressBar(len(enums), 0)
for elapsed in EnumBuilder.build(enums, os.path.join(args.output_dir, "denums.py")):
progress.print()
progress.current += 1
print()
print("Enum files built in {0}ms".format(round(elapsed * 1000, 1)))
print("\nBuilding message files:")
progress = ProgressBar(len(messages))
for elapsed in MessageBuilder.build(messages, os.path.join(args.output_dir, "dmessages.py")):
progress.print()
progress.current += 1
print()
print("Message files built in {0}ms".format(round(elapsed * 1000, 1)))
print("\nBuilding type files:")
progress = ProgressBar(len(types))
for elapsed in TypeBuilder.build(types, os.path.join(args.output_dir, "dtypes.py")):
progress.print()
progress.current += 1
print()
print("Type files built in {0}ms".format(round(elapsed * 1000, 1)))
if __name__ == "__main__":
main()
| 34.990566 | 97 | 0.486115 | 362 | 3,709 | 4.665746 | 0.265193 | 0.042629 | 0.019538 | 0.023683 | 0.347543 | 0.328597 | 0.274719 | 0.239195 | 0.166371 | 0.09177 | 0 | 0.01557 | 0.30736 | 3,709 | 105 | 98 | 35.32381 | 0.641884 | 0 | 0 | 0.147727 | 0 | 0.034091 | 0.324346 | 0.022378 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.056818 | 0.011364 | 0.125 | 0.193182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ea6ba444dab9cb234add317fe5591f4c7092ef5 | 504 | py | Python | ch09_WebCS/mech.py | Nickhool/core-python | 324d6aabff5ccadb490d228c6437a203612d93e2 | [
"MIT"
] | 1 | 2019-07-25T02:36:11.000Z | 2019-07-25T02:36:11.000Z | ch09_WebCS/mech.py | Nickhool/core-python | 324d6aabff5ccadb490d228c6437a203612d93e2 | [
"MIT"
] | null | null | null | ch09_WebCS/mech.py | Nickhool/core-python | 324d6aabff5ccadb490d228c6437a203612d93e2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/7/12 10:57 AM
# @File : mech.py  programmable web browsing
# @Software: PyCharm
__Author__ = "noduez"
from bs4 import BeautifulSoup, SoupStrainer
from mechanicalsoup import Browser
br = Browser()
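# NOTE: the submit()/geturl()/read()/follow_link() calls below follow the older
# mechanize-style API from the book; MechanicalSoup's Browser exposes a
# different, requests-based interface.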
#home page
rsp = br.submit('http://us.pycon.org/2011/home')
print('\n***', rsp.geturl())
print("Confirm home page has 'Log in' link; click it")
page = rsp.read()
assert 'Log in' in page, 'Log in not in page'
rsp = br.follow_link(text_regex='Log in') | 26.526316 | 54 | 0.676587 | 81 | 504 | 4.135802 | 0.703704 | 0.059701 | 0.053731 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.040094 | 0.15873 | 504 | 19 | 55 | 26.526316 | 0.75 | 0.267857 | 0 | 0 | 0 | 0 | 0.315934 | 0 | 0 | 0 | 0 | 0 | 0.1 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ea8cd23880f4ff984949551104c8e212cbc209d | 526 | py | Python | amnesia/modules/tag/mapper.py | silenius/amnesia | ba5e3ac79a89da599c22206ad1fd17541855f74c | [
"BSD-2-Clause"
] | 4 | 2015-05-08T10:57:56.000Z | 2021-05-17T04:32:11.000Z | amnesia/modules/tag/mapper.py | silenius/amnesia | ba5e3ac79a89da599c22206ad1fd17541855f74c | [
"BSD-2-Clause"
] | 6 | 2019-12-26T16:43:41.000Z | 2022-02-28T11:07:54.000Z | amnesia/modules/tag/mapper.py | silenius/amnesia | ba5e3ac79a89da599c22206ad1fd17541855f74c | [
"BSD-2-Clause"
] | 1 | 2019-09-23T14:08:11.000Z | 2019-09-23T14:08:11.000Z | # -*- coding: utf-8 -*-
from sqlalchemy import orm
from amnesia.db import mapper_registry
from amnesia.modules.tag import Tag
from amnesia.modules.content import Content
def includeme(config):
tables = config.registry['metadata'].tables
mapper_registry.map_imperatively(
Tag,
tables['tag'],
properties={
'contents': orm.relationship(
Content,
secondary=tables['content_tag'],
back_populates='tags'
)
}
)
| 21.04 | 48 | 0.596958 | 52 | 526 | 5.942308 | 0.538462 | 0.106796 | 0.116505 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002732 | 0.304183 | 526 | 24 | 49 | 21.916667 | 0.84153 | 0.039924 | 0 | 0 | 0 | 0 | 0.067594 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.235294 | 0 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0eaf2e70b582cf8327944fd6211deee227712168 | 1,604 | py | Python | scripts/deployment.py | DhruvTheEthDeveloper/SFT-Protocol | 489c0d4cba4f3b302906c366c0e796701c753c48 | [
"Apache-2.0"
] | null | null | null | scripts/deployment.py | DhruvTheEthDeveloper/SFT-Protocol | 489c0d4cba4f3b302906c366c0e796701c753c48 | [
"Apache-2.0"
] | null | null | null | scripts/deployment.py | DhruvTheEthDeveloper/SFT-Protocol | 489c0d4cba4f3b302906c366c0e796701c753c48 | [
"Apache-2.0"
] | 1 | 2021-06-29T19:17:59.000Z | 2021-06-29T19:17:59.000Z | #!/usr/bin/python3
import itertools
from brownie import *
def main(token_contract=SecurityToken, countries=(1,2,3), ratings=(1,2)):
token, issuer, kyc = deploy_contracts(token_contract)
add_investors(countries, ratings)
return token, issuer, kyc
def deploy_contracts(token_contract=SecurityToken):
kyc = accounts[0].deploy(KYCRegistrar, [accounts[0]], 1)
issuer = accounts[0].deploy(IssuingEntity, [accounts[0]], 1)
token = accounts[0].deploy(token_contract, issuer, "Test Token", "TST", 1000000)
issuer.addToken(token, {'from': accounts[0]})
issuer.setRegistrar(kyc, False, {'from': accounts[0]})
return token, issuer, kyc
def deploy_custodian():
accounts[0].deploy(OwnedCustodian, [a[0]], 1)
IssuingEntity[0].addCustodian(OwnedCustodian[0], {'from': a[0]})
return OwnedCustodian[0]
def add_investors(countries=(1,2,3), ratings=(1,2)):
# Approves accounts[1:7] in KYCRegistrar[0], with investor ratings 1-2 and country codes 1-3
product = itertools.product(countries, ratings)
for count, country, rating in [(c, i[0], i[1]) for c, i in enumerate(product, start=1)]:
KYCRegistrar[0].addInvestor(
("investor" + str(count)).encode(),
country,
'0x000001',
rating,
9999999999,
[accounts[count]],
{'from': accounts[0]}
)
# Approves investors from country codes 1-3 in IssuingEntity[0]
IssuingEntity[0].setCountries(
countries,
[1] * len(countries),
[0] * len(countries),
{'from': accounts[0]}
)
| 32.734694 | 96 | 0.638404 | 195 | 1,604 | 5.205128 | 0.312821 | 0.08867 | 0.059113 | 0.023645 | 0.098522 | 0.098522 | 0.041379 | 0 | 0 | 0 | 0 | 0.055512 | 0.21384 | 1,604 | 48 | 97 | 33.416667 | 0.749405 | 0.105985 | 0 | 0.114286 | 0 | 0 | 0.034242 | 0 | 0 | 0 | 0.005591 | 0 | 0 | 1 | 0.114286 | false | 0 | 0.057143 | 0 | 0.257143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0eb03a5ba1791b14ad3d2b1a34c497ae57504a21 | 198 | py | Python | PEC2021B/ISGOODNM/ISGOODNM.py | Sahilkumarrr13/PEC | 60548ba2d6ba913bba6f7377dece69b8356ebe75 | [
"MIT"
] | null | null | null | PEC2021B/ISGOODNM/ISGOODNM.py | Sahilkumarrr13/PEC | 60548ba2d6ba913bba6f7377dece69b8356ebe75 | [
"MIT"
] | null | null | null | PEC2021B/ISGOODNM/ISGOODNM.py | Sahilkumarrr13/PEC | 60548ba2d6ba913bba6f7377dece69b8356ebe75 | [
"MIT"
] | null | null | null | n = int(input())
sum = 1
for i in range(2, int(n**0.5) + 1):
    if n % i == 0:
        sum += i
        if i != n // i:  # avoid counting the square root twice
            sum += (n // i)
if sum >= n:
break
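# Example: n = 28 -> proper divisors sum to 1 + 2 + 4 + 7 + 14 = 28, so "YES";
# n = 12 -> 1 + 2 + 3 + 4 + 6 = 16 != 12, so "NO".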
if sum == n:
print("YES")
else:
print("NO")
| 15.230769 | 31 | 0.409091 | 35 | 198 | 2.314286 | 0.514286 | 0.148148 | 0.148148 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.04065 | 0.378788 | 198 | 12 | 32 | 16.5 | 0.617886 | 0 | 0 | 0 | 0 | 0 | 0.025253 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7eb89e917ae23df5fd8eebba5562a851c48b2adc | 688 | py | Python | pyintuition/orm/application.py | trobertsca/intuition | 5ef5d03b0856f2b95a9d0b81b3831a0c12e7208e | [
"MIT"
] | null | null | null | pyintuition/orm/application.py | trobertsca/intuition | 5ef5d03b0856f2b95a9d0b81b3831a0c12e7208e | [
"MIT"
] | 1 | 2018-03-24T22:42:17.000Z | 2018-03-25T03:17:19.000Z | pyintuition/orm/application.py | trobertsca/intuition | 5ef5d03b0856f2b95a9d0b81b3831a0c12e7208e | [
"MIT"
] | null | null | null | from pyintuition import Intuition
class Application(object):
objects = []
def __init__(self, **kwargs):
self.name: str = kwargs['name']
self.desc: str = kwargs.pop('desc', None)
self.original: dict = kwargs['original']
self.variables: dict = kwargs.pop('variables', None)
self.chdbids: dict = kwargs['chdbids']['chdbid']
self.fields: dict = kwargs['fields']
self.client = kwargs['client']
self.tables = []
Application.objects.append(self)
@classmethod
def get(cls, app_id: str, client: Intuition):
schema = client.get_schema(app_id)
return cls(**schema['table'], client=client)
| 28.666667 | 60 | 0.613372 | 78 | 688 | 5.320513 | 0.435897 | 0.096386 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.244186 | 688 | 23 | 61 | 29.913043 | 0.798077 | 0 | 0 | 0 | 0 | 0 | 0.079942 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.058824 | 0 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ebb41e467343b6562ab310785447c20ec3a6b52 | 1,075 | py | Python | NewRelicApiParser/REST/BrowserApplications/__init__.py | Bharat23/newrelic-api-parser | c55d508387fde33af9bdc93f16aae3cb2a2e5f13 | [
"MIT"
] | null | null | null | NewRelicApiParser/REST/BrowserApplications/__init__.py | Bharat23/newrelic-api-parser | c55d508387fde33af9bdc93f16aae3cb2a2e5f13 | [
"MIT"
] | 1 | 2021-07-30T17:32:37.000Z | 2021-07-30T17:32:37.000Z | NewRelicApiParser/REST/BrowserApplications/__init__.py | Bharat23/newrelic-api-parser | c55d508387fde33af9bdc93f16aae3cb2a2e5f13 | [
"MIT"
] | null | null | null | from NewRelicApiParser.Base import BaseNewRelic
from NewRelicApiParser.CustomExceptions import ArgumentException
class BrowserApplications(BaseNewRelic):
def __init__(self, API_KEY: str):
super().__init__(API_KEY)
def get_list(self) -> dict:
"""
fetch the browser applications for new relic
"""
url = self.BASE_URI + '/browser_applications.json'
return super().get_data(url)
def create(self, browser_application_name: str) -> dict:
"""
create a browser application
"""
try:
url = self.BASE_URI + '/browser_applications.json'
if browser_application_name is None or browser_application_name == '':
raise ArgumentException
data = {
'browser_application': {
'name': browser_application_name
}
}
return super().post_data(url, data=data)
except ArgumentException as ae:
return None
except Exception as ex:
return None | 32.575758 | 82 | 0.596279 | 105 | 1,075 | 5.857143 | 0.447619 | 0.17561 | 0.178862 | 0.045528 | 0.120325 | 0.120325 | 0.120325 | 0 | 0 | 0 | 0 | 0 | 0.325581 | 1,075 | 33 | 83 | 32.575758 | 0.848276 | 0.067907 | 0 | 0.173913 | 0 | 0 | 0.078452 | 0.054393 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0.086957 | 0 | 0.434783 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ebf33ea7986707e68154e8333a3770130cd2af7 | 319 | py | Python | main.py | vikrameast/find-face | cf0a0cbb74fac699d3038ebc52eff1600aae382e | [
"MIT"
] | 1 | 2022-03-16T09:58:48.000Z | 2022-03-16T09:58:48.000Z | main.py | vikrameast/face-detection | cf0a0cbb74fac699d3038ebc52eff1600aae382e | [
"MIT"
] | null | null | null | main.py | vikrameast/face-detection | cf0a0cbb74fac699d3038ebc52eff1600aae382e | [
"MIT"
] | null | null | null | import cv2
face_cascade = cv2.CascadeClassifier('face_detector.xml')
img = cv2.imread('phelps.jpg')
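# detectMultiScale returns one (x, y, w, h) bounding box per detected face;
# 1.1 is the image-pyramid scale factor and 4 the minNeighbors threshold.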
faces = face_cascade.detectMultiScale(img, 1.1, 4)
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
cv2.imwrite("face_detected.png", img)
print('Successfully saved')
| 22.785714 | 58 | 0.655172 | 51 | 319 | 4.019608 | 0.607843 | 0.107317 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.052632 | 0.166144 | 319 | 13 | 59 | 24.538462 | 0.718045 | 0 | 0 | 0 | 0 | 0 | 0.203279 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ec196c1e6cdddbdeaad113c1c82475a2b9bd180 | 1,257 | py | Python | send_module/go_cqhttp/sender.py | TsinbeiTech/AutoStudyCyol | 8eb3a45f7efc78580286b436d591500607e84161 | [
"MIT"
] | null | null | null | send_module/go_cqhttp/sender.py | TsinbeiTech/AutoStudyCyol | 8eb3a45f7efc78580286b436d591500607e84161 | [
"MIT"
] | null | null | null | send_module/go_cqhttp/sender.py | TsinbeiTech/AutoStudyCyol | 8eb3a45f7efc78580286b436d591500607e84161 | [
"MIT"
] | null | null | null | import requests
sess = requests.session()
user_id = ""
access_token = ""
api_url = ""
group_id = ""
at_user = ""
def set_api_url(url):
global api_url
api_url = url
def set_access_token(token):
global access_token
access_token = token
def set_group_id(id):
global group_id
group_id = id
def set_at_user(user):
global at_user
at_user = user
def set_user_id(id):
global user_id
user_id = id
def send(title, content) -> dict:
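    # If no group_id is configured, send a private message via /send_private_msg;
    # otherwise post to the group via /send_group_msg, optionally @-mentioning
    # at_user with a CQ code.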
global at_user
empty = group_id == ""
noat = at_user == ""
if empty:
resp = sess.post(url=f"{api_url}/send_private_msg?access_token={access_token}", data={
'user_id': f"{user_id}",
'message': f"{title}\n\n{content}"
})
else:
if noat:
at_user = user_id
resp = sess.post(url=f"{api_url}/send_group_msg?access_token={access_token}", data={
'group_id': f"{group_id}",
'message': f"[CQ:at,qq={at_user}]\n\n{title}\n\n{content}" if not noat else f"{title}\n\n{content}"
})
res = resp.json()
success = res['status'] == "ok"
return {
'success': success,
'message': res['msg'] if not success else '发送成功'
}
| 24.173077 | 115 | 0.571201 | 179 | 1,257 | 3.759777 | 0.22905 | 0.130758 | 0.07578 | 0.098068 | 0.208024 | 0.163447 | 0.077266 | 0.077266 | 0 | 0 | 0 | 0 | 0.287192 | 1,257 | 51 | 116 | 24.647059 | 0.751116 | 0 | 0 | 0.090909 | 0 | 0.022727 | 0.212411 | 0.119332 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0.022727 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ec6b79791ff39e0c3f80e0bac301baf5e5b53d8 | 1,389 | py | Python | analyticlab/measure/std.py | xingrongtech/analyticlab | 2827591db9b31ff38299712ed6c404ff30583f6f | [
"MIT"
] | 13 | 2018-05-11T02:45:11.000Z | 2021-07-17T22:19:04.000Z | analyticlab/measure/std.py | xingrongtech/analyticlab | 2827591db9b31ff38299712ed6c404ff30583f6f | [
"MIT"
] | null | null | null | analyticlab/measure/std.py | xingrongtech/analyticlab | 2827591db9b31ff38299712ed6c404ff30583f6f | [
"MIT"
] | 2 | 2019-10-17T11:43:11.000Z | 2019-11-27T10:54:28.000Z | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 6 10:18:22 2018
@author: xingrongtech
"""
from ..system.statformat import statFormat, getMaxDeltaDigit
from ..lookup.RangeTable import C as rC
def Bessel(item, remainOneMoreDigit=False):
    '''Standard deviation computed with Bessel's formula.
    [Parameters]
    1. item: sample data used to compute the standard deviation.
    2. remainOneMoreDigit (optional, bool): whether to keep one extra significant digit in the result. Default remainOneMoreDigit=False.
    [Returns]
    Num: the standard deviation.'''
mean = item.mean()
dsum = sum([(ni._Num__num - mean._Num__num)**2 for ni in item._NumItem__arr])
s = (dsum / (len(item._NumItem__arr) - 1))**0.5
result = statFormat(getMaxDeltaDigit(item, mean), s)
result._Num__q = item._NumItem__q
if remainOneMoreDigit:
result.remainOneMoreDigit()
return result
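# Reference: Bessel's formula gives s = sqrt( sum_i (x_i - mean)^2 / (n - 1) ),
# i.e. the (dsum / (n - 1)) ** 0.5 quantity computed in Bessel() above.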
def Range(item, remainOneMoreDigit=False):
    '''Standard deviation computed with the range method.
    [Parameters]
    1. item: sample data used to compute the standard deviation.
    2. remainOneMoreDigit (optional, bool): whether to keep one extra significant digit in the result. Default remainOneMoreDigit=False.
    [Returns]
    Num: the standard deviation.'''
R = max(item._NumItem__arr) - min(item._NumItem__arr)
C = rC(len(item._NumItem__arr))
result = R/C
if remainOneMoreDigit:
result.remainOneMoreDigit()
return result
def CollegePhysics(item, remainOneMoreDigit=False):
    '''Standard deviation as computed in college physics experiments.
    [Parameters]
    1. item: sample data used to compute the standard deviation.
    2. remainOneMoreDigit (optional, bool): whether to keep one extra significant digit in the result. Default remainOneMoreDigit=False.
    [Returns]
    Num: the standard deviation.'''
return Bessel(item, remainOneMoreDigit)
| 28.346939 | 81 | 0.691865 | 160 | 1,389 | 5.8375 | 0.4 | 0.070664 | 0.074946 | 0.070664 | 0.437902 | 0.437902 | 0.437902 | 0.311563 | 0.311563 | 0.311563 | 0 | 0.019332 | 0.180706 | 1,389 | 48 | 82 | 28.9375 | 0.801406 | 0.342693 | 0 | 0.3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15 | false | 0 | 0.1 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ecc05a9303031da47c8f61a09406783a34f5a9f | 14,476 | py | Python | Fooling-LIME-SHAP/cc_experiment_shap.py | domenVres/Robust-LIME-SHAP-and-IME | 8c3853389a16938c9838db3a5d4bee71ccee79a3 | [
"Apache-2.0"
] | 2 | 2021-05-25T08:08:49.000Z | 2021-05-25T08:28:52.000Z | Fooling-LIME-SHAP/cc_experiment_shap.py | domenVres/Robust-LIME-SHAP-and-IME | 8c3853389a16938c9838db3a5d4bee71ccee79a3 | [
"Apache-2.0"
] | null | null | null | Fooling-LIME-SHAP/cc_experiment_shap.py | domenVres/Robust-LIME-SHAP-and-IME | 8c3853389a16938c9838db3a5d4bee71ccee79a3 | [
"Apache-2.0"
] | 1 | 2021-05-25T08:28:53.000Z | 2021-05-25T08:28:53.000Z | """
The SHAP experiment MAIN for Communities and Crime.
* Run the file and the CC experiments will complete
* See compas experiment file for more details on how to read results.
"""
import warnings
warnings.filterwarnings('ignore')
from adversarial_models import *
from utils import *
from get_data import *
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import pandas as pd
import lime
import lime.lime_tabular
import shap
from copy import deepcopy
import csv
''' Function that helps to determine the number of clusters with silhouette score
X -> numpy array, data
n_clusters_list -> list of candidates for number of clusters
'''
def s_score(X, n_clusters_list):
for n_clusters in n_clusters_list:
# Create a subplot with 1 row and 2 columns
fig, ax = plt.subplots(1, 1)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Clustering
k_means = KMeans(n_clusters=n_clusters)
clusters = k_means.fit_predict(X)
# Average silhouette score
silhouette_avg = silhouette_score(X, clusters)
# Silhouette score for every instance
silhouette_values = silhouette_samples(X, clusters)
# Start with y=10, so there are spaces between clusters on graph
y_lower = 10
for i in range(n_clusters):
cluster_silhouette_values = silhouette_values[clusters == i]
cluster_silhouette_values.sort()
# Upper bound for y
y_upper = y_lower + cluster_silhouette_values.shape[0]
# Color of the cluster
color = cm.nipy_spectral(float(i) / n_clusters)
# Fill the figure
ax.fill_betweenx(np.arange(y_lower, y_upper),
0, cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax.text(-0.05, y_lower + 0.5 * cluster_silhouette_values.shape[0], str(i))
# Increase lower bound for y so next cluster is 10 above this one
y_lower = y_upper + 10
ax.set_title(f"Display of silhouette scores for {n_clusters} clusters")
ax.set_xlabel("Silhouette score")
ax.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax.axvline(x=silhouette_avg, color="red", linestyle="--")
ax.set_yticks([]) # Clear the yaxis labels / ticks
ax.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
plt.show()
# Set up experiment parameters
params = Params("model_configurations/experiment_params.json")
np.random.seed(params.seed)
# Prepare data (use this option if you are not using treeEnsemble)
'''
X, y, cols = get_and_preprocess_cc(params)
# add unrelated columns, setup
X['unrelated_column_one'] = np.random.choice([0,1],size=X.shape[0])
X['unrelated_column_two'] = np.random.choice([0,1],size=X.shape[0])
# Split the data into train and test set
data_train, data_test, ytrain, ytest = train_test_split(X, y, test_size=0.1)'''
# If data was split before the experiment (required for treeEnsemble)
data_train = pd.read_csv("..\Data\cc_forest_train.csv")
data_test = pd.read_csv("..\Data\cc_forest_test.csv")
ytrain = data_train.pop("response")
ytest = data_test.pop("response")
# Save the data, so we can generate new samples in R
data_train["response"] = ytrain
data_train.to_csv("..\Data\cc_RBF_train.csv", index = False)
# Stops the execution of experiment so generators have time to generate data in R
input("Press enter, when rbfDataGen and treeEnsemble generated all the data.")
data_train = data_train.drop("response", axis = 1)
features = [c for c in data_train]
# Only categorical features are unrelated columns
categorical_features = ["unrelated_column_one", "unrelated_column_two"]
categorical_idcs = [features.index("unrelated_column_one"), features.index("unrelated_column_two")]
dummy_idcs = [[categorical_idcs[0]], [categorical_idcs[1]]]
# Find integer features
integer_attributes = [i for i, feature in enumerate(data_test.columns)
if (data_test[feature].dtype in ["int64", "int32", "int8", "uint64", "uint32", "uint8"] and i not in categorical_idcs)]
# Original names are the same as features as there are no one-hot encoded features
original_names = features
# Parameters for MCD-VAE
latent_dim = data_train.shape[1] // 2
original_dim = data_train.shape[1]
# Indices of sensitive and unrelated features
race_indc = features.index('racePctWhite numeric')
unrelated_indcs = features.index('unrelated_column_one')
unrelated_indcs1 = features.index('unrelated_column_two')
xtrain = data_train.values
xtest = data_test.values
# for racist model
mean_race = np.mean(xtrain[:,race_indc])
###
## The models f and psi for CC. We discriminate based on race for f and consider two RANDOMLY DRAWN features to display in psi
#
# the biased model
class racist_model_f:
# Decision rule: classify negatively if race is black
def predict(self,X):
return np.array([params.negative_outcome if x[race_indc] > mean_race else params.positive_outcome for x in X])
def predict_proba(self, X):
return one_hot_encode(self.predict(X))
def score(self, X,y):
return np.sum(self.predict(X)==y) / len(X)
# the display model with one unrelated feature
class innocuous_model_psi:
# Decision rule: classify according to randomly drawn column 'unrelated column'
def predict_proba(self, X):
return one_hot_encode(np.array([params.negative_outcome if x[unrelated_indcs] > 0 else params.positive_outcome for x in X]))
# the display model with two unrelated features
class innocuous_model_psi_two:
def predict_proba(self, X):
# Using 0.5 to make it easier to detect decision boundary on perturbation
A = np.where(X[:,unrelated_indcs] > 0.5, params.positive_outcome, params.negative_outcome)
B = np.where(X[:,unrelated_indcs1] < 0.5, params.positive_outcome, params.negative_outcome)
preds = np.logical_xor(A, B).astype(int)
return one_hot_encode(preds)
#
##
###
def experiment_main():
"""
Run through experiments for SHAP on CC using both one and two unrelated features.
* This may take some time given that we iterate through every point in the test set
* We print out the rate at which features occur in the top three features
"""
# Setup SHAP
# Choose the optimal number of clusters
candidates = [2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 32, 64, 100]
s_score(xtrain, candidates)
n_clusters = int(input("Please enter the optimal number of clusters: "))
##############################################
# One unrelated (innocuous_model_psi is used)
##############################################
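    # Summarise the training data into n_clusters weighted centroids to serve
    # as the SHAP background distribution.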
background_distribution = shap.kmeans(xtrain, n_clusters)
generator_specs = {"original_dim": original_dim, "intermediate_dim": 8, "latent_dim": latent_dim, "epochs": 100, "dropout": 0.2,\
"experiment": "CC", "feature_names": features}
# Adversarial models
adv_models = dict()
adv_models["Perturbation"] = Adversarial_Kernel_SHAP_Model(racist_model_f(), innocuous_model_psi()).train(xtrain, ytrain, feature_names=features)
adv_models["DropoutVAE"] = Adversarial_Kernel_SHAP_Model(racist_model_f(), innocuous_model_psi(), generator = "DropoutVAE", generator_specs = generator_specs).\
train(xtrain, ytrain, feature_names=features, dummy_idcs=dummy_idcs, integer_idcs=integer_attributes, n_samples=10*xtrain.shape[0])
adv_models["RBF"] = Adversarial_Kernel_SHAP_Model(racist_model_f(), innocuous_model_psi(), generator = "RBF", generator_specs = generator_specs).\
train(xtrain, ytrain, feature_names=features, dummy_idcs=dummy_idcs, integer_idcs=integer_attributes)
adv_models["Forest"] = Adversarial_Kernel_SHAP_Model(racist_model_f(), innocuous_model_psi(), generator = "Forest", generator_specs = generator_specs).\
train(xtrain, ytrain, feature_names=features, dummy_idcs=dummy_idcs, integer_idcs=integer_attributes)
for adversarial in ["Perturbation", "DropoutVAE", "RBF", "Forest"]:
adv_shap = adv_models[adversarial]
# Explainers
adv_kernel_explainers = dict()
adv_kernel_explainers["Perturbation"] = shap.KernelExplainer(adv_shap.predict, background_distribution)
adv_kernel_explainers["DropoutVAE"] = shap.KernelExplainer(adv_shap.predict, xtrain, generator="DropoutVAE", generator_specs=generator_specs,\
dummy_idcs=dummy_idcs, integer_idcs=integer_attributes, instance_multiplier=100)
adv_kernel_explainers["RBF"] = shap.KernelExplainer(adv_shap.predict, xtrain, generator="RBF", generator_specs=generator_specs,\
dummy_idcs=dummy_idcs)
adv_kernel_explainers["Forest"] = shap.KernelExplainer(adv_shap.predict, xtrain, generator="Forest", generator_specs=generator_specs,\
dummy_idcs=dummy_idcs)
adv_kernel_explainers["ForestFill"] = shap.KernelExplainer(adv_shap.predict, xtrain, generator="Forest", generator_specs=generator_specs,\
dummy_idcs=dummy_idcs)
for explainer in ["Perturbation", "DropoutVAE", "RBF", "Forest", "ForestFill"]:
adv_kernel_explainer = adv_kernel_explainers[explainer]
if explainer == "ForestFill":
explanations = adv_kernel_explainer.shap_values(xtest, fill_data=True, data_location="..\Data/cc_forest_shap.csv")
else:
explanations = adv_kernel_explainer.shap_values(xtest)
# format for display
formatted_explanations = []
for exp in explanations:
if explainer == "Perturbation":
formatted_explanations.append([(features[i], exp[i]) for i in range(len(exp))])
else:
formatted_explanations.append([(original_names[i], exp[i]) for i in range(len(exp))])
print (f"SHAP Ranks and Pct Occurances one unrelated feature, adversarial: {adversarial}, explainer: {explainer}:")
if explainer == "Perturbation":
summary = experiment_summary(formatted_explanations, features)
else:
summary = experiment_summary(formatted_explanations, original_names)
print (summary)
print ("Fidelity:",round(adv_shap.fidelity(xtest),2))
file_name = f"../Results/CCShap/ccShapSummary_adversarial_{adversarial}_explainer_{explainer}.csv"
with open(file_name, "w") as output:
w = csv.writer(output)
for key, val in summary.items():
w.writerow([key] + [pair for pair in val])
##################################################
# Two unrelated (innocuous_model_psi_two is used)
##################################################
background_distribution = shap.kmeans(xtrain, n_clusters)
generator_specs = {"original_dim": original_dim, "intermediate_dim": 8, "latent_dim": latent_dim, "epochs": 100, "dropout": 0.2,\
"experiment": "CC", "feature_names": features}
# Adversarial models
adv_models = dict()
adv_models["Perturbation"] = Adversarial_Kernel_SHAP_Model(racist_model_f(), innocuous_model_psi_two()).train(xtrain, ytrain, feature_names=features)
adv_models["DropoutVAE"] = Adversarial_Kernel_SHAP_Model(racist_model_f(), innocuous_model_psi_two(), generator = "DropoutVAE", generator_specs = generator_specs).\
train(xtrain, ytrain, feature_names=features, dummy_idcs=dummy_idcs, integer_idcs=integer_attributes, n_samples=10*xtrain.shape[0])
adv_models["RBF"] = Adversarial_Kernel_SHAP_Model(racist_model_f(), innocuous_model_psi_two(), generator = "RBF", generator_specs = generator_specs).\
train(xtrain, ytrain, feature_names=features, dummy_idcs=dummy_idcs, integer_idcs=integer_attributes)
adv_models["Forest"] = Adversarial_Kernel_SHAP_Model(racist_model_f(), innocuous_model_psi_two(), generator = "Forest", generator_specs = generator_specs).\
train(xtrain, ytrain, feature_names=features, dummy_idcs=dummy_idcs, integer_idcs=integer_attributes)
for adversarial in ["Perturbation", "DropoutVAE", "RBF", "Forest"]:
adv_shap = adv_models[adversarial]
# Explainers
adv_kernel_explainers = dict()
adv_kernel_explainers["Perturbation"] = shap.KernelExplainer(adv_shap.predict, background_distribution)
adv_kernel_explainers["DropoutVAE"] = shap.KernelExplainer(adv_shap.predict, xtrain, generator="DropoutVAE", generator_specs=generator_specs,\
dummy_idcs=dummy_idcs, integer_idcs=integer_attributes, instance_multiplier=100)
adv_kernel_explainers["RBF"] = shap.KernelExplainer(adv_shap.predict, xtrain, generator="RBF", generator_specs=generator_specs,\
dummy_idcs=dummy_idcs)
adv_kernel_explainers["Forest"] = shap.KernelExplainer(adv_shap.predict, xtrain, generator="Forest", generator_specs=generator_specs,\
dummy_idcs=dummy_idcs)
adv_kernel_explainers["ForestFill"] = shap.KernelExplainer(adv_shap.predict, xtrain, generator="Forest", generator_specs=generator_specs,\
dummy_idcs=dummy_idcs)
for explainer in ["Perturbation", "DropoutVAE", "RBF", "Forest", "ForestFill"]:
adv_kernel_explainer = adv_kernel_explainers[explainer]
if explainer == "ForestFill":
explanations = adv_kernel_explainer.shap_values(xtest, fill_data=True, data_location="..\Data/cc_forest_shap.csv")
else:
explanations = adv_kernel_explainer.shap_values(xtest)
# format for display
formatted_explanations = []
for exp in explanations:
if explainer == "Perturbation":
formatted_explanations.append([(features[i], exp[i]) for i in range(len(exp))])
else:
formatted_explanations.append([(original_names[i], exp[i]) for i in range(len(exp))])
print (f"SHAP Ranks and Pct Occurances two unrelated features, adversarial: {adversarial}, explainer: {explainer}:")
if explainer == "Perturbation":
summary = experiment_summary(formatted_explanations, features)
else:
summary = experiment_summary(formatted_explanations, original_names)
print (summary)
print ("Fidelity:",round(adv_shap.fidelity(xtest),2))
file_name = f"../Results/CCShap/ccShapSummary2_adversarial_{adversarial}_explainer_{explainer}.csv"
with open(file_name, "w") as output:
w = csv.writer(output)
for key, val in summary.items():
w.writerow([key] + [pair for pair in val])
print ('---------------------')
if __name__ == "__main__":
experiment_main()
| 44.269113 | 165 | 0.739431 | 1,977 | 14,476 | 5.189176 | 0.195245 | 0.04094 | 0.031387 | 0.03821 | 0.568964 | 0.536894 | 0.5328 | 0.526757 | 0.511941 | 0.499269 | 0 | 0.011237 | 0.139334 | 14,476 | 326 | 166 | 44.404908 | 0.812184 | 0.161509 | 0 | 0.452128 | 0 | 0 | 0.14111 | 0.031671 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037234 | false | 0 | 0.090426 | 0.021277 | 0.170213 | 0.037234 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ecc865ec16c2002b73d85c179384040a3205dd6 | 7,303 | py | Python | handler/config/zookeeper.py | knightoning/zkdash | de1eaf1f02629f87e4a75f9709bb242fcaf56c10 | [
"Apache-2.0"
] | 748 | 2015-10-15T04:06:31.000Z | 2022-01-29T09:28:37.000Z | handler/config/zookeeper.py | knightoning/zkdash | de1eaf1f02629f87e4a75f9709bb242fcaf56c10 | [
"Apache-2.0"
] | 30 | 2015-10-18T09:56:42.000Z | 2021-02-19T05:55:23.000Z | handler/config/zookeeper.py | knightoning/zkdash | de1eaf1f02629f87e4a75f9709bb242fcaf56c10 | [
"Apache-2.0"
] | 243 | 2015-10-15T06:59:48.000Z | 2022-03-19T18:58:05.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2014,掌阅科技
All rights reserved.
Summary: zookeeper.py
Author: warship
Created: 2015-06-23
"""
import urllib
import operator
import json
from tornado.web import authenticated
from peewee import OperationalError
from handler.bases import CommonBaseHandler
from handler.bases import ArgsMap
from lib import route
from lib.excel import ExcelWorkBook
from model.db.zd_zookeeper import ZdZookeeper
from service import zookeeper as ZookeeperService
from conf import log
@route(r'/config/zookeeper/index', '查看')
class ZdZookeeperIndexHandler(CommonBaseHandler):
    '''index, view
'''
args_list = [
ArgsMap('pageSize', 'page_size', default=30),
ArgsMap('pageCurrent', 'current_page', default=1),
ArgsMap('orderDirection', 'order_direction', default="asc"),
ArgsMap('orderField', 'order_field', default="id"),
]
@authenticated
def response(self):
'''index
'''
clauses = self.parse_query(ZdZookeeper)
order = getattr(ZdZookeeper, self.order_field)
records = ZdZookeeper.select().order_by(
getattr(order, self.order_direction)()
).where(reduce(operator.and_, clauses))
self.render('config/zookeeper/index.html',
action='/config/zookeeper/index',
total=records.count(),
current_page=self.current_page,
page_size=self.page_size,
records=records.paginate(self.current_page, self.page_size))
@route(r'/config/zookeeper/show', '状态查看')
class ZdZookeeperViewHandler(CommonBaseHandler):
    '''index, view
'''
@authenticated
def response(self):
'''index
'''
zk_clusters = ZdZookeeper.select().where(
ZdZookeeper.deleted == "0")
self.render('config/zookeeper/stat.html',
zk_clusters=zk_clusters)
@route(r'/config/zookeeper/stat')
class ZdZookeeperStatHandler(CommonBaseHandler):
"""stat
"""
args_list = [
ArgsMap('host', required=True)
]
@authenticated
def response(self):
"""stat
"""
cluster_info = ZookeeperService.get_stat(self.host)
self.render('config/zookeeper/statdetail.html',
cluster_info=cluster_info)
@route(r'/config/zookeeper/search')
class ZdZookeeperSearchHandler(CommonBaseHandler):
    '''search
'''
args_list = [
ArgsMap('pageSize', 'page_size', default=30),
ArgsMap('pageCurrent', 'current_page', default=1),
ArgsMap('orderDirection', 'order_direction', default="asc"),
ArgsMap('orderField', 'order_field', default="id"),
]
@authenticated
def response(self):
'''search
'''
clauses = self.parse_query(ZdZookeeper)
order = getattr(ZdZookeeper, self.order_field)
records = ZdZookeeper.select().order_by(
getattr(order, self.order_direction)()
).where(reduce(operator.and_, clauses))
self.render('config/zookeeper/datagrid.html',
total=records.count(),
current_page=self.current_page,
page_size=self.page_size,
records=records.paginate(self.current_page, self.page_size))
@route(r'/config/zookeeper/save')
class ZdZookeeperSaveHandler(CommonBaseHandler):
"""save
"""
args_list = [
ArgsMap('id', default=''),
ArgsMap('cluster_name', default=''),
ArgsMap('hosts', default=''),
ArgsMap('business', default=''),
]
@authenticated
def response(self):
'''add
'''
if self.id:
            # update an existing record
tb_inst = ZdZookeeper.one(id=self.id)
else:
            # create a new record
zookeeper = ZdZookeeper.one(cluster_name=self.cluster_name, deleted='0')
            # check whether the cluster name is duplicated
if zookeeper:
return self.ajax_popup(code=300, msg="zookeeper集群名称重复!")
else:
tb_inst = ZdZookeeper()
if self.id:
tb_inst.id = self.id
if self.cluster_name:
tb_inst.cluster_name = self.cluster_name
if self.hosts:
tb_inst.hosts = self.hosts
if self.business:
tb_inst.business = self.business
tb_inst.save()
return self.ajax_ok(forward="/config/zookeeper/index")
@route(r'/config/zookeeper/add', '新增')
class ZdZookeeperAddHandler(CommonBaseHandler):
    '''add, create
'''
@authenticated
def response(self):
'''add
'''
return self.render('config/zookeeper/add.html',
action='config/zookeeper/save')
@route(r'/config/zookeeper/edit', '修改')
class ZdZookeeperEditHandler(CommonBaseHandler):
"""edit, 修改
"""
args_list = [
ArgsMap('info_ids', default=''),
]
def response(self):
'''edit
'''
if self.info_ids:
id_li = self.info_ids.split(',')
if len(id_li) != 1:
return self.ajax_popup(close_current=False, code=300, msg="请选择单条记录进行修改")
record = ZdZookeeper.one(id=id_li[0])
return self.render('config/zookeeper/edit.html',
action='/config/zookeeper/save',
record=record)
else:
return self.ajax_popup(close_current=False, code=300, msg="请选择某条记录进行修改")
@route(r'/config/zookeeper/delete', '删除')
class ZdZookeeperDeleteHandler(CommonBaseHandler):
"""delete, 删除
"""
args_list = [
ArgsMap('info_ids', default=''),
]
def response(self):
'''delete
'''
if not self.info_ids:
return self.ajax_popup(close_current=False, code=300, msg="请选择某条记录进行删除")
id_list = self.info_ids.split(',')
try:
del_query = ZdZookeeper.delete().where(ZdZookeeper.id << id_list)
del_query.execute()
except OperationalError as exc:
log.error("error occurred while delete zookeepers, ids: %s\n%s", id_list, str(exc))
return self.ajax_popup(close_current=False, code=300, msg="删除失败!")
return self.ajax_ok(close_current=False)
@route(r'/config/zookeeper/export', '导出')
class ZdZookeeperExportHandler(CommonBaseHandler):
"""export,导出数据到excel
"""
args_list = [
ArgsMap('info_ids', default=''),
]
def response(self):
        '''Export the selected records to an Excel file
'''
id_li = self.info_ids.split(',')
sheet_text = ZdZookeeper.select().where(ZdZookeeper.id << id_li)
sheet_title = [
{'name': '集群名称'},
{'name': '集群配置'},
{'name': '集群业务'},
]
bind_attr = (
'cluster_name',
'hosts',
'business',
)
ewb = ExcelWorkBook()
sheet_name = ZdZookeeper._meta.db_table
ewb.add_sheet(sheet_name)
ewb.add_title(sheet_name, sheet_title)
ewb.add_text(sheet_name, sheet_text, bind=bind_attr)
filename = '{}.xls'.format(sheet_name)
filename = urllib.urlencode({'filename': filename})
self.set_header('Content-Disposition', 'attachment;{}'.format(filename))
self.finish(ewb.get_stream())
| 29.095618 | 95 | 0.591812 | 758 | 7,303 | 5.560686 | 0.259894 | 0.067616 | 0.025623 | 0.04484 | 0.387663 | 0.323132 | 0.313642 | 0.313642 | 0.313642 | 0.282325 | 0 | 0.0072 | 0.277283 | 7,303 | 250 | 96 | 29.212 | 0.791398 | 0.059154 | 0 | 0.353659 | 0 | 0 | 0.136357 | 0.067809 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054878 | false | 0 | 0.073171 | 0 | 0.280488 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7eccb6a95030a4d96c51e5bd713941b372e4bc45 | 863 | py | Python | code/count_word_lengts.py | philiptkd/squad | d0030d45829e087c0eef8896e853289ead560e6f | [
"Apache-2.0"
] | null | null | null | code/count_word_lengts.py | philiptkd/squad | d0030d45829e087c0eef8896e853289ead560e6f | [
"Apache-2.0"
] | null | null | null | code/count_word_lengts.py | philiptkd/squad | d0030d45829e087c0eef8896e853289ead560e6f | [
"Apache-2.0"
] | null | null | null | import matplotlib.pyplot as plt
def get_word_lengths(filename):
word_lens = []
with open(filename, 'r') as f:
for line in f:
word_list = line.split()
for word in word_list:
word_lens.append(len(word))
return word_lens
plt.subplot(221)
context_lens = get_word_lengths('train.context')
plt.hist(context_lens, range=(1,20))
plt.title('Context Word Lengths')
plt.subplot(222)
question_lens = get_word_lengths('train.question')
plt.hist(question_lens, range=(1,20))
plt.title('Question Word Lengths')
plt.subplot(223)
context_lens = get_word_lengths('train.context')
plt.hist(context_lens, range=(20,50))
#plt.title('Context Word Lengths')
plt.subplot(224)
question_lens = get_word_lengths('train.question')
plt.hist(question_lens, range=(20,50))
#plt.title('Question Word Lengths')
plt.show()
| 25.382353 | 50 | 0.70336 | 129 | 863 | 4.527132 | 0.294574 | 0.169521 | 0.119863 | 0.123288 | 0.671233 | 0.671233 | 0.544521 | 0.421233 | 0.421233 | 0.421233 | 0 | 0.035912 | 0.161066 | 863 | 33 | 51 | 26.151515 | 0.770718 | 0.077636 | 0 | 0.166667 | 0 | 0 | 0.121059 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.041667 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ece9b61955f1688202b924feba0455e9a35006f | 2,435 | py | Python | converter/converter_tensorflow.py | darianfrajberg/polimidl_converter | 059e1edaaab1cf236aa679d882490b17f9491c0d | [
"MIT"
] | 1 | 2019-04-12T17:15:21.000Z | 2019-04-12T17:15:21.000Z | converter/converter_tensorflow.py | darianfrajberg/polimidl_converter | 059e1edaaab1cf236aa679d882490b17f9491c0d | [
"MIT"
] | null | null | null | converter/converter_tensorflow.py | darianfrajberg/polimidl_converter | 059e1edaaab1cf236aa679d882490b17f9491c0d | [
"MIT"
] | null | null | null | #!/usr/bin/python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import tensorflow as tf
from converter.converter import *
from converter.layer_tensorflow import *
os.environ["CUDA_VISIBLE_DEVICES"]= '-1' # use CPU
os.environ['TF_CPP_MIN_LOG_LEVEL']= '2' # INFO and WARNING messages are not printed
def convert(model_path, model_name, output_path = 'converted_models'):
converter = ConverterTensorflow(model_path, model_name, output_path)
converter.execute()
class ConverterTensorflow(Converter):
def __init__(self, model_path, model_name, output_path):
super(ConverterTensorflow, self).__init__(model_path, model_name, output_path)
self.graph = None
def load_model(self):
if not os.path.exists(self.model_path):
raise ValueError('The specified file does not exist: {}'.format(self.model_path))
graph_def = None
try:
with tf.gfile.GFile(self.model_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
except BaseException as e:
raise ValueError('Error loading the graph definition')
try:
assert graph_def is not None
tf.import_graph_def(graph_def, name='')
self.graph = tf.get_default_graph()
except BaseException as e:
raise e
    @staticmethod
    def get_tensors_with_weights(tf_operation):
return [op_input for op_input in tf_operation.inputs if op_input.name.endswith('read:0')]
def parse_layers(self):
layer_factory = self.layer_factory()
layers = []
constant_operations = ['Const', 'Placeholder','Identity', 'Shape', 'Squeeze', 'Reshape']
with tf.Session() as sess:
operations = [op for op in sess.graph.get_operations() if not op.type in constant_operations]
for op in operations:
layer = layer_factory.create_layer(op)
if layer is not None:
layer.parse(op, sess)
layers.append(layer)
else:
raise ValueError('Not supported layer: {}'.format(op))
return layers
def layer_factory(self):
return LayerTensorflowFactory()
| 33.356164 | 105 | 0.633265 | 290 | 2,435 | 5.051724 | 0.386207 | 0.043003 | 0.043686 | 0.049147 | 0.113311 | 0.076451 | 0 | 0 | 0 | 0 | 0 | 0.001715 | 0.281725 | 2,435 | 72 | 106 | 33.819444 | 0.835906 | 0.027105 | 0 | 0.078431 | 0 | 0 | 0.086221 | 0 | 0 | 0 | 0 | 0 | 0.019608 | 1 | 0.117647 | false | 0 | 0.176471 | 0.039216 | 0.372549 | 0.019608 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ecff9a6f47f5e695b0cfd7ef8279b71a169f16c | 3,179 | py | Python | kiwi/main.py | jaywayjayway/kiwi | 6f98fb7ee4e8e0b8450f4e826f74ef38e248966d | [
"Apache-2.0"
] | null | null | null | kiwi/main.py | jaywayjayway/kiwi | 6f98fb7ee4e8e0b8450f4e826f74ef38e248966d | [
"Apache-2.0"
] | null | null | null | kiwi/main.py | jaywayjayway/kiwi | 6f98fb7ee4e8e0b8450f4e826f74ef38e248966d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import os
import sys
import argparse
import logging
import manager
import defaults
import interface
import firewall
import utils as logging
LOG = logging.getLogger(__name__)
def parse_args():
    p = argparse.ArgumentParser()

    p.add_argument('--agent-id', '--id')
    p.add_argument('--refresh-interval',
                   default=defaults.refresh_interval,
                   type=int)
    p.add_argument('--reconnect-interval',
                   default=defaults.reconnect_interval,
                   type=int)

    g = p.add_argument_group('API endpoints')
    g.add_argument('--kube-endpoint', '-k',
                   default=defaults.kube_endpoint)
    g.add_argument('--etcd-endpoint', '-s',
                   default=defaults.etcd_endpoint)
    g.add_argument('--etcd-prefix', '-p',
                   default=defaults.etcd_prefix)

    g = p.add_argument_group('Network options')
    g.add_argument('--interface', '-i',
                   default=defaults.interface)
    g.add_argument('--fwchain',
                   default=defaults.fwchain)
    g.add_argument('--fwmark',
                   type=int,
                   default=defaults.fwmark)
    g.add_argument('--cidr-range', '-r',
                   action='append')
    g.add_argument('--no-driver', '-n',
                   action='store_true')

    g = p.add_argument_group('Logging options')
    g.add_argument('--verbose', '-v',
                   action='store_const',
                   #const=logging.INFO,
                   const="INFO",
                   dest='loglevel')
    g.add_argument('--debug', '-d',
                   action='store_const',
                   #const=logging.DEBUG,
                   const="DEBUG",
                   dest='loglevel')
    g.add_argument('--debug-requests',
                   action='store_true')

    #p.set_defaults(loglevel=logging.WARN)
    return p.parse_args()


def main():
    args = parse_args()

    #logging.basicConfig(
    #    level=args.loglevel,
    #    format='%(name)s [%(process)d] %(levelname)s %(message)s')
    #if args.loglevel and not args.debug_requests:
    #    logging.getLogger('requests').setLevel(logging.DEBUG)

    LOG.info('Starting up')
    LOG.info('Kubernetes is %s', args.kube_endpoint)
    LOG.info('Etcd is %s', args.etcd_endpoint)
    LOG.info('Managing interface %s', args.interface)

    if args.no_driver:
        iface_driver = None
        fw_driver = None
    else:
        iface_driver = interface.Interface(args.interface)
        fw_driver = firewall.Firewall(fwchain=args.fwchain,
                                      fwmark=args.fwmark)

    mgr = manager.Manager(etcd_endpoint=args.etcd_endpoint,
                          kube_endpoint=args.kube_endpoint,
                          etcd_prefix=args.etcd_prefix,
                          iface_driver=iface_driver,
                          fw_driver=fw_driver,
                          cidr_ranges=args.cidr_range,
                          refresh_interval=args.refresh_interval,
                          id=args.agent_id)

    LOG.info('My id is: %s', mgr.id)
    mgr.run()


if __name__ == '__main__':
    main()
| 30.27619 | 67 | 0.558666 | 342 | 3,179 | 4.994152 | 0.27193 | 0.109485 | 0.077283 | 0.022834 | 0.126464 | 0.033958 | 0 | 0 | 0 | 0 | 0 | 0 | 0.315823 | 3,179 | 104 | 68 | 30.567308 | 0.785287 | 0.094369 | 0 | 0.106667 | 0 | 0 | 0.135192 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026667 | false | 0 | 0.12 | 0 | 0.16 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ed189dba44b07d4a35925692faa29cdb17a3b18 | 795 | py | Python | api/classifier/categorizer/tokenizer/symspell_example.py | JonathanRys/fooddata | 1872b4661aeabc8a98f1e71db2290040b825223e | [
"Apache-2.0"
] | null | null | null | api/classifier/categorizer/tokenizer/symspell_example.py | JonathanRys/fooddata | 1872b4661aeabc8a98f1e71db2290040b825223e | [
"Apache-2.0"
] | null | null | null | api/classifier/categorizer/tokenizer/symspell_example.py | JonathanRys/fooddata | 1872b4661aeabc8a98f1e71db2290040b825223e | [
"Apache-2.0"
] | null | null | null | import os
from sympound import sympound
import platform
distancefun = None
if platform.system() != "Windows":
    from pyxdameraulevenshtein import damerau_levenshtein_distance
    distancefun = damerau_levenshtein_distance
else:
    from jellyfish import levenshtein_distance
    distancefun = levenshtein_distance

ssc = sympound(distancefun=distancefun, maxDictionaryEditDistance=3)


def test():
    if ssc.load_dictionary("big.txt"):
        print(ssc.lookup_compound(input_string="brocoli", edit_distance_max=3))
    result = distancefun("crapple", "apple")
    print(result)
    #ssc.save_pickle("symspell.pickle")
    #ssc.load_pickle("symspell.pickle")
    #print(ssc.lookup_compound(input_string="བཀྲ་ཤས་བད་ལེགས། ལ་མ་", edit_distance_max=3))


if __name__ == '__main__':
    test()
| 26.5 | 89 | 0.744654 | 102 | 795 | 5.637255 | 0.5 | 0.132174 | 0.090435 | 0.076522 | 0.114783 | 0.114783 | 0 | 0 | 0 | 0 | 0 | 0.004412 | 0.144654 | 795 | 29 | 90 | 27.413793 | 0.829412 | 0.191195 | 0 | 0 | 0 | 0 | 0.064063 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.277778 | 0 | 0.333333 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ed1ce007a5103a18cc72ac0d86bb123f9d77dfb | 3,721 | py | Python | ex2d_pointSource.py | mjjjjm/helmholtz | 8ebe8e28fc4752c4da1d97b0ab3f25ae9debc44e | [
"MIT"
] | null | null | null | ex2d_pointSource.py | mjjjjm/helmholtz | 8ebe8e28fc4752c4da1d97b0ab3f25ae9debc44e | [
"MIT"
] | null | null | null | ex2d_pointSource.py | mjjjjm/helmholtz | 8ebe8e28fc4752c4da1d97b0ab3f25ae9debc44e | [
"MIT"
] | 1 | 2020-05-27T21:26:06.000Z | 2020-05-27T21:26:06.000Z | import os, sys, time
import numpy as np
import dolfin as df
from HelmholtzSolver import *
from scipy.special import hankel1
#import matplotlib.pylab as plt
#from mpl_toolkits.mplot3d import Axes3D
#from matplotlib import cm
## =====================================================================================
## -------------------------------------------------------------------------------------
def waveSpeed(x):
    return 1.0
## -------------------------------------------------------------------------------------
def damping(x):
    return 0.0
## -------------------------------------------------------------------------------------
def density(x):
    return 1.0
## -------------------------------------------------------------------------------------
omega = 2.0*np.pi*8.0
meshOpt = {'nXElem':100,\
'nYElem':100,\
'polynomialOrder':1,\
'stretchMesh':False,\
}
bcOpt = {'left':{'DBC':True,\
'real':df.Constant(0.0),\
'imag':df.Constant(0.0),},\
'right':{'DBC':True,\
'real':df.Constant(0.0),\
'imag':df.Constant(0.0),},\
'bottom':{'DBC':True,\
'real':df.Constant(0.0),\
'imag':df.Constant(0.0),},\
'top':{'DBC':True,\
'real':df.Constant(0.0),\
'imag':df.Constant(0.0),},\
}
sourceOpt = {'real':{'choice':'pointSource',\
'pointSourceLoc':np.array([0.5,0.5]),\
'pointSourceMag':1.0},
'imag':{'choice':'none'},\
}
WN = WaveNumber(omega,waveSpeed,damping)
materialOpt = {'waveNumber':WN.evalWaveNumber,\
'density':density,\
}
pmlOpt = {'left':True,\
'right':True,\
'bottom':True,\
'top':True,\
'exponent':2,\
'sigmaMax':5000,\
'numPmlWaveLengths':2,\
'pmlWaveNumber':omega,\
}
## Instantiate Helmoltz solver class with the options
## ==================================================
HS = HelmholtzSolver(2,meshOpt,bcOpt,sourceOpt,materialOpt,pmlOpt)
## Write numerical solution to vtk file
## ====================================
file = df.File("ex2d_pointSource_realNumericalSoln.pvd")
file << HS.uSolnReal
file = df.File("ex2d_pointSource_imagNumericalSoln.pvd")
file << HS.uSolnImag
## Plot the numerical soltion
## ==========================
'''
df.plot(HS.domains,title="domain partitioned")
df.plot(HS.uSolnReal,title="real(numerical soln)")
df.plot(HS.uSolnImag,title="imag(numerical soln)")
df.interactive()
'''
## Compare solution with exact solution u = i*H_0^1(ik|x|)/4
## =========================================================
'''
def analyticalSoln(X,Y,sourceLoc,waveNumber,returnReal=True):
    Z = np.sqrt((X-sourceLoc[0])**2 + (Y-sourceLoc[1])**2)
    uAnalyticalSoln = (1j/4.0)*hankel1(0,waveNumber*Z)
    if returnReal:
        return np.real(uAnalyticalSoln)
    else:
        return np.imag(uAnalyticalSoln)
x = np.linspace(0,1,501)
y = np.linspace(0,1,501)
X,Y = np.meshgrid(x,y)
sourceLoc = sourceOpt['real']['pointSourceLoc']
aSolnReal = analyticalSoln(X,Y,sourceLoc,omega)
aSolnImag = analyticalSoln(X,Y,sourceLoc,omega,returnReal = False)
fig = plt.figure(1)
fig.clf()
ax1 = fig.add_subplot(121)
plot1 = ax1.contourf(X,Y,aSolnReal,cmap=cm.coolwarm)
cbar1 = plt.colorbar(plot1)
cbar1.ax.set_ylabel('real(analytical solution)')
ax2 = fig.add_subplot(122)
plot2 = ax2.contourf(X,Y,aSolnImag,cmap=cm.coolwarm)
cbar2 = plt.colorbar(plot2)
cbar2.ax.set_ylabel('imag(analytical solution)')
plt.show()
'''
| 30.252033 | 88 | 0.494222 | 382 | 3,721 | 4.787958 | 0.36911 | 0.009841 | 0.048114 | 0.052488 | 0.161837 | 0.085293 | 0.085293 | 0.085293 | 0.085293 | 0.085293 | 0 | 0.031081 | 0.204515 | 3,721 | 122 | 89 | 30.5 | 0.586824 | 0.233808 | 0 | 0.188679 | 0 | 0 | 0.182232 | 0.04328 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056604 | false | 0 | 0.09434 | 0.056604 | 0.207547 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ed2c5c2983f9a79dac07747ec18377c4b57d177 | 1,034 | py | Python | conf.py | sinoroc/kb | c3b7d88c3b6b4911f5da5414647bb421080cc5c5 | [
"CC0-1.0"
] | 3 | 2019-01-29T22:16:38.000Z | 2019-11-17T17:42:19.000Z | conf.py | sinoroc/kb | c3b7d88c3b6b4911f5da5414647bb421080cc5c5 | [
"CC0-1.0"
] | 2 | 2021-03-21T09:44:02.000Z | 2021-06-28T17:06:25.000Z | conf.py | sinoroc/kb | c3b7d88c3b6b4911f5da5414647bb421080cc5c5 | [
"CC0-1.0"
] | null | null | null | """ Sphinx documentation generator configuration
"""
AUTHOR = 'sinoroc'
MASTER_DOCUMENT = 'contents'
SUBTITLE = "Bits of knowledge"
TITLE = "Sinoroc KB"
#
# General
#
extensions = [
'sphinx.ext.graphviz',
]
master_doc = MASTER_DOCUMENT
suppress_warnings = [
'download.not_readable',
]
templates_path = [
'src/_templates',
]
#
# Project
#
project = TITLE
#
# HTML
#
html_show_copyright = False
html_show_sphinx = False
html_sidebars = {
# 'about.html' provided by 'alabaster' theme
'**': [
'about.html',
'globaltoc.html',
'searchbox.html',
],
}
html_theme_options = {
'description': SUBTITLE,
}
html_title = TITLE
html_use_modindex = False
html_use_index = False
#
# Latex
#
latex_documents = [(
MASTER_DOCUMENT,
'{}.tex'.format(TITLE.lower().replace(' ', '')),
TITLE,
AUTHOR,
'manual',
)]
latex_elements = {
'papersize': 'a4paper',
}
latex_show_pagerefs = True
latex_show_urls = 'footnote'
latex_toplevel_sectioning = 'part'
# EOF
| 12.023256 | 52 | 0.643133 | 106 | 1,034 | 6.009434 | 0.603774 | 0.065934 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001238 | 0.218569 | 1,034 | 85 | 53 | 12.164706 | 0.787129 | 0.116054 | 0 | 0 | 0 | 0 | 0.210526 | 0.023516 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ed6b069109cb7bd51d1f1d78c3e3daa6be5a88a | 13,510 | py | Python | applications/incompressible_fluid_application/test_examples/edgebased_fixed_press.gid/run_benchmark.py | lcirrott/Kratos | 8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea | [
"BSD-4-Clause"
] | 2 | 2019-10-25T09:28:10.000Z | 2019-11-21T12:51:46.000Z | applications/incompressible_fluid_application/test_examples/edgebased_fixed_press.gid/run_benchmark.py | lcirrott/Kratos | 8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea | [
"BSD-4-Clause"
] | 13 | 2019-10-07T12:06:51.000Z | 2020-02-18T08:48:33.000Z | applications/incompressible_fluid_application/test_examples/edgebased_fixed_press.gid/run_benchmark.py | lcirrott/Kratos | 8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea | [
"BSD-4-Clause"
] | 1 | 2020-06-12T08:51:24.000Z | 2020-06-12T08:51:24.000Z | from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
import problem_settings
#
#
# setting the domain size for the problem to be solved
domain_size = problem_settings.domain_size
#
#
# ATTENTION: here the order is important
# including kratos path
import sys
# kratos_root/benchmarking
kratos_benchmarking_path = '../../../../benchmarking'
sys.path.append(kratos_benchmarking_path)
# importing Kratos main library
from KratosMultiphysics import *
# from now on the order is not anymore crucial
#
#
from KratosMultiphysics.IncompressibleFluidApplication import *
from KratosMultiphysics.MeshingApplication import *
import benchmarking
# defining a model part for the fluid and one for the structure
fluid_model_part = ModelPart("FluidPart")
cut_model_part = ModelPart("CutPart")
#
# importing the solvers needed
import edgebased_levelset_solver
edgebased_levelset_solver.AddVariables(fluid_model_part)
# introducing input file name
input_file_name = problem_settings.problem_name
# reading the fluid part
gid_mode = GiDPostMode.GiD_PostBinary
multifile = MultiFileFlag.MultipleFiles
deformed_mesh_flag = WriteDeformedMeshFlag.WriteUndeformed
write_conditions = WriteConditionsFlag.WriteConditions
# selecting output format
gid_io = GidIO(
input_file_name,
gid_mode,
multifile,
deformed_mesh_flag,
write_conditions)
model_part_io_fluid = ModelPartIO(input_file_name)
model_part_io_fluid.ReadModelPart(fluid_model_part)
# setting up the buffer size: SHOULD BE DONE AFTER READING!!!
fluid_model_part.SetBufferSize(2)
# adding dofs
edgebased_levelset_solver.AddDofs(fluid_model_part)
# we assume here that all of the internal nodes are marked with a negative distance
# set the distance of all of the internal nodes to a small value
small_value = 0.0001
n_active = 0
for node in fluid_model_part.Nodes:
    dist = node.GetSolutionStepValue(DISTANCE)
    if(dist < 0.0):
        n_active = n_active + 1
        node.SetSolutionStepValue(DISTANCE, 0, -small_value)
    else:
        node.SetSolutionStepValue(DISTANCE, 0, small_value)
if(n_active == 0):
    raise Exception("ERROR. At least one node has to be initialized with a distance lesser than 0")
# make sure that the porosity is not zero on any node (set by default to
# fluid only)
for node in fluid_model_part.Nodes:
    if(node.GetSolutionStepValue(POROSITY) == 0.0):
        node.SetSolutionStepValue(POROSITY, 0, 1.0)
    if(node.GetSolutionStepValue(DIAMETER) == 0.0):
        node.SetSolutionStepValue(DIAMETER, 0, 1.0)
# constructing the solver
body_force = Vector(3)
body_force[0] = problem_settings.body_force_x
body_force[1] = problem_settings.body_force_y
body_force[2] = problem_settings.body_force_z
if(body_force[0] == 0.0 and body_force[1] == 0.0 and body_force[2] == 0.0):
    raise Exception("ERROR. Body Force cannot be a ZERO VECTOR")
viscosity = problem_settings.viscosity
density = problem_settings.density
fluid_solver = edgebased_levelset_solver.EdgeBasedLevelSetSolver(
fluid_model_part, domain_size, body_force, viscosity, density)
fluid_solver.redistance_frequency = problem_settings.redistance_frequency
fluid_solver.extrapolation_layers = int(problem_settings.extrapolation_layers)
fluid_solver.stabdt_pressure_factor = problem_settings.stabdt_pressure_factor
fluid_solver.stabdt_convection_factor = problem_settings.stabdt_convection_factor
fluid_solver.use_mass_correction = problem_settings.use_mass_correction
fluid_solver.tau2_factor = problem_settings.tau2_factor
fluid_solver.edge_detection_angle = problem_settings.edge_detection_angle
fluid_solver.assume_constant_pressure = problem_settings.assume_constant_pressure
# 0 = None; 1 = Ergun; 2 = Custom;
fluid_solver.compute_porous_resistance_law = int(
problem_settings.compute_porous_resistance_law)
pressure_fixed = problem_settings.pressure_fixed
fix_location = problem_settings.fix_location
Z_coord_free_surface = problem_settings.Z_coord_free_surface
Z_coord_bottom = problem_settings.Z_coord_bottom
X_coord_I_O = problem_settings.X_coord_I_O
free_surface_Z_coord = problem_settings.free_surface_Z_coord
X1 = problem_settings.X1
Y1 = problem_settings.Y1
X2 = problem_settings.X2
Y2 = problem_settings.Y2
X3 = problem_settings.X3
Y3 = problem_settings.Y3
X4 = problem_settings.X4
Y4 = problem_settings.Y4
X5 = problem_settings.X5
Y5 = problem_settings.Y5
Xtol = problem_settings.Xtol
Ytol = problem_settings.Ytol
# print "compute_porous_resistance_law ", fluid_solver.compute_porous_resistance_law
if(pressure_fixed == "ON"):
node_list = []
for node in fluid_model_part.Nodes:
if(fix_location == "outlet"):
if(node.X > X_coord_I_O - 0.001):
if(node.Z < Z_coord_free_surface + 0.001):
if(node.Z > Z_coord_bottom - 0.001):
node_list.append(node)
if(fix_location == "inlet"):
if(node.X < X_coord_I_O + 0.001):
if(node.Z < Z_coord_free_surface + 0.001):
if(node.Z > Z_coord_bottom - 0.001):
node_list.append(node)
import math
for node in node_list:
Zcoord = node.Z
p = (Z_coord_free_surface - Zcoord) * (-body_force[2]) * density
node.SetSolutionStepValue(PRESSURE, 0, p)
node.Fix(PRESSURE)
fluid_solver.Initialize()
if(problem_settings.wall_law_y > 1e-10):
    fluid_solver.fluid_solver.ActivateWallResistance(
        problem_settings.wall_law_y)
#
def BenchmarkCheck(time, model_part):
    outlet_node = model_part.Nodes[2215]
    benchmarking.Output(time, "Time")
    benchmarking.Output(
        outlet_node.GetSolutionStepValue(DISTANCE),
        "distance on node 2215 ",
        None,
        0.01)
print("fluid solver created")
# settings to be changed
max_Dt = problem_settings.max_time_step
initial_Dt = 0.001 * max_Dt
final_time = problem_settings.max_time
output_dt = problem_settings.output_dt
safety_factor = problem_settings.safety_factor
number_of_inital_steps = problem_settings.number_of_inital_steps
initial_time_step = problem_settings.initial_time_step
out = 0
original_max_dt = max_Dt
max_safety_factor = safety_factor
time = 0.0
step = 0
next_output_time = output_dt
if(free_surface_Z_coord == 1):
number_points_x = 1
number_points_y = 1
X_position = [X1]
Y_position = [Y1]
position_tolerance_X = Xtol
position_tolerance_Y = Ytol
if(free_surface_Z_coord == 2):
number_points_x = 2
number_points_y = 2
X_position = [X1, X2]
Y_position = [Y1, Y2]
position_tolerance_X = Xtol
position_tolerance_Y = Ytol
if(free_surface_Z_coord == 3):
number_points_x = 3
number_points_y = 3
X_position = [X1, X2, X3]
Y_position = [Y1, Y2, Y3]
position_tolerance_X = Xtol
position_tolerance_Y = Ytol
if(free_surface_Z_coord == 4):
number_points_x = 4
number_points_y = 4
X_position = [X1, X2, X3, X4]
Y_position = [Y1, Y2, Y3, Y4]
position_tolerance_X = Xtol
position_tolerance_Y = Ytol
if(free_surface_Z_coord == 5):
number_points_x = 5
number_points_y = 5
X_position = [X1, X2, X3, X4, X5]
Y_position = [Y1, Y2, Y3, Y4, Y5]
position_tolerance_X = Xtol
position_tolerance_Y = Ytol
if(free_surface_Z_coord > 0):
f = open("altura_sup_lib.txt", "w")
f.write(' Tiempo ')
i = 0
for j in range(0, (number_points_y)):
for i in range(0, (number_points_x)):
if(i == j):
if (i + j < (number_points_x + number_points_y - 2)):
f.write(
'X = ' + str(X_position[i]) + ', Y = ' + str(Y_position[j]) + ' ')
if (i + j == (number_points_x + number_points_y - 2)):
f.write(
'X = ' + str(X_position[i]) + ', Y = ' + str(Y_position[j]) + '\n')
i = i + 1
j = j + 1
f.close()
while(time < final_time):
if(step < number_of_inital_steps):
max_Dt = initial_time_step
else:
max_Dt = original_max_dt
# progressively increment the safety factor
# in the steps that follow a reduction of it
safety_factor = safety_factor * 1.2
if(safety_factor > max_safety_factor):
safety_factor = max_safety_factor
Dt = fluid_solver.EstimateTimeStep(safety_factor, max_Dt)
time = time + Dt
fluid_model_part.CloneTimeStep(time)
cut_model_part.CloneTimeStep(time)
print("******** CURRENT TIME = ", time)
if(step >= 3):
fluid_solver.Solve()
check_dt = fluid_solver.EstimateTimeStep(0.95, max_Dt)
if(check_dt < Dt):
print("***********************************************************")
print("***********************************************************")
print("***********************************************************")
print(" *** REDUCING THE TIME STEP ***")
print("***********************************************************")
print("***********************************************************")
print("***********************************************************")
# we found a velocity too large! we need to reduce the time step
# this is to set the database to the value at the beginning of the
# step
fluid_solver.fluid_solver.ReduceTimeStep(fluid_model_part, time)
safety_factor *= problem_settings.reduction_on_failure
reduced_dt = fluid_solver.EstimateTimeStep(safety_factor, max_Dt)
print("time before reduction= ", time)
time = time - Dt + reduced_dt
print("reduced time = ", time)
print("Dt = ", Dt)
print("reduced_dt = ", reduced_dt)
# this is to set the database to the value at the beginning of the
# step
fluid_solver.fluid_solver.ReduceTimeStep(fluid_model_part, time)
fluid_solver.Solve()
# plotting benchmarking data
BenchmarkCheck(time, fluid_model_part)
if(time >= next_output_time):
Cut_App = Cutting_Isosurface_Application()
Cut_App.DeleteCutData(cut_model_part)
variable = DISTANCE
isovalue = 0
tolerance = 0.000000001
Cut_App.GenerateScalarVarCut(
fluid_model_part,
cut_model_part,
variable,
isovalue,
1,
tolerance)
if(free_surface_Z_coord > 0):
f = open("altura_sup_lib.txt", "a")
f.write(str(time) + ' ')
Cut_App.UpdateCutData(cut_model_part, fluid_model_part)
i = 1
for j in range(0, (number_points_y)):
for i in range(0, (number_points_x)):
n = 0
h = 0
if(i == j):
for node in cut_model_part.Nodes:
if (node.X > (X_position[i] - position_tolerance_X) and node.X < (X_position[i] + position_tolerance_X)):
if (node.Y > (Y_position[j] - position_tolerance_Y) and node.Y < (Y_position[j] + position_tolerance_Y)):
n = n + 1
h = h + node.Z
if (i + j < (number_points_x + number_points_y - 2)):
if (n > 0):
h_tot = h / n
print("Altura de la superficie libre en X = ", X_position[i], " , Y = ", Y_position[j], " , ", h_tot)
f.write(str(h_tot) + ' ')
else:
f.write('No data available ')
if (i + j == (number_points_x + number_points_y - 2)):
if (n > 0):
h_tot = h / n
print("Altura de la superficie libre en X = ", X_position[i], " , Y = ", Y_position[j], " , ", h_tot)
f.write(str(h_tot) + ' ' + '\n')
else:
f.write('No data available ' + '\n')
i = i + 1
j = j + 1
f.close()
Cut_App.AddModelPartElements(fluid_model_part, cut_model_part, 2)
# meh to be printed
gid_io.InitializeMesh(time)
gid_io.WriteMesh((cut_model_part).GetMesh())
gid_io.FinalizeMesh()
gid_io.InitializeResults(time, (cut_model_part).GetMesh())
Cut_App.UpdateCutData(cut_model_part, fluid_model_part)
gid_io.WriteNodalResults(PRESSURE, cut_model_part.Nodes, time, 0)
gid_io.WriteNodalResults(POROSITY, cut_model_part.Nodes, time, 0)
gid_io.WriteNodalResults(VELOCITY, cut_model_part.Nodes, time, 0)
gid_io.WriteNodalResults(DISTANCE, cut_model_part.Nodes, time, 0)
gid_io.WriteNodalResults(PRESS_PROJ, cut_model_part.Nodes, time, 0)
gid_io.WriteNodalResults(LIN_DARCY_COEF, cut_model_part.Nodes, time, 0)
gid_io.WriteNodalResults(
NONLIN_DARCY_COEF,
cut_model_part.Nodes,
time,
0)
gid_io.Flush()
gid_io.FinalizeResults()
next_output_time = time + output_dt
out = 0
out = out + 1
step = step + 1
# gid_io.FinalizeResults()
| 34.28934 | 137 | 0.629978 | 1,718 | 13,510 | 4.651921 | 0.183353 | 0.082583 | 0.02978 | 0.021271 | 0.32007 | 0.286787 | 0.245496 | 0.241992 | 0.206456 | 0.162287 | 0 | 0.020862 | 0.258475 | 13,510 | 393 | 138 | 34.37659 | 0.776902 | 0.094078 | 0 | 0.223404 | 0 | 0 | 0.076974 | 0.030986 | 0 | 0 | 0 | 0 | 0 | 1 | 0.003546 | false | 0 | 0.031915 | 0 | 0.035461 | 0.056738 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ed9b4129266e9f83be3f1bad375f5ef1c917868 | 7,365 | py | Python | fill_citi_bike_database.py | lucien-sim/citi-bike | 9b12083708d0166f7952c0136dab1ac12fa2937a | [
"MIT"
] | null | null | null | fill_citi_bike_database.py | lucien-sim/citi-bike | 9b12083708d0166f7952c0136dab1ac12fa2937a | [
"MIT"
] | null | null | null | fill_citi_bike_database.py | lucien-sim/citi-bike | 9b12083708d0166f7952c0136dab1ac12fa2937a | [
"MIT"
] | null | null | null |
# coding: utf-8
# # Initialize and fill citi_bike MySQL database
# In[3]:
import pandas as pd
import os
import pickle
data_path = './data'
# Connect to database with sqlalchemy library.
import sqlalchemy
from external_variables import sql_un,sql_pwd
database_username = sql_un
database_password = sql_pwd
database_ip = 'localhost'
conn_alchemy = sqlalchemy.create_engine('mysql+mysqlconnector://{0}:{1}@{2}'.
format(database_username, database_password,
database_ip)).connect()
# ## 1. Create the database, create a table to hold dock counts
# * DATABASE NAME: 'citi_bike'
# * TABLE NAME: 'dock_counts'
# In[4]:
# If citi_bike database already exists, use it. If not, create it, use it, create dock_counts table.
trans = conn_alchemy.begin()
try:
r2=conn_alchemy.execute("USE citi_bike")
trans.commit()
except:
r1=conn_alchemy.execute("CREATE DATABASE citi_bike")
r2=conn_alchemy.execute("USE citi_bike")
r3=conn_alchemy.execute("""
CREATE TABLE dock_counts
(
row_id INT NOT NULL AUTO_INCREMENT,
dock_id INT NOT NULL,
date_time DATETIME NOT NULL,
avail_bikes INT,
avail_docks INT,
tot_docks INT,
in_service INT,
status_key INT,
PRIMARY KEY (row_id)
);
""")
trans.commit()
# ## 2. Fill dock_counts with dock count data
# The cell below accomplishes two important tasks:
# * Add data to the dock_counts table in the citi_bike database, using pandas to_sql function. For each dock/time for which data is available, I save:
# * dock_id: ID's the CitiBike dock station
# * date_time: timestamp--derived from info in the count .csv files using the function: 'create_timestamp_citibike'
# * avail_bikes: number of bikes available at the dock station
# * avail_docks: number of docks available at the dock station
# * tot_docks: total number of docks at the dock station
# * in_service: indicates whether the dock station is in service (1 = yes)
# * status_key: not sure what this means so I saved it
# * Store information on all the docks in dock_dict. In dock_dict, keys are str(dock_id), entries are separate dicts that hold the full name, latitude, and longitude of each dock. I'll need all this information for mapping, but don't want to store it millions of times in the database.
# In[ ]:
def create_timestamp_citibike(orig_date,orig_hr,orig_min,pm):
# Year, month, day
year = int(orig_date[0:2])+2000
month = int(orig_date[3:5])
day = int(orig_date[6:8])
# Hour, minute, second
if pm == 1 and orig_hr != 12:
hour = orig_hr+12
elif pm == 0 and orig_hr == 12:
hour = 0
else:
hour = orig_hr
minute = orig_min
second = 0
return pd.Timestamp(year=year,month=month,day=day,hour=hour,minute=minute,second=second)
count_fnames = sorted(os.listdir(os.path.join(data_path,'dock_counts')))
dock_dict = {}
dtypes = {'dock_id': int, 'dock_name': str, 'date': str, 'hour': int, 'minute': int, 'pm': int,
'avail_bikes': int, 'avail_docks': int, 'tot_docks': int, '_lat': float,
'_long': float, 'in_service': int, 'status_key': int}
for fname in count_fnames:
fpathname = os.path.join(data_path,'dock_counts',fname)
# Skip 2015 -> there are issues with these files; I'll come back to them later.
if fpathname[-11:-7] != '2018':
continue
print(fpathname)
# Read dock counts from csv in chunks
for monthly_data in pd.read_csv(fpathname,sep='\t',dtype=dtypes,usecols=list(range(0,13)),chunksize=10**4):
# Create single timestamp column
monthly_data['date_time'] = [create_timestamp_citibike(row['date'],row['hour'],row['minute'],row['pm']) for _,row in monthly_data.iterrows()]
# Add any new docks to dock_dict
unique_docks = monthly_data['dock_id'].unique()
for dock in unique_docks:
if str(dock) not in dock_dict.keys():
row = monthly_data.loc[monthly_data['dock_id'] == dock].iloc[0,:]
dock_dict[str(row['dock_id'])] = {'dock_name': row['dock_name'], 'lat': row['_lat'], 'lon': row['_long']}
# Keep only needed columns
col_names = ['dock_id','date_time','avail_bikes','avail_docks','tot_docks','in_service','status_key']
monthly_data = monthly_data[col_names]
# Add to SQL database table...
try:
monthly_data.to_sql(name='dock_counts',con=conn_alchemy,if_exists='append',index=False)
except:
print('Issue adding dock_count data to database table.')
raise
# Save dock_dict after completing each file -> in case the system crashes.
from general_functions import save_pkl
save_pkl(os.path.join(data_path,'dock_dict.pkl'),dock_dict)
# ## 3. Add second table to hold info on the docks
# * Add new table dock_info to hold this data. Columns:
# * dock_id: dock ID
# * dock_name: name of dock
# * lat: latitude
# * lon: longitude
# * PRIMARY KEY: dock_id
# * Then fill the table using data from dock_dict
# * And add a FOREIGN KEY to dock_counts linking each dock_id in dock_counts to the corresponding row in the dock_info table.
# In[10]:
# Create new table to hold info on each dock_id
sql_create_docks = """CREATE TABLE dock_info
(
dock_id INT NOT NULL PRIMARY KEY,
dock_name VARCHAR(75) NOT NULL,
lat DEC(8,5) NOT NULL,
lon DEC(8,5) NOT NULL
);
"""
trans = conn_alchemy.begin()
try:
r1 = conn_alchemy.execute(sql_create_docks)
trans.commit()
except:
trans.rollback()
#raise
# Enter dock_info into new table.
from general_functions import save_pkl, load_pkl
dock_dict = load_pkl(os.path.join(data_path,'dock_dict.pkl'))
dock_ids,dock_name,dock_lat,dock_lon = [],[],[],[]
for dock_id in dock_dict.keys():
dock_ids.append(int(dock_id))
dock_name.append(dock_dict[dock_id]['dock_name'])
dock_lat.append(dock_dict[dock_id]['lat'])
dock_lon.append(dock_dict[dock_id]['lon'])
dock_info = pd.DataFrame({
'dock_id': dock_ids,
'dock_name': dock_name,
'lat': dock_lat,
'lon': dock_lon
})
try:
dock_info.to_sql(name='dock_info',con=conn_alchemy,if_exists='append',index=False)
except:
print('Issue adding dock information to dock_info database table.')
raise
# Add foreign key to dock_counts table.
sql_add_fk = """ALTER TABLE dock_counts
ADD CONSTRAINT dock_info_dock_id_fk
FOREIGN KEY (dock_id)
REFERENCES dock_info (dock_id);
"""
trans = conn_alchemy.begin()
try:
r1 = conn_alchemy.execute(sql_add_fk)
trans.commit()
except:
trans.rollback()
raise
# Close database connection.
conn_alchemy.close()
| 35.071429 | 286 | 0.620367 | 1,022 | 7,365 | 4.271037 | 0.24364 | 0.031615 | 0.016037 | 0.012829 | 0.229553 | 0.153494 | 0.11134 | 0.084307 | 0.084307 | 0.067354 | 0 | 0.011656 | 0.2778 | 7,365 | 209 | 287 | 35.239234 | 0.808987 | 0.305092 | 0 | 0.235294 | 0 | 0 | 0.326946 | 0.006717 | 0 | 0 | 0 | 0 | 0 | 1 | 0.008403 | false | 0.016807 | 0.058824 | 0 | 0.07563 | 0.02521 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7eda07cff3334840e0bbf23225b8df4ed68152c5 | 5,625 | py | Python | binarySearchTree.py | syeddabeer/0projects | e132628f3693ed40c5ea9055a6c79f8266196bae | [
"Apache-2.0"
] | null | null | null | binarySearchTree.py | syeddabeer/0projects | e132628f3693ed40c5ea9055a6c79f8266196bae | [
"Apache-2.0"
] | null | null | null | binarySearchTree.py | syeddabeer/0projects | e132628f3693ed40c5ea9055a6c79f8266196bae | [
"Apache-2.0"
] | null | null | null | # binary search tree
"""
Binary Search Tree (BST) on the other hand, is a special form of Binary Tree data structure
where each node has a comparable value, and smaller valued children attached to left and
larger valued children attached to the right.
"""
"""
O(log n) - for search, remove, add
O(n) - for iteration
"""
"""
Author: Dabeeruddin Syed
Code based on class of George Heineman
The code uses recursion a lot.
root, left, right are pointers.
"""
class BinaryNode:
    # I function
    def __init__(self, value):
        """Create binary node."""
        self.value = value
        # left and right are pointers here. Also, root is a pointer.
        self.left = None
        self.right = None

    # IV function - when self.root.add function is called.
    def add(self, val):
        """
        Add a new node to the tree with value. Respond based on Set semantics, less equal or greater than.
        """
        if val <= self.value:
            self.left = self.addToSubTree(self.left, val)
        elif val > self.value:
            self.right = self.addToSubTree(self.right, val)

    # V function - when self.root.add function is called.
    def addToSubTree(self, parent, val):
        """Add val to parent subtree (if exists) and return root of that subtree."""
        if parent is None:
            return BinaryNode(val)
        parent.add(val)
        return parent

    # VII function - when self.root.remove function is called.
    # add the element with the largest value from the left sub tree
    def remove(self, val):
        """
        Remove val of self from BinaryTree.
        """
        if val < self.value:
            self.left = self.removeFromParent(self.left, val)
        elif val > self.value:
            self.right = self.removeFromParent(self.right, val)
        else:
            # when the val is equal to the search value
            # find the largest value in the left subtree
            if self.left is None:
                return self.right
            child = self.left
            while child.right:
                child = child.right
            childKey = child.value
            self.left = self.removeFromParent(self.left, childKey)
            self.value = childKey
        return self

    # VIII function - finish with __contains__ function
    def removeFromParent(self, parent, val):
        """Helper method for remove. Ensures proper behavior when removing node that
        has children."""
        if parent:
            return parent.remove(val)
        return None

    def __repr__(self):
        """Useful debugging function to produce linear tree representation."""
        leftS = ''
        rightS = ''
        if self.left:
            leftS = str(self.left)
        if self.right:
            rightS = str(self.right)
        return "(L:" + leftS + " " + str(self.value) + " R:" + rightS + ")"

    def inorder(self):
        """In order traversal generator of tree rooted at given node."""
        if self.left:
            for v in self.left.inorder():
                yield v
        yield self.value
        if self.right:
            for v in self.right.inorder():
                yield v


class BinaryTree:
    # II function
    def __init__(self):
        """Create empty binary tree."""
        self.root = None

    # III function
    def add(self, value):
        """Insert value into proper location in Binary Tree."""
        if self.root is None:
            self.root = BinaryNode(value)
        else:
            self.root.add(value)

    # VI function
    def remove(self, val):
        """Remove value from tree."""
        if self.root:
            self.root = self.root.remove(val)

    def getMin(self):
        """Returns minimum value."""
        if self.root is None:
            raise ValueError("Binary Tree is empty")
        n = self.root
        while n.left != None:
            n = n.left
        return n.value

    def getMax(self):
        """Returns maximum value."""
        if self.root is None:
            raise ValueError("Binary Tree is empty")
        n = self.root
        while n.right != None:
            n = n.right
        return n.value

    # IX function - finish with this function
    def __contains__(self, target):
        """Check whether BST contains target value."""
        node = self.root
        while node:
            if target < node.value:
                node = node.left
            elif target > node.value:
                node = node.right
            else:
                return True
        return False

    def closest(self, target):
        """
        Return value closest to target. If there are several, then
        return one of them.
        """
        if self.root is None:
            return None
        best = node = self.root
        distance = abs(self.root.value - target)
        while node:
            if abs(node.value - target) < distance:
                distance = abs(node.value - target)
                best = node
            if target < node.value:
                node = node.left
            elif target > node.value:
                node = node.right
            else:
                return target
        return best.value

    def __iter__(self):
        """In order traversal of elements in the tree."""
        if self.root:
            for e in self.root.inorder():
                yield e

    def __repr__(self):
        if self.root is None:
            return "binary:()"
        return "binary:" + str(self.root)
"""
Change Log:
-----------
"""
| 28.69898 | 106 | 0.548444 | 676 | 5,625 | 4.522189 | 0.236686 | 0.057573 | 0.022898 | 0.019627 | 0.216879 | 0.197252 | 0.182859 | 0.147203 | 0.147203 | 0.119725 | 0 | 0 | 0.360178 | 5,625 | 195 | 107 | 28.846154 | 0.849403 | 0.267378 | 0 | 0.376147 | 0 | 0 | 0.017246 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.146789 | false | 0 | 0 | 0 | 0.311927 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7edac394361130920e6e934f61c419b14c1b5942 | 1,044 | py | Python | terminalone/models/audiencesegment.py | amehta1/t1-python | 4f7eb0bec7671b29baf3105b8cafafb373107e7b | [
"Apache-2.0"
] | 24 | 2015-07-09T18:49:10.000Z | 2021-06-07T18:36:58.000Z | terminalone/models/audiencesegment.py | amehta1/t1-python | 4f7eb0bec7671b29baf3105b8cafafb373107e7b | [
"Apache-2.0"
] | 100 | 2015-07-13T20:24:50.000Z | 2020-08-10T11:16:39.000Z | terminalone/models/audiencesegment.py | amehta1/t1-python | 4f7eb0bec7671b29baf3105b8cafafb373107e7b | [
"Apache-2.0"
] | 36 | 2015-07-09T18:51:48.000Z | 2022-02-14T22:44:37.000Z | # -*- coding: utf-8 -*-
"""Provides audience segment object."""
from __future__ import absolute_import
from .. import t1types
from ..entity import Entity
class AudienceSegment(Entity):
    """Audience segment entity object"""
    collection = 'audience_segments'
    resource = 'audience_segment'
    _relations = {
        'audience_vendor',
        'parent',
    }
    _pull = {
        'audience_vendor_id': int,
        'buyable': t1types.int_to_bool,
        'child_count': int,
        'code': None,
        'created_on': t1types.strpt,
        'full_path': None,
        'id': int,
        'name': None,
        'parent_audience_segment_id': int,
        'retail_cpm': float,
        'wholesale_cpm': float,
        'tag': None,
        'uniques': int,
        'updated_on': t1types.strpt,
        'version': int,
    }
    _push = _pull.copy()
    _push.update({
        'buyable': int,
    })

    def __init__(self, session, properties=None, **kwargs):
        super(AudienceSegment, self).__init__(session, properties, **kwargs)
| 24.857143 | 76 | 0.586207 | 104 | 1,044 | 5.557692 | 0.528846 | 0.103806 | 0.048443 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006631 | 0.277778 | 1,044 | 41 | 77 | 25.463415 | 0.759947 | 0.083333 | 0 | 0 | 0 | 0 | 0.213531 | 0.027484 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0 | 0.090909 | 0 | 0.30303 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7edba9882761987ea9d46c1c13d642ec4f17d86e | 5,802 | py | Python | getsequences.py | 82ndAirborneDiv/LpSubP | a92fc4db5a15ddba24249ca37075378ad331c702 | [
"CC0-1.0"
] | 1 | 2020-02-13T21:28:15.000Z | 2020-02-13T21:28:15.000Z | getsequences.py | 82ndAirborneDiv/LpSubP | a92fc4db5a15ddba24249ca37075378ad331c702 | [
"CC0-1.0"
] | null | null | null | getsequences.py | 82ndAirborneDiv/LpSubP | a92fc4db5a15ddba24249ca37075378ad331c702 | [
"CC0-1.0"
] | 3 | 2020-10-20T15:49:50.000Z | 2020-12-21T20:52:46.000Z | import os
import subprocess
import sys
script=sys.argv[0]
base_dir=sys.argv[1]+"/prod_fasta/"
coregenome_dir=sys.argv[1]+"../coregene/"
os.chdir(base_dir)
def MakeBlastDB():
os.system("mkdir output")
for genome in os.listdir('.'):
genome=os.path.join(base_dir,genome)
base=os.path.basename(genome)
basename=base.split(".")[0]
subprocess.call(["makeblastdb","-in",genome,"-dbtype","nucl","-out","./output"+"/"+basename])
MakeBlastDB()
child_processes=[]
def RunBlast():
os.system("mkdir blastoutput")
for query in os.listdir(coregenome_dir):
if query.endswith(".fasta"):
query=os.path.join(coregenome_dir,query)
baseq=os.path.basename(query)
filename =os.path.splitext(baseq)[0]
for database in os.listdir(base_dir+"/output"):
database=os.path.join(base_dir+"/output",database)
basedb=os.path.basename(database)
print(basedb)
dbname=basedb.split(".")[0]
databasename =os.path.join(base_dir+"/output",basedb.split(".")[0])
p=subprocess.Popen(["blastn","-query",query,"-db",databasename,"-outfmt","6 qseqid sseqid pident qlen qstart qend sstart send","-out","./blastoutput"+"/"+filename+"_"+dbname+".blast"])
child_processes.append(p)
for cp in child_processes:
cp.wait()
RunBlast()
print("blast is done")
os.chdir(base_dir+"/blastoutput")
def filter():
os.system("mkdir sorted_blast_pair")
for blastresult in os.listdir('.'):
if blastresult.endswith(".blast"):
genomename=os.path.basename(blastresult)
blastresult=open(blastresult)
for line in blastresult:
try:
gene={}
line = line.split( )
qseqid=line[0]
sseqid=line[1]
pident=float(line[2])
qlength=float(line[3])
qstart=float(line[4])
qend=float(line[5])
sstart=float(line[6])
sstart=float(line[6])
send=float(line[7])
if (pident>85) & (((qend-qstart+1)/qlength)>0.75) :
gene[qseqid]=sseqid
for key in gene:
with open("./sorted_blast_pair"+"/"+key+"_"+genomename+".pair","w") as ofile:
ofile.write(key+"\t"+gene.get(key))
ofile.close
except IOError:
print("no input")
blastresult.close()
filter()
print("Filtering blast result is done")
####GetSequence#####
os.chdir(base_dir)
os.system("mkdir seqrecords")
def Parse(filename,seqs):
file = open(filename)
seqs={}
name = ''
for line in file:
line = line.rstrip()
if line.startswith('>'):
name=line.replace('>',"")
seqs[name] = ''
else:
seqs[name] = seqs[name] + line
file.close()
return seqs
seqs={}
for genome in os.listdir('.'):
if genome.endswith(".fasta"):
seqs=dict(seqs,**Parse(genome,seqs))
for file in os.listdir(base_dir+'/blastoutput/sorted_blast_pair'):
genomename=file.split("_")[2]
file=open(os.path.join(base_dir+'/blastoutput/sorted_blast_pair',file))
for line in file:
genename=line.split("\t")[1]+" "
coregenename=line.split("\t")[0]
for key in seqs:
if key.find(str(genename))!= -1:
with open("./seqrecords"+"/"+coregenename+"_"+genename+"_"+genomename+".fasta","w") as ofile:
ofile.write(">"+coregenename+"_"+genename+"_"+genomename+"\n"+seqs.get(key))
ofile.close()
file.close()
print("Getting sequences are done")
os.chdir(base_dir+'/seqrecords')
os.system('mkdir pergene_seqrecords')
genelist=open(os.path.join(sys.argv[1]+"../",'new_49gene.list'))
for gene in genelist:
gene=gene.rstrip()
for seqrecord in os.listdir("."):
if seqrecord.startswith(gene):
seq=open(os.path.join(base_dir+'/seqrecords',seqrecord))
for seqline in seq:
seqline=seqline.rstrip()
with open("./pergene_seqrecords"+"/"+gene+"_"+"unaligned"+".fasta","a") as pfile:
pfile.write(seqline+"\n")
pfile.close
seq.close()
genelist.close()
print("Sequences are sorted by each locus")
#####PRESENCE/ABSENCE#####
os.chdir(base_dir)
filelist1=os.listdir(base_dir+"/blastoutput")
filelist2=os.listdir(base_dir+"/blastoutput/sorted_blast_pair")
sys.stdout=open('test','a')
for beforefile in filelist1:
if beforefile.endswith(".blast"):
base=beforefile.split(".")[0]
coregenename=base.split("_")[0]
genomename=base.split("_")[1]
if str(filelist2).find(str(beforefile))!= -1:
sys.stdout.write(coregenename+"\t"+genomename+"\t"+"yes"+"\n")
else:
sys.stdout.write(coregenename+"\t"+genomename+"\t"+"no"+"\n")
sys.stdout.close()
test=open("test")
no=open("notest",'a')
for line in test:
line=line.rstrip()
core=line.split()[0]
subject=line.split()[1]
if (line.startswith("lpg")) & (line.find("no")!= -1):
for blastresult in filelist1:
if blastresult.startswith(core+"_"+subject):
f=open(os.path.join(base_dir+"/blastoutput",blastresult))
no.write(line+"\t"+str(f.readlines()).replace("t","").replace("n","")+"\n")
no.close()
| 35.163636 | 201 | 0.544812 | 656 | 5,802 | 4.746951 | 0.22561 | 0.035967 | 0.02569 | 0.026975 | 0.154785 | 0.098266 | 0.071933 | 0.026975 | 0 | 0 | 0 | 0.010189 | 0.289555 | 5,802 | 164 | 202 | 35.378049 | 0.745269 | 0.004654 | 0 | 0.107143 | 0 | 0 | 0.127345 | 0.015636 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0 | 0.021429 | 0 | 0.057143 | 0.042857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7edc2acd15bf40e4835a842803f28f5cd0ad7e8b | 2,413 | py | Python | install/copy_music.py | dwilliams-github/homepage | 7db2e063648f4210bff493787d02833a1c1d45d4 | [
"MIT"
] | null | null | null | install/copy_music.py | dwilliams-github/homepage | 7db2e063648f4210bff493787d02833a1c1d45d4 | [
"MIT"
] | 27 | 2019-11-07T03:48:33.000Z | 2022-03-25T00:46:35.000Z | install/copy_music.py | dwilliams-github/homepage | 7db2e063648f4210bff493787d02833a1c1d45d4 | [
"MIT"
] | null | null | null | import pymysql
from pymongo import MongoClient
import datetime
#
# Connect to old database (copied to local machine)
#
dbold = pymysql.connect( database="dwilliams_django", user="dwilliams", password="wierdo" )
cursor = dbold.cursor()
#
# Connect to our new mongoDB
#
client = MongoClient('mongodb+srv://webhome:oKcpOUraIdmPLyIb@cluster0-gyfof.mongodb.net/music?retryWrites=true&w=majority&ssl_cert_reqs=CERT_NONE')
db = client.home
#
# Port groups
#
groups = {}
cursor.execute( "SELECT id, name, url FROM music_group" )
for row in cursor.fetchall():
[id,name,url] = row
result = db.Group.insert_one({
'name': name,
'url': url
})
groups[id] = result.inserted_id
#
# Port directors
#
directors = {}
cursor.execute( "SELECT id, name, email, url FROM music_director" )
for row in cursor.fetchall():
[id,name,email,url] = row
result = db.Director.insert_one({
'name': name,
'email': email,
'url': url
})
directors[id] = result.inserted_id
#
# Port venues
#
venues = {}
cursor.execute( "SELECT id, name, address, url FROM music_venue" )
for row in cursor.fetchall():
[id,name,address,url] = row
result = db.Venue.insert_one({
'name': name,
'address': address,
'url': url
})
venues[id] = result.inserted_id
#
# Finally, gigs
#
# Note that pymongo doesn't support dates with times, so add a blank time to dates
#
"""
CREATE TABLE `music_gig` (
`id` int(11) NOT NULL,
`title` varchar(80) NOT NULL,
`group_id` int(11) NOT NULL,
`director_id` int(11) NOT NULL,
`venue_id` int(11) NOT NULL,
`start_date` date NOT NULL,
`end_date` date NOT NULL,
`position` varchar(60) NOT NULL,
`authors` varchar(100) NOT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
"""
cursor.execute( "SELECT title, group_id, director_id, venue_id, start_date, end_date, position, authors FROM music_gig" )
for row in cursor.fetchall():
[title,group_id,director_id,venue_id,start_date,end_date,position,authors] = row
result = db.Gig.insert_one({
'title': title,
'authors': authors,
'group': groups[group_id],
'director': directors[director_id],
'venue': venues[venue_id],
'position': position,
'start_date': datetime.datetime.combine(start_date, datetime.time.min),
'end_date': datetime.datetime.combine(end_date, datetime.time.min)
})
| 25.4 | 147 | 0.665976 | 326 | 2,413 | 4.812883 | 0.312883 | 0.040153 | 0.048439 | 0.035692 | 0.255577 | 0.130019 | 0.130019 | 0.076482 | 0.076482 | 0.076482 | 0 | 0.00879 | 0.198508 | 2,413 | 94 | 148 | 25.670213 | 0.802482 | 0.087029 | 0 | 0.285714 | 0 | 0.020408 | 0.2593 | 0.067287 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.020408 | 0.061224 | 0 | 0.061224 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7edd3d2db915c42f818487dfdd1965618bdb1112 | 10,001 | py | Python | tastyworks/models/order.py | alexonab/tastyworks_api | 05442b0534fd58f4b1707b9662ece61307a88335 | [
"Apache-2.0"
] | null | null | null | tastyworks/models/order.py | alexonab/tastyworks_api | 05442b0534fd58f4b1707b9662ece61307a88335 | [
"Apache-2.0"
] | 1 | 2020-09-09T11:30:43.000Z | 2020-09-09T11:30:43.000Z | tastyworks/models/order.py | alexonab/tastyworks_api | 05442b0534fd58f4b1707b9662ece61307a88335 | [
"Apache-2.0"
] | 2 | 2020-08-11T21:33:25.000Z | 2020-12-18T09:11:29.000Z | import logging
from datetime import datetime
from decimal import Decimal
from enum import Enum
from typing import List
from tastyworks.models.option import Option, OptionType
from tastyworks.models.underlying import UnderlyingType
import aiohttp
from dataclasses import dataclass, field
from tastyworks.models.security import Security
LOGGER = logging.getLogger(__name__)
class OrderType(Enum):
LIMIT = 'Limit'
MARKET = 'Market'
STOP_LIMIT = 'Stop Limit'
STOP = 'Stop'
class OrderPriceEffect(Enum):
CREDIT = 'Credit'
DEBIT = 'Debit'
class OrderStatus(Enum):
CONTINGENT = 'Contingent'
ROUTED = 'Routed'
RECEIVED = 'Received'
CANCELLED = 'Cancelled'
FILLED = 'Filled'
EXPIRED = 'Expired'
LIVE = 'Live'
REJECTED = 'Rejected'
CANCEL_REQUESTED = 'Cancel Requested'
IN_FLIGHT = 'In Flight'
REPLACE_REQUESTED = "Replace Requested"
def is_active(self):
return self in (OrderStatus.LIVE, OrderStatus.RECEIVED, OrderStatus.CANCEL_REQUESTED, OrderStatus.IN_FLIGHT, OrderStatus.REPLACE_REQUESTED)
class TimeInForce(Enum):
DAY = 'Day'
GTC = 'GTC'
GTD = 'GTD'
@dataclass
class OrderDetails(object):
order_id = None
ticker = None
type: OrderType = None
time_in_force: TimeInForce = TimeInForce.DAY
gtc_date: datetime = None
price: Decimal = None
fill_price: Decimal = None
stop_trigger: Decimal = None
price_effect: OrderPriceEffect = None
status: OrderStatus = None
legs: List[Security] = field(default_factory=list)
source: str = 'WBT'
def is_executable(self) -> bool:
required_data = all([
self.time_in_force,
self.type,
self.source
])
if self.type == OrderType.STOP or self.type == OrderType.MARKET:
non_stop_required_data = all([
self.price_effect,
self.price is None
])
else:
non_stop_required_data = all([
self.price_effect,
self.price is not None
])
if not required_data:
return False
if not non_stop_required_data:
return False
if not self.legs:
return False
if self.time_in_force == TimeInForce.GTD:
try:
datetime.strptime(self.gtc_date, '%Y-%m-%d')
except ValueError:
return False
return True
class Order(Security):
def __init__(self, order_details: OrderDetails):
"""
Initiates a new order object.
Args:
order_details (OrderDetails): An object specifying order-level details.
"""
self.details = order_details
def check_is_order_executable(self):
return self.details.is_executable()
def add_leg(self, security: Security):
self.details.legs.append(security)
def get_equity_leg_from_dict(self, input_dict: dict):
exp_date = datetime.strptime(input_dict['symbol'][6:12], '%y%m%d').date()
option_type = OptionType(input_dict['symbol'][12:13])
strike = Decimal(input_dict['symbol'][13:]) / 1000
return Option(ticker=self.details.ticker, quantity=input_dict['quantity'], expiry=exp_date, strike=strike, option_type=option_type, underlying_type=UnderlyingType.EQUITY)
@classmethod
def from_dict(cls, input_dict: dict):
"""
Parses an Order object from a dict.
"""
input_dict = input_dict.get('order') or input_dict
details = OrderDetails(input_dict.get('underlying-symbol')) or OrderDetails(input_dict.get('symbol'))
details.order_id = input_dict.get('id')
details.ticker = input_dict.get('underlying-symbol')
details.price = Decimal(input_dict.get('price', 0))
details.fill_price = Decimal('0')
# there have been instances where there was no price in dict.
legs = input_dict.get('legs')
if len(legs) > 0:
fills = input_dict.get('legs')[0].get('fills')
if len(fills) > 0:
details.fill_price = Decimal(input_dict.get('legs')[0].get('fills')[0].get('fill-price'))
details.stop_trigger = Decimal(input_dict.get('stop-trigger', 0))
if input_dict.get('price-effect') != None:
details.price_effect = OrderPriceEffect(input_dict.get('price-effect'))
details.type = OrderType(input_dict.get('order-type'))
details.status = OrderStatus(input_dict.get('status'))
details.time_in_force = input_dict.get('time-in-force')
details.gtc_date = input_dict.get('gtc-date')
order = cls(order_details=details)
for leg in input_dict.get('legs'):
if leg.get('instrument-type') == 'Equity Option':
if leg.get('action') == 'Sell to Close':
details.price_effect = OrderPriceEffect.CREDIT
elif leg.get('action') == 'Buy to Open':
details.price_effect = OrderPriceEffect.DEBIT
leg_obj = order.get_equity_leg_from_dict(leg)
order.details.legs.append(leg_obj)
return order
@classmethod
async def get_remote_orders(cls, session, account, **kwargs) -> List:
"""
Gets all orders on Tastyworks.
Args:
session (TastyAPISession): The session to use.
account (TradingAccount): The account_id to get orders on.
Keyword arguments specifying filtering conditions, these include:
`status`, `time-in-force`, etc.
Returns:
list(Order): A list of Orders
"""
if not session.logged_in:
raise Exception('Tastyworks session not logged in.')
filters = kwargs
url = '{}/accounts/{}/orders'.format(
session.API_url,
account.account_number
)
url = '{}?{}'.format(
url,
'&'.join([f'{k}={v}' for k, v in filters.items()])
)
res = []
async with aiohttp.request('GET', url, headers=session.get_request_headers()) as resp:
if resp.status != 200:
raise Exception('Could not get current open orders')
data = (await resp.json())['data']['items']
for order_data in data:
order = cls.from_dict(order_data)
res.append(order)
return res
@classmethod
async def get_live_orders(cls, session, account, **kwargs) -> List:
"""
Gets all live orders on Tastyworks.
Args:
session (TastyAPISession): The session to use.
account (TradingAccount): The account_id to get orders on.
Keyword arguments specifying filtering conditions, these include:
`status`, `time-in-force`, etc.
Returns:
list(Order): A list of Orders
"""
if not session.logged_in:
raise Exception('Tastyworks session not logged in.')
filters = kwargs
url = '{}/accounts/{}/orders/live'.format(
session.API_url,
account.account_number
)
url = '{}?{}'.format(
url,
'&'.join([f'{k}={v}' for k, v in filters.items()])
)
res = []
async with aiohttp.request('GET', url, headers=session.get_request_headers()) as resp:
if resp.status != 200:
raise Exception('Could not get current open orders')
data = (await resp.json())['data']['items']
for order_data in data:
if (not order_data.get('price-effect') and order_data.get('order-type') != 'Stop'):
continue
order = cls.from_dict(order_data)
if not order.details.status.is_active():
continue
res.append(order)
return res
@classmethod
async def cancel_order(cls, session, account, order_id):
"""
cancels an order on Tastyworks.
Args:
session (TastyAPISession): The session to use.
account (TradingAccount): The account_id to get orders on.
order_id (OrderDetails): The order_id returned from get_remote_orders.
Returns:
A single order. The order will have a cancalled status if successfull.
"""
if not session.logged_in:
raise Exception('Tastyworks session not logged in.')
url = '{}/accounts/{}/orders/{}'.format(
session.API_url,
account.account_number,
order_id
)
async with aiohttp.request('DELETE', url, headers=session.get_request_headers()) as resp:
if resp.status != 200:
raise Exception('Could not delete the order')
data = (await resp.json())['data']
order = cls.from_dict(data)
return order.details.status
@classmethod
async def get_order(cls, session, account, order_id):
"""
gets an order by the order id on Tastyworks.
Args:
session (TastyAPISession): The session to use.
account (TradingAccount): The account_id to get orders on.
order_id (OrderDetails): The order_id returned from get_remote_orders.
Returns:
A single order. The order will have a cancalled status if successfull.
"""
if not session.logged_in:
raise Exception('Tastyworks session not logged in.')
url = '{}/accounts/{}/orders/{}'.format(
session.API_url,
account.account_number,
order_id
)
async with aiohttp.request('GET', url, headers=session.get_request_headers()) as resp:
if resp.status != 200:
raise Exception('Could not retreive the order')
data = (await resp.json())['data']
order = cls.from_dict(data)
return order
| 33.673401 | 178 | 0.59504 | 1,146 | 10,001 | 5.055846 | 0.173647 | 0.038833 | 0.035209 | 0.011046 | 0.489299 | 0.446151 | 0.417846 | 0.409216 | 0.380911 | 0.380911 | 0 | 0.004722 | 0.30127 | 10,001 | 296 | 179 | 33.787162 | 0.824413 | 0.020898 | 0 | 0.366834 | 0 | 0 | 0.104388 | 0.011612 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035176 | false | 0 | 0.050251 | 0.01005 | 0.341709 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7edf2e2273f7cf07f11d45912ee4d110be45e480 | 2,822 | py | Python | app/home/addWebcamDialog.py | priyanshu-rsi/project-green-hammer | 9084a2efb69351b183fcad3800070516f238dcbf | [
"MIT"
] | null | null | null | app/home/addWebcamDialog.py | priyanshu-rsi/project-green-hammer | 9084a2efb69351b183fcad3800070516f238dcbf | [
"MIT"
] | 7 | 2020-10-27T22:31:10.000Z | 2022-03-12T00:26:18.000Z | app/home/addWebcamDialog.py | priyanshu-rsi/project-green-hammer | 9084a2efb69351b183fcad3800070516f238dcbf | [
"MIT"
] | null | null | null | import cv2, time, os, subprocess, sys
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
class WebcamDialog:
def __init__(self, builder):
self.builder = builder
print("Inited AddWebcamDialog")
self.makeList()
def makeList(self):
print("Making list of available webcams")
webcamsList = self.builder.get_object("webcams_list")
cams = subprocess.check_output("ls /dev/ | grep video | awk '{print $0}'", shell=True).decode(sys.stdout.encoding)
camsList = cams.splitlines()
for cam in camsList:
_id = camsList.index(cam)
print("Adding camera ", cam, _id)
Gtk.ComboBoxText.append(webcamsList, str(_id), str(cam))
# addWebcamSpecific actions
def addWebcamCloseDialog(self, widget):
self.builder.get_object("webcam_settings_dialog").hide()
def addWebcamSave(self, widget):
print("Add webcam save")
self.addWebcamCloseDialog(widget)
def previewSource(self, camid):
#get dropdown item
webcamsList = self.builder.get_object("webcams_list")
active_cam = webcamsList.get_active()
print("---- ACTIVE CAM ---- ", active_cam)
cv2.namedWindow('Webcam-Preview', cv2.WINDOW_NORMAL)
cam = cv2.VideoCapture( active_cam )
FirstTime = True
while True:
ret_val, img = cam.read()
cv2.imshow('Webcam-Preview', img)
# if cv2.getWindowProperty('Webcam-Preview', 0) >= 0:
# break
if cv2.waitKey(1) == 27:
break # esc to quit
if cv2.getWindowProperty('Webcam-Preview',cv2.WND_PROP_VISIBLE) < 1:
break
# cv2.resizeWindow('Webcam-Preview', cv2.getWindowImageRect('Webcam-Preview')[2], cv2.getWindowImageRect('Webcam-Preview')[3])
cv2.destroyAllWindows()
def addWebcam(self, widget):
print("ADDING WEBCAM")
addWebcamWindow = self.builder.get_object("webcam_settings_dialog")
print("Bind cancel btn")
# Bind cancel btn
addWebcamCancelBtn = self.builder.get_object("webcam_settings_cancel_btn")
addWebcamCancelBtn.connect("clicked", self.addWebcamCloseDialog)
# Bind save btn
addWebcamCancelBtn = self.builder.get_object("webcam_settings_save_btn")
addWebcamCancelBtn.connect("clicked", self.addWebcamSave)
# Bind preview btn
webcamPreviewBtn = self.builder.get_object("preview-webcam-source")
webcamPreviewBtn.connect("clicked", self.previewSource)
# Set char limit
ipInputBox = self.builder.get_object("webcam_settings_dialog_ip_input")
# ipInputBox.set_max_length(12)
addWebcamWindow.show()
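# --- Hedged usage sketch (not part of the original module) ---
# Assumes a Gtk.Builder loaded with a UI definition containing the object ids
# referenced above ("webcams_list", "webcam_settings_dialog", ...); the .glade
# file name is an illustrative assumption.
#
#     builder = Gtk.Builder()
#     builder.add_from_file("home.glade")
#     dialog = WebcamDialog(builder)
#     dialog.addWebcam(None)   # opens the settings dialog and wires its buttons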
| 36.179487 | 138 | 0.632176 | 298 | 2,822 | 5.842282 | 0.40604 | 0.063182 | 0.064331 | 0.091901 | 0.265365 | 0.180356 | 0.180356 | 0.063182 | 0 | 0 | 0 | 0.012434 | 0.259036 | 2,822 | 78 | 139 | 36.179487 | 0.820182 | 0.118001 | 0 | 0.08 | 0 | 0 | 0.166263 | 0.058918 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.06 | 0 | 0.2 | 0.16 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ee432b9f9cee3a48190076af59fbb4bff47855a | 9,115 | py | Python | ETL_ORCID.py | GabrielePisciotta/papendex | 4929cb35b41ff30b7ab7f48599ab8154790b3bf7 | [
"0BSD"
] | null | null | null | ETL_ORCID.py | GabrielePisciotta/papendex | 4929cb35b41ff30b7ab7f48599ab8154790b3bf7 | [
"0BSD"
] | null | null | null | ETL_ORCID.py | GabrielePisciotta/papendex | 4929cb35b41ff30b7ab7f48599ab8154790b3bf7 | [
"0BSD"
] | null | null | null | from lxml import etree
from tqdm import tqdm
import json
import pysolr
import time
from os import listdir
from os.path import isfile, join
import re
import tarfile
import os
import gc
from threading import Thread
import argparse
from pathlib import Path
# Get list of files inside the dir
def get_files_in_dir(path):
list_of_files = [f for f in listdir(path) if isfile(join(path, f))]
    list_of_files.sort(key=lambda f: int(re.sub(r'\D', '', f)))
return list_of_files
def write_to_file(to_store, output_path):
file_id = 0
print("Writing to file...")
to_write = []
Path(os.path.join(output_path,'docs')).mkdir(parents=True, exist_ok=True)
for doi, authors_to_store in tqdm(to_store.items()):
# Deduplicate them
authors = [dict(t) for t in {tuple(d.items()) for d in authors_to_store}]
to_write.append({
"id": doi,
"authors": json.dumps(authors)
})
if len(to_write) == 20000:
with open(os.path.join(output_path, 'docs', '{}.json'.format(file_id)), 'w') as f:
json.dump(to_write, f)
file_id += 1
to_write.clear()
gc.collect()
if len(to_write) > 0:
with open(os.path.join(output_path, 'docs', '{}.json'.format(file_id)), 'w') as f:
json.dump(to_write, f)
file_id += 1
to_write.clear()
def store_data(output_path):
print("Storing data in SOLR...")
try:
solr = pysolr.Solr('http://localhost:8983/solr/orcid', always_commit=True, timeout=100)
import os
for file in tqdm(os.listdir(os.path.join(output_path, "docs"))):
with open(os.path.join(output_path,'docs/{}'.format(file)), "r") as f:
to_add = json.load(f)
# Add it
response = solr.add(to_add)
# Get response
response = json.loads(response)
solr.get_session().close()
gc.collect()
# If something goes wrong, raise an exception
if response['responseHeader']['status'] != 0:
print("Exception with file {}".format(f))
raise Exception
to_add.clear()
except Exception as e:
print(e)
def thread_parallel(func):
def parallel_func(*args, **kw):
p = Thread(target=func, args=args, kwargs=kw)
p.daemon = True
p.start()
return parallel_func
@thread_parallel
def save_orcid_to_file(to_save_orcid, output_path):
for e in to_save_orcid:
orcid = e['orcid']
with open(os.path.join(output_path, 'orcid/{}.txt'.format(orcid)), 'w') as author_file:
json.dump(e, author_file)
@thread_parallel
def save_exception_file(root, orcid, output_path):
if orcid != None:
if 'doi' in etree.tounicode(root, pretty_print=True):
with open(os.path.join(output_path,'exceptions/{}.xml'.format(orcid)), 'w') as f:
f.write(etree.tounicode(root, pretty_print=True))
def orcid_ETL(summaries_dump, output_path):
print("Extracting Orcid dump... This may take a while.")
start = time.time()
to_store = {}
to_save_orcid = []
print("Extracting {}".format(summaries_dump))
with tarfile.open(summaries_dump, 'r:gz') as extracted_archive:
dir_in_extracted_archive = extracted_archive.getmembers()
for f in tqdm(dir_in_extracted_archive):
f = extracted_archive.extractfile(f)
# It may happen that is read something that isn't a file, so this is to skip
if f is None:
continue
parser = etree.XMLParser()
try:
tree = etree.parse(f, parser)
root = tree.getroot()
orcid = root.find('{http://www.orcid.org/ns/common}orcid-identifier') \
.find('{http://www.orcid.org/ns/common}path') \
.text
if orcid is None:
continue
name = root.find('{http://www.orcid.org/ns/person}person') \
.find('{http://www.orcid.org/ns/person}name')
if name is not None:
given_names = name.find('{http://www.orcid.org/ns/personal-details}given-names')
if given_names is not None:
given_names = given_names.text
else:
given_names = ""
family_name = name.find('{http://www.orcid.org/ns/personal-details}family-name')
if family_name is not None:
family_name = family_name.text
else:
family_name = ""
else:
given_names = ""
family_name = ""
works = root.find('{http://www.orcid.org/ns/activities}activities-summary') \
.findall('{http://www.orcid.org/ns/activities}works')
dois = []
for w in works:
groups = w.findall('{http://www.orcid.org/ns/activities}group')
# It's possible that are listed multiple works for each author.
# This part is to extract each DOI, check if is valid and in the end
# save it as normalised DOI.
for g in groups:
if g is not None:
try:
a1 = g.findall('{http://www.orcid.org/ns/common}external-ids')
for aa1 in a1:
if aa1 is not None:
b1 = aa1.findall('{http://www.orcid.org/ns/common}external-id')
for bb1 in b1:
if bb1 is not None:
t1 = bb1.findall('{http://www.orcid.org/ns/common}external-id-type')
for type in t1:
if type is not None and type.text == 'doi':
c1 = bb1.find('{http://www.orcid.org/ns/common}external-id-normalized')
if c1 is not None:
normalised_doi = c1.text
if normalised_doi is not None:
dois.append(normalised_doi)
"""else:
c1 = bb1.find('{http://www.orcid.org/ns/common}external-id-value')
if c1 is not None:
normalised_doi = c1.text
if normalised_doi is not None:
dois.append(normalised_doi)"""
except AttributeError as ex:
                                print(ex)
continue
if len(dois) != 0:
to_save_orcid.append({"orcid": orcid,
"given_names": given_names,
"family_name": family_name,
"dois": dois})
for doi in dois:
# If the doi is already present in the local batch, we append the orcid to its list
if to_store.__contains__(doi):
to_store[doi].append({
'orcid': orcid,
'given_names': given_names,
'family_name': family_name
})
else:
to_store[doi] = [{
'orcid': orcid,
'given_names': given_names,
'family_name': family_name
}]
except Exception as ex:
save_exception_file(root, orcid, output_path)
continue
# Flush...
if len(to_store) != 0:
write_to_file(to_store, output_path)
gc.collect()
store_data(output_path)
end = time.time()
print("Processed in {:.3f}s".format((end-start)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("output_path", help="Path where will be stored everything")
parser.add_argument("summaries_dump", help="Summaries dump")
args = parser.parse_args()
orcid_ETL(summaries_dump = args.summaries_dump, output_path=args.output_path)
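# --- Hedged usage sketch (not part of the original script) ---
# Both paths are illustrative assumptions. store_data() above additionally
# assumes a Solr core named 'orcid' reachable at http://localhost:8983/solr/orcid.
#
#     python ETL_ORCID.py /data/orcid_out ORCID_summaries.tar.gz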
| 39.630435 | 127 | 0.478113 | 999 | 9,115 | 4.211211 | 0.219219 | 0.042786 | 0.039933 | 0.049917 | 0.335393 | 0.314951 | 0.285477 | 0.192061 | 0.175422 | 0.13739 | 0 | 0.007606 | 0.423039 | 9,115 | 229 | 128 | 39.803493 | 0.792356 | 0.047614 | 0 | 0.222222 | 0 | 0 | 0.128365 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046784 | false | 0 | 0.087719 | 0 | 0.146199 | 0.05848 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7eea3b04e31b5881d299c0f204b91b44091508c8 | 1,148 | py | Python | mainEmberrite/npcs/barmaidClass.py | evvanErb/Emberrite | 1e65ef69188619684e093f01febc6f92f8b02716 | [
"Apache-2.0"
] | null | null | null | mainEmberrite/npcs/barmaidClass.py | evvanErb/Emberrite | 1e65ef69188619684e093f01febc6f92f8b02716 | [
"Apache-2.0"
] | null | null | null | mainEmberrite/npcs/barmaidClass.py | evvanErb/Emberrite | 1e65ef69188619684e093f01febc6f92f8b02716 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import vendorClass
class barmaid(vendorClass.vendor):
def __init__(self,name,health,armor,damageDeal,inventory):
vendorClass.vendor.__init__(self,name,health,armor,damageDeal,inventory)
def conversation(self, inv, hero):
convo = True
while (convo):
purchase = raw_input("\n'ello love what can I get for you?\n>>> ")
if ((purchase == "room") or (purchase == "a room") or (purchase == "a place to sleep") or (purchase == "sleep")):
if (inv.returnGold() >= 20):
print("\nThat'll be 20 gold pieces. Thanks!")
hero.heal(20)
inv.spendGold(20)
else:
print("\nSorry love you don't have enough gold!")
elif (purchase == "drink" or purchase == "mead" or purchase == "beer" or purchase == "ale" or purchase == "meat" or purchase == "a drink"):
if (inv.returnGold() >= 5):
print("\nThat'll be 5 gold pieces. Thanks!")
inv.spendGold(5)
else:
print("\nSorry love you don't have enough gold!")
elif ((purchase == "stop") or (purchase == "leave") or (purchase == "bye") or (purchase == "goodbye")):
convo = False
else:
print("\nSorry love we don't have that!") | 41 | 142 | 0.63676 | 159 | 1,148 | 4.540881 | 0.45283 | 0.152355 | 0.045706 | 0.078947 | 0.260388 | 0.260388 | 0.260388 | 0.144044 | 0.144044 | 0.144044 | 0 | 0.011892 | 0.194251 | 1,148 | 28 | 143 | 41 | 0.768649 | 0.013937 | 0 | 0.2 | 0 | 0 | 0.266784 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.04 | 0 | 0.16 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7eea86050e92eee964d9f26e65d0cec6fa7cbaee | 2,811 | py | Python | rect.py | downsmash/core | 91a6f9ce273abb598cd18f6c573a645c7c7df152 | [
"MIT"
] | null | null | null | rect.py | downsmash/core | 91a6f9ce273abb598cd18f6c573a645c7c7df152 | [
"MIT"
] | null | null | null | rect.py | downsmash/core | 91a6f9ce273abb598cd18f6c573a645c7c7df152 | [
"MIT"
] | null | null | null | #!/usr/bin/python
"""Module for Rect class.
"""
import numpy as np
class Rect:
"""This class represents a rectangle.
Used for representing the game screen and regions of interest (ROIs.)
The `children` class attribute allows for some hierarchical structure.
"""
def __init__(self, top, left, height, width):
self.top = int(top)
self.left = int(left)
self.height = int(height)
self.width = int(width)
self.children = []
def __repr__(self):
this_repr = ['{height}x{width}+{top}+{left}'.format(**self.__dict__)]
for child in self.children:
for line in repr(child).split("\n"):
this_repr.append("-> " + line)
return "\n".join(this_repr)
def __and__(self, other):
"""Return the intersection of `self` and `other`.
"""
overlap_left = max(self.left, other.left)
overlap_top = max(self.top, other.top)
overlap_right = min(self.left + self.width, other.left + other.width)
overlap_bottom = min(self.top + self.height, other.top + other.height)
if overlap_left > overlap_right or overlap_top > overlap_bottom:
return None
return Rect(overlap_top,
overlap_left,
overlap_bottom - overlap_top,
overlap_right - overlap_left)
def to_mask(self, height, width, color=True):
"""Generate a zero-one mask from this Rect, viewed as part of a larger
rectangle. The mask will have the same height, width, and
top-left as this Rect.
Ones/zeros correspond to pixels and denote "in mask"/"not in mask"
respectively.
Parameters:
`height`: height of the larger rectangle.
`width`: width of the larger rectangle.
"""
if color:
shape = (height, width, 3)
fill = (1, 1, 1)
else:
shape = (height, width)
fill = 1
mask = np.zeros(shape)
mask[self.top:(self.top + self.height + 1),
self.left:(self.left + self.width + 1)] = fill
return mask.astype(np.uint8)
def subregion(self, pct_top, pct_left, pct_height, pct_width,
padding=0):
"""Return the subregion from
(pct_top)*100% to (pct_top + pct_height)*100%,
(pct_left)*100% to (pct_left + pct_width)*100%,
plus a bevel of (padding)*100%,
intersected with the screen.
"""
top = self.top + (pct_top - padding) * self.height
left = self.left + (pct_left - padding) * self.width
height = (padding + pct_height + padding) * self.height
width = (padding + pct_width + padding) * self.width
return Rect(top, left, height, width) & self
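
# --- Hedged usage sketch (not part of the original module; values are illustrative) ---
if __name__ == "__main__":
    screen = Rect(0, 0, 480, 640)                    # 480x640 screen at the origin
    roi = screen.subregion(0.25, 0.25, 0.5, 0.5)     # centred half-size region
    print(roi)                                       # "240x320+120+160"
    print(roi & Rect(100, 100, 200, 200))            # intersection, or None if disjoint
    print(roi.to_mask(480, 640, color=False).shape)  # (480, 640) zero-one uint8 mask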
| 33.070588 | 78 | 0.579153 | 356 | 2,811 | 4.438202 | 0.27809 | 0.031013 | 0.022785 | 0.022785 | 0.027848 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012339 | 0.308075 | 2,811 | 84 | 79 | 33.464286 | 0.8 | 0.279972 | 0 | 0 | 0 | 0 | 0.019169 | 0.015442 | 0 | 0 | 0 | 0 | 0 | 1 | 0.116279 | false | 0 | 0.023256 | 0 | 0.27907 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7eeecf45c764183baaa65c098c1b17678b8f5d13 | 1,033 | py | Python | toolbox/include-contrib.py | Rix565/skiftos-dailybuild | ef3abf0dda028b6d7b3e658946a031b1a1ba44c9 | [
"MIT"
] | 1,724 | 2019-03-02T18:31:16.000Z | 2022-03-31T18:35:42.000Z | toolbox/include-contrib.py | Rix565/skiftos-dailybuild | ef3abf0dda028b6d7b3e658946a031b1a1ba44c9 | [
"MIT"
] | 280 | 2019-03-06T09:36:49.000Z | 2022-03-30T18:56:14.000Z | toolbox/include-contrib.py | Rix565/skiftos-dailybuild | ef3abf0dda028b6d7b3e658946a031b1a1ba44c9 | [
"MIT"
] | 263 | 2019-03-14T15:04:12.000Z | 2022-03-26T19:28:36.000Z | #!/bin/env python3
import sys
import os
if not "SKIFT_SYSROOT" in os.environ:
print("Please run use-it.sh first")
exit(1)
def usage():
print(f"Usage: {sys.argv[0]} <name> <name> ...")
if len(sys.argv) < 2:
usage()
sysroot = os.environ["SKIFT_SYSROOT"]
contrib = os.environ["SKIFT_CONTRIBROOT"]
skift = os.path.join(contrib, "..")
for module in sys.argv[1:]:
path = os.path.join(contrib, module)
if not os.path.isdir(path):
print(f"{path} not found")
continue
os.chdir(path)
if not os.system("./clean-it.sh") == 0:
print(f"Error while cleaning {module}")
continue
if not os.system("./get-it.sh") == 0:
print(f"Error while downloading {module}")
continue
if not os.system("./build-it.sh") == 0:
print(f"Error while building {module}")
continue
if not os.system("./install-it.sh") == 0:
print(f"Error while installing {module}")
continue
os.chdir(skift)
os.chdir(contrib)
os.system("git checkout -- *")
| 25.825 | 52 | 0.604066 | 151 | 1,033 | 4.112583 | 0.344371 | 0.048309 | 0.056361 | 0.083736 | 0.2657 | 0.2657 | 0.135266 | 0 | 0 | 0 | 0 | 0.01125 | 0.225557 | 1,033 | 39 | 53 | 26.487179 | 0.765 | 0.016457 | 0 | 0.151515 | 0 | 0 | 0.310345 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0 | 0.060606 | 0 | 0.090909 | 0.212121 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ef144300a818fe4980fe554132435325089c012 | 6,485 | py | Python | pystman.py | yashBhosale/pystman | 2297f0610d5f80783ab5276f43240b0f501945c0 | [
"MIT"
] | 1 | 2020-12-14T00:26:37.000Z | 2020-12-14T00:26:37.000Z | pystman.py | yashBhosale/pystman | 2297f0610d5f80783ab5276f43240b0f501945c0 | [
"MIT"
] | null | null | null | pystman.py | yashBhosale/pystman | 2297f0610d5f80783ab5276f43240b0f501945c0 | [
"MIT"
] | null | null | null | import asyncio
import sys
import websockets
import base64
from PyQt5.QtCore import QObject, Qt, pyqtSignal, pyqtSlot
from PyQt5.QtWidgets import (
QAction,
QApplication,
QHBoxLayout,
QLineEdit,
QMainWindow,
QMenu,
QPushButton, QSplitter,
QTextEdit,
QToolBar,
QToolButton,
QVBoxLayout,
QWidget,
QSizePolicy,
)
from asyncqt import QEventLoop, asyncSlot
from websockets.exceptions import ConnectionClosed
from QKVLineEdit import KVList
from QTextButtonList import ButtonListView
class MainWindow(QMainWindow):
_SESSION_TIMEOUT = 1.
"""float: Session timeout."""
def __init__(self):
super().__init__()
self.initContent()
self.initToolbar()
self.setGeometry(400, 400, 450, 250)
self.setWindowTitle('Websocket Test Client')
self.show()
def initContent(self):
self.postman = Postman()
self.postman.signal.signal.connect(self.pushToHistory)
self.listyList = ButtonListView(self.postman.setValues)
self.splitter = QSplitter()
self.splitter.addWidget(self.listyList)
self.splitter.addWidget(self.postman)
self.splitter.setStretchFactor(0,1)
self.splitter.setStretchFactor(1,2)
self.setCentralWidget(self.splitter)
def initToolbar(self):
self.toolbar = QToolBar("Main toolbar")
self.addToolBar(self.toolbar)
self.toolbar.addAction(QAction("&New", self))
self.toolbar.setMouseTracking(True)
button_action = QAction("Your Button", self)
button_action.setStatusTip("This is your button")
button_action.triggered.connect(lambda x: print("toolbar button clicked"))
saveButton=QToolButton(self)
saveButton.setPopupMode(QToolButton.InstantPopup)
saveButton.setText("save")
menu = QMenu(saveButton)
menu.addAction(QAction("save all", self))
menu.addAction(QAction("save as", self))
saveButton.setMenu(menu)
self.toolbar.addWidget(saveButton)
self.toolbar.addAction(button_action)
@pyqtSlot(str, dict, str)
def pushToHistory(self, url, params, body):
b = self.listyList.wlist.itemWidget(self.listyList.wlist.item(0))
if b.url != url or b.body != body or b.params != params:
self.listyList.pushToHistory(url, body, params)
def keyPressEvent(self, e):
if e.key() == Qt.Key_Escape:
self.close()
def contextMenuEvent(self, event):
print("context menu")
super(MainWindow, self).contextMenuEvent(event)
class PostmanSignal(QObject):
def __init__(self) -> None:
super(PostmanSignal, self).__init__()
signal = pyqtSignal(str, dict, str)
class Postman(QWidget):
def __init__(self):
super().__init__()
self.initTopBar()
self.pairList = KVList()
self.vbox.addWidget(self.pairList)
#self.addPairButton = QPushButton("Add Pair", self)
#self.addPairButton.clicked.connect(self.pairList.addKVPair)
#self.removePairButton = QPushButton("Remove Pair", self)
#self.removePairButton.clicked.connect(self.pairList.removeKVPair)
#buttons = QHBoxLayout()
#buttons.addWidget(self.addPairButton)
#buttons.addWidget(self.removePairButton)
#buttonsWidget = QWidget()
#buttonsWidget.setLayout(buttons)
#self.vbox.addWidget(buttonsWidget)
self.body = QTextEdit()
self.vbox.addWidget(self.body)
self.editResponse = QTextEdit()
self.vbox.addWidget(self.editResponse)
self.setLayout(self.vbox)
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
def initTopBar(self):
self.vbox = QVBoxLayout()
self.signal = PostmanSignal()
self.urlBar = QLineEdit()
self.topBar = QWidget()
self.topBarLayout = QHBoxLayout()
self.topBarLayout.addWidget(self.urlBar)
self.connectButton = QPushButton("Connect", self)
self.connectButton.clicked.connect(self.sendConnectRequest)
self.topBarLayout.addWidget(self.connectButton)
self.disconnectButton = QPushButton("Disconnect", self)
self.disconnectButton.clicked.connect(self.disconnect)
self.topBarLayout.addWidget(self.disconnectButton)
self.sendButton = QPushButton("Send Text")
self.sendButton.clicked.connect(self.sendMessage)
self.topBarLayout.addWidget(self.sendButton)
self.topBar.setLayout(self.topBarLayout)
self.vbox.addWidget(self.topBar)
def setValues(self, url, params=None, body=None):
self.urlBar.setText(url)
self.pairList.setPairs(params if params else {})
self.body.setText(body if body else '')
@asyncSlot()
async def disconnect(self) -> None:
await self.ws.close()
@asyncSlot()
async def sendConnectRequest(self):
self.signal.signal.emit(
self.urlBar.text(),
self.pairList.getPairs(),
''
)
try:
username = self.pairList.pairs[0].getKey()
password = self.pairList.pairs[0].getValue()
auth_header = 'Basic ' + base64.b64encode(bytes(username + ':' + password, 'utf-8')).decode('utf-8')
self.ws = await websockets.connect(self.urlBar.text(),
extra_headers={'Authorization': auth_header})
except Exception as exc:
print(exc)
self.body.setText(str(exc))
return
else:
print("Connected")
try:
            async for received in self.ws:
                if isinstance(received, bytes):
                    received = received.decode('utf-8')
                self.body.setText(received)
except ConnectionClosed as c:
print("Connection Closed: code {} \n {}".format(c.code, c.reason))
@asyncSlot()
async def sendMessage(self):
await self.ws.send(self.editResponse.toPlainText())
app = QApplication(sys.argv)
loop = QEventLoop(app)
# this allows me to use async/await with pyqt.
# Apparently it's not super performant, but I don't really need it to be.
asyncio.set_event_loop(loop)
window = MainWindow()
window.show()
# Start the event loop.
with loop:
sys.exit(loop.run_forever())
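# --- Hedged usage note (not part of the original script) ---
# Run `python pystman.py`, enter a WebSocket URL (ws://localhost:8765 is an
# illustrative endpoint), and put a username/password in the first key/value
# row: sendConnectRequest() above sends them as an HTTP Basic Authorization
# header. "Send Text" transmits the contents of the bottom text editor.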
| 30.446009 | 112 | 0.635158 | 661 | 6,485 | 6.175492 | 0.322239 | 0.038217 | 0.020823 | 0.020578 | 0.026458 | 0.011759 | 0 | 0 | 0 | 0 | 0 | 0.006434 | 0.257055 | 6,485 | 212 | 113 | 30.589623 | 0.840805 | 0.087587 | 0 | 0.060811 | 0 | 0 | 0.037794 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067568 | false | 0.013514 | 0.067568 | 0 | 0.175676 | 0.033784 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ef5fb15626219ce80894ae9e28525aa15b2946d | 289 | py | Python | 2019/Lecture08/02Example/file_write.py | cbchoi/SIT32004 | 699598fc321845e46e5cce81c6c2a60999698e6e | [
"MIT"
] | 1 | 2019-03-04T05:35:37.000Z | 2019-03-04T05:35:37.000Z | 2019/Lecture08/02Example/file_write.py | cbchoi/SIT32004 | 699598fc321845e46e5cce81c6c2a60999698e6e | [
"MIT"
] | null | null | null | 2019/Lecture08/02Example/file_write.py | cbchoi/SIT32004 | 699598fc321845e46e5cce81c6c2a60999698e6e | [
"MIT"
] | 6 | 2019-03-10T23:39:10.000Z | 2020-03-20T11:37:12.000Z | """file_open.py"""
import random
f = open("test.txt", "w")
for i in range(100):
f.write(str(random.randint(0, 100)))
f.write("\n")
f.close()
f1 = open("append.txt", "a")
f2 = open("test.txt", "r")
for line in f2:
f1.write(str(int(line) + 10))
f1.write("\n")
f1.close()
f2.close() | 14.45 | 37 | 0.591696 | 54 | 289 | 3.148148 | 0.518519 | 0.094118 | 0.129412 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.064777 | 0.145329 | 289 | 20 | 38 | 14.45 | 0.623482 | 0.041522 | 0 | 0 | 0 | 0 | 0.121324 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.076923 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ef7b9d57905ac367709dbf6f71f2690c44f863b | 13,505 | py | Python | run_dataset_generation.py | skanav/cst_transform | 361a23293cf0359af7a7d17cf465483ffe4e7545 | [
"Apache-2.0"
] | null | null | null | run_dataset_generation.py | skanav/cst_transform | 361a23293cf0359af7a7d17cf465483ffe4e7545 | [
"Apache-2.0"
] | null | null | null | run_dataset_generation.py | skanav/cst_transform | 361a23293cf0359af7a7d17cf465483ffe4e7545 | [
"Apache-2.0"
] | 1 | 2021-07-02T16:04:14.000Z | 2021-07-02T16:04:14.000Z | import os
import argparse
import logging
import json
import shutil
from tqdm import tqdm
from glob import glob
from urllib import request
from cst_transform.utils import svcomp, label_utils
from cst_transform.data import preprocessor as p
from cst_transform.data import vocab_utils, lmdb_utils
DEFAULT_DATASETS = {
"bmc-ki": "https://www.sosy-lab.org/research/strategy-selection/supplements.tar.bz2",
"algorithms": "https://www.sosy-lab.org/research/strategy-selection/supplements.tar.bz2",
"sc": "https://www.sosy-lab.org/research/strategy-selection/supplements.tar.bz2",
"tools": "https://sv-comp.sosy-lab.org/2018/results/results-verified/All-Raw.zip"
}
DEFAULT_TOOL_SELECTION = {
"bmc-ki": "./cst_transform/resources/bmc-ki.json",
"algorithms": "./cst_transform/resources/algorithms.json",
"sc": "./cst_transform/resources/sc.json",
"tools": "./cst_transform/resources/tools.json"
}
DEFAULT_PREFIX = {
"bmc-ki": "../programs/benchmarks/",
"algorithms": "../programs/benchmarks/",
"sc": "../programs/benchmarks/",
"tools": "../sv-benchmarks/c/"
}
class DownloadHook:
def __call__(self, chunk_id, chunk_size, total_chunks):
if not hasattr(self, "pbar"):
total_size = total_chunks // chunk_size
self.pbar = tqdm(total=total_size+1, desc="Download:")
self.last_chunk = 0
chunk_update = chunk_id - self.last_chunk
self.pbar.update(chunk_update)
if chunk_id == total_chunks:
self.pbar.close()
else:
self.last_chunk = chunk_id
def download_dataset(benchmark_url, data_dir):
file_name = benchmark_url.split("/")[-1]
file_path = os.path.join(data_dir, file_name)
if os.path.exists(file_path):
logging.info("Benchmark is already downloaded. Skip.")
return file_path
request.urlretrieve(benchmark_url, file_path, reporthook=DownloadHook())
return file_path
def _unpack_bz2(file_path):
dir_name = os.path.dirname(file_path)
target_dir = os.path.join(dir_name, "content")
if not os.path.exists(target_dir):
os.makedirs(target_dir)
else:
logging.info("File seems already unpacked. Skip.")
return target_dir
import tarfile
tar = tarfile.open(file_path, "r:bz2")
tar.extractall(target_dir)
tar.close()
return target_dir
def _unpack_zip(file_path):
dir_name = os.path.dirname(file_path)
target_dir = os.path.join(dir_name, "content")
if not os.path.exists(target_dir):
os.makedirs(target_dir)
else:
logging.info("File seems already unpacked. Skip.")
return target_dir
import zipfile
with zipfile.ZipFile(file_path, 'r') as zip_ref:
zip_ref.extractall(target_dir)
return target_dir
def unpack_file(file_path):
if file_path.endswith(".tar.bz2"):
return _unpack_bz2(file_path)
if file_path.endswith(".zip"):
return _unpack_zip(file_path)
return file_path
def _process_set_file(file_path, tmp_dir):
all_xml_files = []
with open(file_path, "r") as i:
files = i.readlines()
for f in files:
if not os.path.exists(f):
file_path = download_dataset(f, tmp_dir)
xml_files = find_xml_files(file_path, tmp_dir)
else:
xml_files = find_xml_files(f, tmp_dir)
all_xml_files.extend(xml_files)
return all_xml_files
def _scan_dir_for_xml(dir_path):
R = glob(os.path.join(dir_path, "**", "*.xml"), recursive=True)
if len(R) == 0:
R = glob(os.path.join(dir_path, "**", "*.xml.bz2"), recursive=True)
return R
def find_xml_files(file_path, tmp_dir):
if os.path.isdir(file_path):
return _scan_dir_for_xml(file_path)
if file_path.endswith(".set"):
return _process_set_file(file_path, tmp_dir)
if file_path.endswith(".tar.bz2") or file_path.endswith(".zip"):
dir_path = unpack_file(file_path)
return _scan_dir_for_xml(dir_path)
if file_path.endswith(".xml") or file_path.endswith(".xml.bz2"):
return [file_path]
def _should_parse(file_path, tool_selection):
if tool_selection is None:
return True
file_name = os.path.basename(file_path)
tool = file_name.split(".")[0]
return tool in tool_selection
def parse_benchmark_files(xml_files, tool_selection=None, prefix=""):
if tool_selection:
tool_selection = set(tool_selection)
result = {}
for xml_file in tqdm(xml_files):
# Typically the tool name occurs in the file path
if not _should_parse(xml_file, tool_selection):
logging.info("Skip %s as it not in tool selection." % xml_file)
continue
file_name = os.path.basename(xml_file)
tool = file_name.split(".")[0]
if xml_file.endswith(".bz2"):
svcomp.read_svcomp_bz2(xml_file, result, tool_name=tool, prefix=prefix)
else:
with open(xml_file, "r") as i:
svcomp.read_svcomp_xml_str(i.read(), result, tool_name=tool, prefix=prefix)
return result
def _collect_files(label_result, prefix, allowed_keys=None):
label_result = label_result['reachability']
for tool_results in label_result.values():
for key, result in tool_results.items():
if allowed_keys is not None and key not in allowed_keys:
continue
yield result['file'].replace(prefix, "")
def _scan_dir_for_c(file_path):
    return glob(os.path.join(file_path, "**", "*.[ci]"), recursive=True)
def _load_split(split_path):
with open(split_path, "r") as i:
split = json.load(i)
return split
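# Hedged example of the assumed split-file layout (identifiers are illustrative);
# each value is a list of task keys, and their union becomes `allowed_keys` below:
#
#     {
#         "train": ["task-id-1", "task-id-2"],
#         "test": ["task-id-3"]
#     }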
def _stream_proto_objs(scan, pipeline, processed_files=None):
c = 0
nix = 0
for f in tqdm(scan):
R = pipeline(f)
if R is None:
c += 1
print("Number of failed attempts: %d" % c)
else:
o = nix
nix += 1
if processed_files:
processed_files.append(f)
yield o, R
print("Number of failed attempts: %d" % c)
def phase1_labels(args):
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
if not os.path.exists(args.tmp_dir):
os.makedirs(args.tmp_dir)
if args.benchmark_url in DEFAULT_DATASETS:
args.bench_prefix = DEFAULT_PREFIX[args.benchmark_url]
args.tool_selection = DEFAULT_TOOL_SELECTION[args.benchmark_url]
args.benchmark_url = DEFAULT_DATASETS[args.benchmark_url]
# Test if benchmark is local file
if not os.path.exists(args.benchmark_url):
file_path = download_dataset(args.benchmark_url, args.tmp_dir)
xml_files = find_xml_files(file_path, args.tmp_dir)
else:
xml_files = find_xml_files(args.benchmark_url, args.tmp_dir)
logging.info("Compress all benchmark files into one single label file.")
if args.tool_selection:
with open(args.tool_selection, "r") as i:
args.tool_selection = json.load(i)
labels = parse_benchmark_files(xml_files, tool_selection=args.tool_selection,
prefix=args.bench_prefix)
allowed_files = set(
_collect_files(labels, args.bench_prefix,
allowed_keys=allowed_keys)
)
if args.rank_mode:
logging.error("Rank mode is currently unsupported")
if args.cleanup:
logging.info("Cleanup tmp folder.")
shutil.rmtree(args.tmp_dir)
exit()
else:
labels = label_utils.result_to_correct_labels(labels, args.tool_selection)
logging.info("Save gold labels...")
with open(os.path.join(args.output_dir, "gold_labels.json"), "w") as o:
json.dump(labels, o, indent=4)
if args.prune_unsolvable:
labels = label_utils.prune_unsolvable(labels)
logging.info("Save labels...")
with open(os.path.join(args.output_dir, "labels.json"), "w") as o:
json.dump(labels, o, indent=4)
if args.cleanup:
logging.info("Cleanup tmp folder.")
shutil.rmtree(args.tmp_dir)
del labels
del xml_files
return allowed_files
def phase2_preprocess_code(args, allowed_files):
found_files = set(
[file_path for file_path in _scan_dir_for_c(args.benchmark_code_dir)
if file_path.replace(args.benchmark_code_dir, "") in allowed_files]
)
vocab_file_path = os.path.join(
args.output_dir, "vocabulary.json"
)
lmdb_dir_path = os.path.join(
args.output_dir, "dataset"
)
lmdb_index_path = os.path.join(
args.output_dir, "index.lst"
)
if args.split:
lmdb_dir_path = os.path.join(
args.tmp_dir, "dataset"
)
if os.path.exists(lmdb_dir_path):
logging.info("Path %s seem to exists. Delete before proceeding. Skip" % lmdb_dir_path)
return lmdb_dir_path
cst_indexer = vocab_utils.MultiIndexer()
if os.path.exists(vocab_file_path):
with open(vocab_file_path, "r") as i:
cst_indexer.from_json_io(i)
processed_files = []
pipeline = p.Pipeline(
[
p.ClangASTParser(),
p.AST2CSTProcessor(token_index=cst_indexer,
set_semantic=not args.seq_semantic),
p.CSTCollate()
]
)
try:
lmdb_utils.feed_lmdb(lmdb_dir_path,
_stream_proto_objs(found_files, pipeline,
processed_files=processed_files)
)
finally:
with open(vocab_file_path, "w") as o:
cst_indexer.to_json(file_object=o)
with open(lmdb_index_path, "w") as o:
for file_path in processed_files:
o.write(f"{file_path}\n")
return lmdb_dir_path
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--benchmark_url", required=True,
help="Url to a repository of benchmark results. Alternatively can point to a local file \
or a set file containing multiple entries. \
Shortcuts: [bmc-ki, algorithms, sc, tools] for standard datasets")
parser.add_argument(
"--benchmark_code_dir", required=True,
help="Directory path to a clone of SV-Benchmark.\
For reproducing results, clone from https://github.com/sosy-lab/sv-benchmarks/tree/svcomp18"
)
parser.add_argument("--output_dir", required=True,
help="The output directory to store the dataset")
parser.add_argument(
"--tmp_dir", help="A temporary directory for intermediate files. Default: The same as output directory."
)
parser.add_argument(
"--split", help="Path to a file containing the dataset split. If not given, it will result in a monolithic dataset."
)
parser.add_argument(
"--rank_mode", action="store_true",
help="Whether preprocess the labels as a ranking."
)
parser.add_argument(
"--isomorph_dedup", action="store_true",
help="Whether to deduplicate isomorph CSTs in the dataset"
)
parser.add_argument(
"--prune_unsolvable", action="store_true",
help="Prune all instances which cannot be solved"
)
parser.add_argument(
"--tool_selection",
help="Process only a subset of tools for labeling. Default: All tools will be processed. If a standard dataset is used, this option will be set automatically."
)
parser.add_argument(
"--bench_prefix",
help="Benchmark files often include a system dependent prefix. Used to normalize data.",
default="../sv-benchmarks/c/"
)
parser.add_argument("--seq_semantic", action="store_true",
help="If set, handle ast elements as a sequence instead of set")
parser.add_argument(
"--cleanup", action="store_true",
help="Cleanup the tmp folder after every step. Only possible if output folder and tmp folder differ."
)
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
if not args.tmp_dir:
args.tmp_dir = args.output_dir
if args.tmp_dir == args.output_dir and args.cleanup:
logging.warning("Cannot cleanup as tmp folder and output folder match.")
args.cleanup = False
allowed_keys = None
if args.split:
splits = _load_split(args.split)
allowed_keys = set.union(
*[set(v) for v in splits.values()]
)
# Start processing
logging.info("######## Phase 1: Downloading and preprocessing labels ###########")
allowed_files = phase1_labels(args)
logging.info("######## Phase 2: Generate CSTs from C programs ###########")
if not os.path.exists(args.tmp_dir):
os.makedirs(args.tmp_dir)
phase2_preprocess_code(args, allowed_files)
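
# --- Hedged usage sketch (not part of the original script) ---
# Paths are illustrative assumptions; "tools" expands to the SV-COMP 2018
# results archive via DEFAULT_DATASETS above.
#
#     python run_dataset_generation.py \
#         --benchmark_url tools \
#         --benchmark_code_dir ./sv-benchmarks/c/ \
#         --output_dir ./dataset_out --prune_unsolvable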
| 29.878319 | 168 | 0.61666 | 1,732 | 13,505 | 4.579677 | 0.174365 | 0.048412 | 0.016389 | 0.009708 | 0.290343 | 0.236006 | 0.185451 | 0.136662 | 0.122794 | 0.104892 | 0 | 0.003798 | 0.278712 | 13,505 | 451 | 169 | 29.944568 | 0.810492 | 0.007108 | 0 | 0.184713 | 0 | 0.022293 | 0.183109 | 0.016674 | 0 | 0 | 0 | 0 | 0 | 1 | 0.050955 | false | 0 | 0.041401 | 0.003185 | 0.16879 | 0.006369 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7efab80019cfb00ccc43dce1f8091790aec914d7 | 621 | py | Python | Source Codes Testing/cow and bull for n number of digits.py | prathimacode-hub/Python | 4d343945f58662cb6f52f38dc54089c045f3ba62 | [
"MIT"
] | 6 | 2019-03-08T12:32:01.000Z | 2022-02-27T11:03:28.000Z | Source Codes Testing/cow and bull for n number of digits.py | prathimacode-hub/Python | 4d343945f58662cb6f52f38dc54089c045f3ba62 | [
"MIT"
] | 10 | 2018-10-28T07:18:15.000Z | 2021-10-01T06:45:59.000Z | Source Codes Testing/cow and bull for n number of digits.py | prathimacode-hub/Python | 4d343945f58662cb6f52f38dc54089c045f3ba62 | [
"MIT"
] | 10 | 2018-10-29T17:19:26.000Z | 2021-05-20T12:17:02.000Z | import random as ro
def cow_bull(na, n):
un = input("enter a number you guess")
ua = list(str(un))
cow = 0
bull = 0
for i in ua:
if int(i) in na:
cow += 1
for i in range(n):
if na[i] == int(ua[i]):
bull += 1
cow -= 1
print("cow :{0},bull :{1}".format(cow, bull))
return bull
n = int(input("how many digits number you need? "))
na = ro.sample(range(10), n)
a = map(str, na)
answer = ''.join(a)
ans = cow_bull(na, n)
while ans != n:
ans = cow_bull(na, n)
print("answer", answer)
print("YOU WON!")
| 20.032258 | 52 | 0.492754 | 101 | 621 | 3 | 0.415842 | 0.092409 | 0.089109 | 0.09901 | 0.085809 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022222 | 0.347826 | 621 | 30 | 53 | 20.7 | 0.725926 | 0 | 0 | 0.083333 | 0 | 0 | 0.153976 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.041667 | 0 | 0.125 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7efc24dfb9ae6ec2e7bac6c7a07dfd85e41fcf84 | 521 | py | Python | controlpanel/api/models/userapp.py | ministryofjustice/analytics-platform-control-panel-public | 289143280ed79a05be470d57dc3b1fd9179758cf | [
"MIT"
] | null | null | null | controlpanel/api/models/userapp.py | ministryofjustice/analytics-platform-control-panel-public | 289143280ed79a05be470d57dc3b1fd9179758cf | [
"MIT"
] | 93 | 2021-08-09T16:09:59.000Z | 2022-03-28T16:13:31.000Z | controlpanel/api/models/userapp.py | ministryofjustice/analytics-platform-control-panel-public | 289143280ed79a05be470d57dc3b1fd9179758cf | [
"MIT"
] | 2 | 2021-03-30T15:13:24.000Z | 2021-04-11T06:26:09.000Z | from django.db import models
from django_extensions.db.models import TimeStampedModel
class UserApp(TimeStampedModel):
user = models.ForeignKey(
"User", on_delete=models.CASCADE, related_name='userapps')
app = models.ForeignKey(
"App", on_delete=models.CASCADE, related_name='userapps')
is_admin = models.BooleanField(default=False)
class Meta:
db_table = "control_panel_api_userapp"
unique_together = (
('app', 'user'),
)
ordering = ('id',)
| 28.944444 | 66 | 0.662188 | 57 | 521 | 5.859649 | 0.561404 | 0.05988 | 0.083832 | 0.125749 | 0.239521 | 0.239521 | 0.239521 | 0 | 0 | 0 | 0 | 0 | 0.226488 | 521 | 17 | 67 | 30.647059 | 0.828784 | 0 | 0 | 0 | 0 | 0 | 0.109405 | 0.047985 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7efc5bd10d26cc916efd1b9f0a6c6939c9b34e0a | 315 | py | Python | Python-Programs/chat_app/server.py | adityaverma121/Simple-Programs | 8450560b97f89e0fa3da16a623ad35c0b26409c9 | [
"MIT"
] | 71 | 2021-09-30T11:25:12.000Z | 2021-10-03T11:33:22.000Z | Python-Programs/chat_app/server.py | adityaverma121/Simple-Programs | 8450560b97f89e0fa3da16a623ad35c0b26409c9 | [
"MIT"
] | 186 | 2021-09-30T12:25:16.000Z | 2021-10-03T13:45:04.000Z | Python-Programs/chat_app/server.py | adityaverma121/Simple-Programs | 8450560b97f89e0fa3da16a623ad35c0b26409c9 | [
"MIT"
] | 385 | 2021-09-30T11:34:23.000Z | 2021-10-03T13:41:00.000Z | import socket
host = ""
port = 50000
s = socket.socket()
s.bind((host, port))
s.listen(1)
conn, addr = s.accept()
print("Connected by : ", addr)
while True:
data = conn.recv(2000)
print(data.decode())
response = raw_input("Response : ")
bytes = response.encode()
conn.send(bytes)
s.close()
| 15 | 39 | 0.625397 | 44 | 315 | 4.454545 | 0.636364 | 0.081633 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.039841 | 0.203175 | 315 | 20 | 40 | 15.75 | 0.741036 | 0 | 0 | 0 | 0 | 0 | 0.08254 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0.133333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7effeab442d1a876c9be3f9bf01356da8fdce95a | 6,430 | py | Python | automation/spe2pngFFNew.py | h-jones/quinlab | d88e2ff945e104edbd268e21d40760a4a55e4b60 | [
"MIT"
] | 2 | 2019-08-27T16:28:27.000Z | 2019-11-30T13:04:09.000Z | automation/spe2pngFFNew.py | h-jones/quinlab | d88e2ff945e104edbd268e21d40760a4a55e4b60 | [
"MIT"
] | null | null | null | automation/spe2pngFFNew.py | h-jones/quinlab | d88e2ff945e104edbd268e21d40760a4a55e4b60 | [
"MIT"
] | null | null | null | import math
import glob
import re
import numpy as np
import matplotlib.pylab as pylab
import matplotlib.pyplot as plt
import spereadNew as sperd
def spe2pngFFNew(directory='./'):
dirname = directory
#dirname = '070510/sample/'; #put directory name on top of output filenames
pl = 770e-7
##########################################################
############# CHANGE THE RANGE OF WAVELENGTH #############
offset_nm = 0 #to raise the curve
Snm=711.943 + offset_nm; #Shorter wavelength displayed. center @770nm; 671.855(@730)681.875(@740)691.895(@750)701.915(@760)
Lnm=827.815 + offset_nm; #Longer wavelength displayed. center @770nm; 787.906(@730)797.855(@740)807.863(@750)817.842(@760)
#########################################################
#671.855(@730)681.875(@740)691.895(@750)701.915(@760)711.935(@770)721.955(@780)731.976(@790)
#787.906(@730)797.855(@740)807.863(@750)817.842(@760)827.82(@770)837.798(@780)847.776(@790)
#########################################################
#########################################################
anglelim = 50
#klim=math.sin(anglelim*math.pi/180)*(2*math.pi/pl)
filenames = glob.glob(dirname + '*.spe')
#filenames = glob.glob('*300gmm.spe')
num_files=len(filenames)
# Acquire the background image
background_filename = 'back.spe'
(background_image,background_xaxis,error_code,message) = sperd.spereadJanis(background_filename)
if (error_code == -1):
print ('WARNING function spe2png_py: Background file %s not found in'
' directory %s.\n' %(background_filename,dirname))
background_exists = 0
elif (error_code <= 0):
print(message)
print('error code: %d.\n' %error_code)
exit()
else:
background_exists = 1
for file in filenames:
input_filename = file
#input_filename_up = filename.upper()
# directory name + input filename for plot convenience
dir_input_filename = dirname + input_filename
png_filename = input_filename.rstrip('.spe').rstrip('.SPE') + '.png'
png_filename_log = 'FFLog' + png_filename
png_filename_sum = 'FF' + png_filename
# ascii_filename = re.sub('.spe','.dat',input_filename)
# filesize = filenames[i] #Not done, but is never used.
(image, xaxis, error_code, message) = sperd.spereadJanis(input_filename)
if (error_code <= 0):
print(message)
print('error code: %d.\n' %error_code)
exit()
if (background_exists):
if (image.size == background_image.size):
if (background_xaxis == xaxis):
image = image - background_image
else:
print ('WARNING function spe2png_py: Background image',
'%s has different xaxis than %s.\n' %(background_image,
input_filename))
else:
print ('WARNING function spe2png_py: Background image %s is of',
'different size than %s.\n' %(background_filename,
input_filename))
# Angle (y-axis) calibration
fobj = 4000 # objective focal length in um
ypixsize = 20 # pixel size in um
yindex = np.linspace(-199,200,400)
yaxis = np.arcsin(ypixsize*yindex/fobj)*180/math.pi #degrees
yk = (ypixsize*yindex/fobj)*2*math.pi/pl #degrees
# set display range for the spectral image
xgap = abs(xaxis[0] - xaxis[1])/2
iniIdx = min(np.where(abs(xaxis-Snm)<xgap))
endIdx = max(np.where(abs(xaxis-Lnm)<xgap))
if not iniIdx.any():
iniIdx = 0
else:
iniIdx = np.amin(iniIdx)
if not endIdx.any():
endIdx = max(xaxis.shape)
else:
endIdx = np.amax(endIdx) + 1
angle_correction = -0.4 # shift the center to right
ygap = abs(yaxis[0]-yaxis[1])/2
iniy = min(np.where(abs(yaxis+anglelim+angle_correction)<ygap))
endy = max(np.where(abs(yaxis-anglelim+angle_correction)<ygap))
if not iniy.any():
iniy = 0
else:
iniy = np.amin(iniy)
if not endy.any():
endy = max(yaxis.shape)
else:
endy = np.amax(endy) + 1
# ykgap=abs(yk[0]-yk[1])/2
# iniyk=min(np.where(abs(yk+klim)<ykgap))
# endyk=max(np.where(abs(yk-klim)<ykgap))
# if not iniyk.any():
# iniyk = 0
# else:
# iniyk = np.amin(iniyk)
# if not endyk.any():
# endyk = max(yk.shape)
# else:
# endyk = np.amax(endyk) + 1
# Region of interest for the figure
#ROIx = xaxis[iniIdx:endIdx]
#ROIy = yaxis[iniy:endy] # +angle_correction
#ROIyk = yk[iniyk:endyk]
#ROIyk2 = np.sin(ROIy*math.pi/180)*2*math.pi/pl
#ROIxeV = 1238.82/ROIx
# definition of ROIimg (Region of Interest)
ROIimg = image[:,iniy:endy,iniIdx:endIdx].reshape(endy-iniy,endIdx-iniIdx)
#ROIimg = ROIimg / np.amax(ROIimg) #intensity normalization
# clims = np.array([100, np.amax(ROIimg[50:300,750:1000])+1000])
vmin = 100
vmax = np.amax(ROIimg[50:300,750:1000])+1000
imgplot = plt.imshow(ROIimg.astype(float), aspect='auto', cmap='jet',
vmin = vmin, vmax = vmax,
extent=[xaxis[iniIdx],xaxis[endIdx-1],
yaxis[iniy],yaxis[endy-1]])
#imgplot = plt.imshow(ROIimg, aspect='auto', cmap='jet')
# plt.colorbar()
plt.ylabel('angle (degrees)')
#plt.ylabel('pixel index')
#plt.xlabel('Spatial Distance (um)')
plt.xlabel('Wavelength (nm)')
A = input_filename.partition('.')[0]
A = '1/24/14 C3997.5' + A
#plt.ylim(-199, 200)
#plt.xlim(768.2, 772)
# plt.xlim(775, 790)
# plt.ylim(-40, 40) #IMGs: x is for pixels, y: angle
#axis square
A = A.replace('_',' ')
A = A.replace('p','.')
plt.title(A)
pylab.savefig(png_filename, bbox_inches='tight')
plt.close()
if __name__ == '__main__':
spe2pngFFNew()
| 41.753247 | 130 | 0.536236 | 772 | 6,430 | 4.38601 | 0.331606 | 0.042233 | 0.01772 | 0.007974 | 0.199941 | 0.199941 | 0.153574 | 0.142942 | 0.101595 | 0.072652 | 0 | 0.080973 | 0.297045 | 6,430 | 154 | 131 | 41.753247 | 0.668142 | 0.292691 | 0 | 0.154639 | 0 | 0 | 0.0875 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.010309 | false | 0 | 0.072165 | 0 | 0.082474 | 0.072165 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d0137642cd7424ac51051ceae9522e9fb62fb43 | 2,237 | py | Python | torch_geometric/nn/models/mlp.py | ljhOfGithub/pytorch_geometric | ade2caff9e46f20e7a1ff0e9a8396c9e884ced4b | [
"MIT"
] | 1 | 2022-03-09T08:21:11.000Z | 2022-03-09T08:21:11.000Z | torch_geometric/nn/models/mlp.py | ChenShengsGitHub/pytorch_geometric | 86308313d6f1af56e5931e2ca89bb1a867c10ff3 | [
"MIT"
] | 1 | 2021-11-29T18:14:13.000Z | 2021-11-29T18:14:13.000Z | torch_geometric/nn/models/mlp.py | ChenShengsGitHub/pytorch_geometric | 86308313d6f1af56e5931e2ca89bb1a867c10ff3 | [
"MIT"
] | null | null | null | from typing import List
import torch
from torch import Tensor
import torch.nn.functional as F
from torch.nn import Linear, BatchNorm1d, Identity
class MLP(torch.nn.Module):
r"""A multi-layer perception (MLP) model.
Args:
channel_list (List[int]): List of input, intermediate and output
channels.
:obj:`len(channel_list) - 1` denotes the number of layers of the
MLP.
dropout (float, optional): Dropout probability of each hidden
embedding. (default: :obj:`0.`)
batch_norm (bool, optional): If set to :obj:`False`, will not make use
of batch normalization. (default: :obj:`True`)
relu_first (bool, optional): If set to :obj:`True`, ReLU activation is
applied before batch normalization. (default: :obj:`False`)
"""
def __init__(self, channel_list: List[int], dropout: float = 0.,
batch_norm: bool = True, relu_first: bool = False):
super().__init__()
assert len(channel_list) >= 2
self.channel_list = channel_list
self.dropout = dropout
self.relu_first = relu_first
self.lins = torch.nn.ModuleList()
for dims in zip(channel_list[:-1], channel_list[1:]):
self.lins.append(Linear(*dims))
self.norms = torch.nn.ModuleList()
for dim in zip(channel_list[1:-1]):
self.norms.append(BatchNorm1d(dim) if batch_norm else Identity())
self.reset_parameters()
def reset_parameters(self):
for lin in self.lins:
lin.reset_parameters()
for norm in self.norms:
if hasattr(norm, 'reset_parameters'):
norm.reset_parameters()
def forward(self, x: Tensor) -> Tensor:
""""""
for lin, norm in zip(self.lins[:-1], self.norms):
x = lin.forward(x)
if self.relu_first:
x = x.relu_()
x = norm(x)
if not self.relu_first:
x = x.relu_()
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.lins[-1](x)
return x
def __repr__(self) -> str:
return f'{self.__class__.__name__}({str(self.channel_list)[1:-1]})'
| 34.953125 | 78 | 0.588735 | 289 | 2,237 | 4.397924 | 0.32526 | 0.086546 | 0.047207 | 0.028324 | 0.09284 | 0.06609 | 0.031471 | 0 | 0 | 0 | 0 | 0.008855 | 0.29325 | 2,237 | 63 | 79 | 35.507937 | 0.795066 | 0.266428 | 0 | 0.05 | 0 | 0 | 0.046203 | 0.036076 | 0 | 0 | 0 | 0 | 0.025 | 1 | 0.1 | false | 0 | 0.125 | 0.025 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d01c7461dce851ea5e803f1b0f137c21097c55f | 1,818 | py | Python | settings/test_parameters.py | vibes-keti/VibePredictionEngine_2ndYear | ed867494d4f9b503e8905dbba5d7d33257e0f529 | [
"MIT"
] | null | null | null | settings/test_parameters.py | vibes-keti/VibePredictionEngine_2ndYear | ed867494d4f9b503e8905dbba5d7d33257e0f529 | [
"MIT"
] | null | null | null | settings/test_parameters.py | vibes-keti/VibePredictionEngine_2ndYear | ed867494d4f9b503e8905dbba5d7d33257e0f529 | [
"MIT"
] | null | null | null | #### List
target_feature_list = ['CO2ppm','H2Sppm','Humidity','Temperature','NH3ppm']
past_min_list = [80, 120, 720]
future_min_list = [12, 20, 60]
re_frequency_min_list = [4, 10]
all_features =['CO2ppm', 'H2Sppm', 'Humidity', 'NH3ppm', 'Temperature', 'out_CO',
'out_NO2', 'out_O3', 'out_PM10', 'out_PM25', 'out_SO2', 'out_humid',
'out_pressure', 'out_rainfall', 'out_sunshine', 'out_temp',
'out_wind_direction', 'out_wind_speed']
####
target_feature ='CO2ppm'
features_list_set=[['CO2ppm'], ['CO2ppm','NH3ppm'],['CO2ppm','H2Sppm','Humidity','Temperature','NH3ppm'],
['CO2ppm','NH3ppm','Humidity','Temperature','out_humid','out_temp'], all_features]
features_list= features_list_set[3]
features ={'target_feature':target_feature, "feature_list": features_list}
time_min={'past_min' :past_min_list[0], "future_min": future_min_list[0],"re_frequency_min":re_frequency_min_list[0] }
learning_method_num = 4
## CO2ppm ##
test_y_quantile_set=[600, 800]
""""""
"""
features_list=['CO2ppm', 'H2Sppm', 'Humidity', 'NH3ppm', 'Temperature','out_humid','out_temp', 'out_wind_speed']
features ={'target_feature':target_feature, "feature_list": features_list}
time_min={'past_min' :120, "future_min": 20,"re_frequency_min":4 }
learning_method_num = 0
#CO2ppm
test_y_quantile_set=[600, 800]
"""
"""
features_list=['CO2ppm', 'H2Sppm', 'Humidity', 'NH3ppm', 'Temperature']
features ={'target_feature':target_feature, "feature_list": features_list}
time_min={'past_min' :120, "future_min": 20,"re_frequency_min":4 }
learning_method_num = 4
features_list=['CO2ppm', 'H2Sppm', 'Humidity', 'NH3ppm', 'Temperature']
features ={'target_feature':target_feature, "feature_list": features_list}
time_min={'past_min' :720, "future_min": 60,"re_frequency_min":10 }
learning_method_num = 4
"""
| 40.4 | 118 | 0.710121 | 244 | 1,818 | 4.889344 | 0.204918 | 0.108969 | 0.100587 | 0.080469 | 0.615256 | 0.521375 | 0.485331 | 0.485331 | 0.485331 | 0.485331 | 0 | 0.054202 | 0.09681 | 1,818 | 44 | 119 | 41.318182 | 0.672351 | 0.006051 | 0 | 0 | 0 | 0 | 0.379487 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d046d25d9d046cbc84345710ba9ae2a8087cab0 | 726 | py | Python | corc/providers/apache/helpers.py | rasmunk/corc | 2d2ba92ab791f50fa46e1ff2cdc0035925032671 | [
"MIT"
] | 2 | 2020-10-31T14:55:26.000Z | 2022-02-07T19:53:33.000Z | corc/providers/apache/helpers.py | rasmunk/corc | 2d2ba92ab791f50fa46e1ff2cdc0035925032671 | [
"MIT"
] | 3 | 2020-08-27T14:10:16.000Z | 2021-09-23T23:31:19.000Z | corc/providers/apache/helpers.py | rasmunk/corc | 2d2ba92ab791f50fa46e1ff2cdc0035925032671 | [
"MIT"
] | 1 | 2022-02-07T19:53:34.000Z | 2022-02-07T19:53:34.000Z | from libcloud.compute.providers import get_driver
from corc.helpers import import_from_module
def discover_apache_driver(provider):
return get_driver(provider)
def discover_apache_driver_options(provider):
loader = import_from_module(
"corc.providers.apache.config_{}".format(provider),
"config_{}".format(provider),
"load_driver_options",
)
return loader
def new_apache_client(provider, provider_kwargs, **kwargs):
# Discover driver
driver = discover_apache_driver(provider)
options_loader = discover_apache_driver_options(provider)
driver_args, driver_kwargs = options_loader(provider, provider_kwargs, **kwargs)
return driver(*driver_args, **driver_kwargs)
| 30.25 | 84 | 0.758953 | 85 | 726 | 6.129412 | 0.282353 | 0.107486 | 0.153551 | 0.088292 | 0.134357 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.152893 | 726 | 23 | 85 | 31.565217 | 0.847154 | 0.020661 | 0 | 0 | 0 | 0 | 0.083216 | 0.043724 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1875 | false | 0 | 0.1875 | 0.0625 | 0.5625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d046e29f34e9bbd0983dd2217d21aaa93bc864e | 4,275 | py | Python | examples/level_o_meter.py | twynham/pi-pico | 1e7b099c1c1e4f904773e6b3e46beeef96a22fc1 | [
"MIT"
] | null | null | null | examples/level_o_meter.py | twynham/pi-pico | 1e7b099c1c1e4f904773e6b3e46beeef96a22fc1 | [
"MIT"
] | null | null | null | examples/level_o_meter.py | twynham/pi-pico | 1e7b099c1c1e4f904773e6b3e46beeef96a22fc1 | [
"MIT"
] | 1 | 2021-06-09T13:24:10.000Z | 2021-06-09T13:24:10.000Z | import utime
import random
import picoexplorer as display
from machine import Pin, ADC, Timer
# Level-o-meter
# By Stewart Twynham
#
# Display an animated level counter on the display with six LEDs run as a bargraph
#
width = display.get_width()
height = display.get_height()
centre = round(width / 2)
middle = round(height / 2)
display_buffer = bytearray(width * height * 2) # 2-bytes per pixel (RGB565)
display.init(display_buffer)
# initialise the colours
class Colour:
def __init__(self, bk, bb, fl, tx):
self.background = bk
self.bubble = bb
self.fill = fl
self.text = tx
Colours = []
# red
Colours.append(
Colour(
display.create_pen(128,64,64), display.create_pen(255,128,128), display.create_pen(128,64,64), display.create_pen(255,0,0)
)
)
# blue
Colours.append(
Colour(
display.create_pen(64,64,128), display.create_pen(128,128,255), display.create_pen(64,64,128), display.create_pen(255,255,255)
)
)
# initialise our LED bargraph
bartimer = Timer()
leds = []
for i in range (0,6):
leds.append(Pin(i + 1, Pin.OUT))
def update_bargraph(reading):
for i in range (0,6):
if (reading > i):
leds[5-i].value(1)
else:
leds[5-i].value(0)
def update_leds(bartimer):
if (bargraph_value == 0):
for i in range (0,5):
leds[i].value(0)
leds[5].toggle()
else:
for i in range (0,6):
if (bargraph_value > i):
leds[5-i].value(1)
else:
leds[5-i].value(0)
def scale(reading, max_value):
return (round((reading - 250) / 65285 * max_value))
class Bubble:
def __init__(self, x, y, r, t):
self.xpos = x
self.ypos = y
self.radius = r
self.age = t
pot = ADC(1).read_u16()
bargraph_value = scale(pot, 6)
bartimer.init(freq=2.5, mode=Timer.PERIODIC, callback=update_leds)
# initialise bubbles
Bubbles = []
for i in range(0, 25):
r = random.randint(0, (3 + scale(pot, 10))) + 3
Bubbles.append(
Bubble(
random.randint(r, r + (width - 2 * r)),
random.randint(r, r + (height - 60 - 1 * r)) + 60,
r,
0
)
)
while True:
this_colour = 1
pot = ADC(1).read_u16()
litres = scale(pot, 500)
percentage = scale(pot, 100)
bargraph_value = scale(pot, 6)
if (bargraph_value < 2):
this_colour = 0
level = scale(pot, height-60)
max_bubbles = scale(pot, 30)
display.set_pen(40, 40, 40)
display.clear()
display.set_pen(Colours[this_colour].bubble)
display.rectangle(0,height - level - 2,width,2)
display.set_pen(Colours[this_colour].background)
display.rectangle(0,height - level,width,level)
display.set_pen(Colours[this_colour].text)
display.text ("Fresh water", 4, 4, 236, 2)
display.text ("{:} l".format(litres), 4, 24, 236, 4)
display.text ("{:.0f}%".format(percentage), 126, 18, 236, 5)
count = 0
for bubble in Bubbles:
bubble.ypos -= (0.07 * bubble.age * bubble.age)
ymin = bubble.radius + height - level
bubble.age += 1
count += 1
if bubble.ypos < ymin:
r = random.randint(0, (3 + scale(pot, 10))) + 3
bubble.radius = r
bubble.ypos = height - bubble.radius
bubble.xpos = random.randint(r, r + (height - 1 * r))
bubble.age = random.randint(0,4)
#display.set_pen(bubble.colour)
if (bubble.radius < level and count < max_bubbles):
display.set_pen(Colours[this_colour].bubble)
display.circle(int(bubble.xpos), int(bubble.ypos), int(bubble.radius))
display.set_pen(Colours[this_colour].fill)
display.circle(int(bubble.xpos), int(bubble.ypos), int(bubble.radius)-2)
# show warning message if very low
if (bargraph_value < 2):
display.set_pen(Colours[this_colour].text)
display.rectangle(centre-80,middle - 40,160,80)
display.set_pen(Colours[this_colour].background)
display.text ("Level very low", centre-70, middle-30, 140, 3)
display.update()
utime.sleep(0.01) | 27.056962 | 134 | 0.58924 | 596 | 4,275 | 4.137584 | 0.246644 | 0.036496 | 0.047445 | 0.056772 | 0.381995 | 0.306569 | 0.273723 | 0.260746 | 0.15734 | 0.103812 | 0 | 0.065415 | 0.277661 | 4,275 | 158 | 135 | 27.056962 | 0.733161 | 0.065965 | 0 | 0.245614 | 0 | 0 | 0.009296 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04386 | false | 0 | 0.035088 | 0.008772 | 0.105263 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d04a038c1e92544ff56323c4a1dd7a4c303bf95 | 2,069 | py | Python | telegram_upload/management.py | oxosec/telegram-upload | 0c68ab0a672695ff7f447b600bcb1afab509c16f | [
"MIT"
] | 1 | 2021-12-06T06:21:30.000Z | 2021-12-06T06:21:30.000Z | telegram_upload/management.py | oxosec/telegram-upload | 0c68ab0a672695ff7f447b600bcb1afab509c16f | [
"MIT"
] | null | null | null | telegram_upload/management.py | oxosec/telegram-upload | 0c68ab0a672695ff7f447b600bcb1afab509c16f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Console script for telegram-upload."""
import click
from telegram_upload.client import Client
from telegram_upload.config import default_config, CONFIG_FILE
@click.command()
@click.argument('files', nargs=-1)
@click.option('--to', default='me', help='Phone number, username, chat id or "me" (saved messages). By default "me".')
@click.option('--config', default=None, help='Configuration file to use. By default "{}".'.format(CONFIG_FILE))
@click.option('-d', '--delete-on-success', is_flag=True, help='Delete local file after successful upload.')
@click.option('--print-file-id', is_flag=True, help='Print the id of the uploaded file after the upload.')
def upload(files, to, config, delete_on_success, print_file_id):
"""Upload one or more files to Telegram using your personal account.
The maximum file size is 1.5 GiB and, by default, the files will be saved in
your saved messages.
"""
client = Client(config or default_config())
client.start()
client.send_files(to, files, delete_on_success, print_file_id)
@click.command()
@click.option('--from', '-f', 'from_', default='me',
help='Phone number, username, chat id or "me" (saved messages). By default "me".')
@click.option('--config', default=None, help='Configuration file to use. By default "{}".'.format(CONFIG_FILE))
@click.option('-d', '--delete-on-success', is_flag=True,
help='Delete telegram message after successful download. Useful for creating a download queue.')
def download(from_, config, delete_on_success):
"""Download all the latest messages that are files in a chatt, by default download
from "saved messages". It is recommended to forward the files to download to
"saved messages" and use parameter ``--delete-on-success``. Forwarded messages will
be removed from the chat after downloading, such as a download queue.
"""
client = Client(config or default_config())
client.start()
messages = client.find_files(from_)
client.download_files(from_, messages, delete_on_success)
| 49.261905 | 118 | 0.712905 | 297 | 2,069 | 4.86532 | 0.306397 | 0.053287 | 0.072664 | 0.029066 | 0.36263 | 0.36263 | 0.326644 | 0.326644 | 0.265744 | 0.265744 | 0 | 0.002283 | 0.153214 | 2,069 | 41 | 119 | 50.463415 | 0.822489 | 0.254229 | 0 | 0.333333 | 0 | 0 | 0.343353 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.125 | 0 | 0.208333 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d054b66909405378783203c437caba234f28583 | 2,886 | py | Python | datapro/views.py | salmon-buck/salmon-buck-web | 465f8ca3fbff8578814a2a95a39d83dfd32749d7 | [
"MIT"
] | null | null | null | datapro/views.py | salmon-buck/salmon-buck-web | 465f8ca3fbff8578814a2a95a39d83dfd32749d7 | [
"MIT"
] | 8 | 2020-02-12T00:32:57.000Z | 2022-02-10T11:30:16.000Z | datapro/views.py | salmon-buck/salmon-buck-web | 465f8ca3fbff8578814a2a95a39d83dfd32749d7 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from .queryBoolean import *
from .synon import *
from .match import *
from .readDB import *
# import nltk
# Create your views here.
def home(request):
return render(request,'home.html',{'db':db})
def description_search(request):
query = request.GET['query1']
q_processed=preProcessing(query)
query=query.split()
candidate=synonyms(query, q_processed)
query_list=randomPick(candidate)
avgResult = match(query_list)
list = []
db_list = []
for item in range(5):
list.append(avgResult[item][0])
db_list.append(db[avgResult[item][0]])
original_data = []
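# Each line of static/recipe.txt appears to hold one recipe: top-level fields are
# separated by ';', the fields at index 2 and 4 are backslash-separated lists, and the
# field at index 5 is further split on ':' (inferred from the parsing below).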
for i in range(5):
f = open('static/recipe.txt','rt',encoding ='UTF8')
while True:
line = f.readline()
if not line: break
data = line.split(';')
data[2] = data[2].split('\\')
data[4] = data[4].split('\\')
data[5] = data[5].split(':')
del data[5][0]
for j in range(len(data[5])-1):
data[5][j] = data[5][j].strip()
data[5][j] = data[5][j][:-1]
# data[7] = data[7].strip()
if data[1].strip() in db_list[i]['name'].strip():
original_data.append(data)
print("yes")
f.close()
return render(request,'description_result.html',{'query':query,'avgResult':list,'db_list':db_list,'data':original_data})
def ingredient_search(request):
query = request.GET['query2']
candidate = FindN(query)
NAdata_name = NA_Ingredient_nameonly(candidate, db) # all matching dishes, unsorted
NAdata_all = NA_Ingredient_all(candidate, db) # every computed value for the unsorted dishes
NAdata = NA_Ingredient(candidate, db) # sorted dishes, without the weights
NAdata_weight = NA_Ingredient_weight(candidate, db) # e.g. only curries containing both chicken and onion remain; the top 5 are returned by ranking
db_list = []
for item in range(5):
db_list.append(db[NAdata[item]])
original_data = []
for i in range(5):
f = open('static/recipe.txt','rt',encoding ='UTF8')
while True:
line = f.readline()
if not line: break
data = line.split(';')
data[2] = data[2].split('\\')
data[4] = data[4].split('\\')
data[5] = data[5].split(':')
del data[5][0]
for j in range(len(data[5])-1):
data[5][j] = data[5][j].strip()
data[5][j] = data[5][j][:-1]
if data[1].strip() in db_list[i]['name'].strip():
original_data.append(data)
print("yes")
f.close()
return render(request,'ingredient_result.html',{'data':original_data,'db_list':db_list,'query':query,'candidate':candidate,'NAdata_name':NAdata_name,'NAdata_all':NAdata_all,'NAdata':NAdata,'NAdata_weight':NAdata_weight}) | 35.62963 | 224 | 0.567568 | 381 | 2,886 | 4.194226 | 0.262467 | 0.050063 | 0.030038 | 0.025031 | 0.43179 | 0.396746 | 0.396746 | 0.370463 | 0.370463 | 0.370463 | 0 | 0.02148 | 0.274082 | 2,886 | 81 | 224 | 35.62963 | 0.741289 | 0.060984 | 0 | 0.597015 | 0 | 0 | 0.085091 | 0.016648 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044776 | false | 0 | 0.074627 | 0.014925 | 0.164179 | 0.029851 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d07dcf425fa9e8ea83a695623428d809c25c23f | 2,510 | py | Python | thop/onnx_profile.py | jinmingyi1998/pytorch-OpCounter | 010d2857eeab7397615249f855342806aaa3bc04 | [
"MIT"
] | 3,444 | 2018-02-11T06:51:19.000Z | 2022-03-31T06:40:39.000Z | thop/onnx_profile.py | jinmingyi1998/pytorch-OpCounter | 010d2857eeab7397615249f855342806aaa3bc04 | [
"MIT"
] | 143 | 2018-11-18T03:23:07.000Z | 2022-03-31T01:52:16.000Z | thop/onnx_profile.py | jinmingyi1998/pytorch-OpCounter | 010d2857eeab7397615249f855342806aaa3bc04 | [
"MIT"
] | 475 | 2018-02-22T07:50:36.000Z | 2022-03-26T10:26:29.000Z | import torch
import torch.nn
import onnx
from onnx import numpy_helper
import numpy as np
from thop.vision.onnx_counter import onnx_operators
class OnnxProfile():
def __init__(self) -> None:
pass
def calculate_params(self, model: onnx.ModelProto):
onnx_weights = model.graph.initializer
params = 0
for onnx_w in onnx_weights:
try:
weight = numpy_helper.to_array(onnx_w)
params += np.prod(weight.shape)
except Exception as _:
pass
return params
def create_dict(self, weight, input, output):
diction = {}
for w in weight:
dim = np.array(w.dims)
diction[str(w.name)] = dim
if (dim.size == 1):
diction[str(w.name)] = np.append(1, dim)
for i in input:
# print(i.type.tensor_type.shape.dim[0].dim_value)
dim = np.array(i.type.tensor_type.shape.dim[0].dim_value)
# print(i.type.tensor_type.shape.dim.__sizeof__())
#name2dims[str(i.name)] = [dim]
dim = []
for key in i.type.tensor_type.shape.dim:
dim = np.append(dim, int(key.dim_value))
# print(key.dim_value)
# print(dim)
diction[str(i.name)] = dim
if(dim.size == 1):
diction[str(i.name)] = np.append(1, dim)
for o in output:
dim = np.array(o.type.tensor_type.shape.dim[0].dim_value)
diction[str(o.name)] = [dim]
if(dim.size == 1):
diction[str(o.name)] = np.append(1, dim)
return diction
def nodes_counter(self, diction, node):
if node.op_type not in onnx_operators:
print("Sorry, we haven't add ", node.op_type, "into dictionary.")
return 0, None, None
else:
fn = onnx_operators[node.op_type]
return fn(diction, node)
def calculate_macs(self, model: onnx.ModelProto) -> torch.DoubleTensor:
macs = 0
name2dims = {}
weight = model.graph.initializer
nodes = model.graph.node
input = model.graph.input
output = model.graph.output
name2dims = self.create_dict(weight, input, output)
macs = 0
for n in nodes:
macs_adding, out_size, outname = self.nodes_counter(name2dims, n)
name2dims[outname] = out_size
macs += macs_adding
return np.array(macs[0])
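# Illustrative usage sketch (the model path is hypothetical):
#   model = onnx.load("model.onnx")
#   profiler = OnnxProfile()
#   print("params:", profiler.calculate_params(model))
#   print("MACs:", profiler.calculate_macs(model))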
| 33.026316 | 77 | 0.558566 | 322 | 2,510 | 4.220497 | 0.254658 | 0.04415 | 0.051508 | 0.069904 | 0.210449 | 0.198676 | 0.15379 | 0.129507 | 0.047093 | 0 | 0 | 0.01135 | 0.333068 | 2,510 | 75 | 78 | 33.466667 | 0.800478 | 0.063347 | 0 | 0.114754 | 0 | 0 | 0.016205 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081967 | false | 0.032787 | 0.098361 | 0 | 0.278689 | 0.016393 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d0810c284455d1bdbfe3bcdcad7c1f75dcc3159 | 7,220 | py | Python | second/data/dataset.py | zhb0920/second.pytorch | f980f3d18749b7a2830983222d1695b5cb321dae | [
"MIT"
] | null | null | null | second/data/dataset.py | zhb0920/second.pytorch | f980f3d18749b7a2830983222d1695b5cb321dae | [
"MIT"
] | null | null | null | second/data/dataset.py | zhb0920/second.pytorch | f980f3d18749b7a2830983222d1695b5cb321dae | [
"MIT"
] | null | null | null | import pathlib
import pickle
import time
from functools import partial
import numpy as np
from second.core import box_np_ops
from second.core import preprocess as prep
from second.data import kitti_common as kitti
from second.utils.eval import get_coco_eval_result, get_official_eval_result
class Dataset(object):
"""An abstract class representing a pytorch-like Dataset.
All other datasets should subclass it. All subclasses should override
``__len__``, which provides the size of the dataset, and ``__getitem__``,
which supports integer indexing in the range 0 to len(self) exclusive.
A minimal example subclass is sketched after this class.
"""
def __getitem__(self, index):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
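# Minimal illustrative subclass (not part of this code base) showing the two
# required overrides; the data source here is just an in-memory list.
class ListDataset(Dataset):
    def __init__(self, items):
        self._items = list(items)

    def __getitem__(self, index):
        return self._items[index]

    def __len__(self):
        return len(self._items)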
class KittiDataset(Dataset):
def __init__(self, info_path, root_path, num_point_features,
target_assigner, feature_map_size, prep_func):
with open(info_path, 'rb') as f:
infos = pickle.load(f)
#self._kitti_infos = kitti.filter_infos_by_used_classes(infos, class_names)
self._root_path = root_path
self._kitti_infos = infos
self._num_point_features = num_point_features
print("remain number of infos:", len(self._kitti_infos))
# generate anchors cache
ret = target_assigner.generate_anchors(feature_map_size)
self._class_names = target_assigner.classes
anchors_dict = target_assigner.generate_anchors_dict(feature_map_size)
anchors = ret["anchors"]
anchors = anchors.reshape([-1, 7])
matched_thresholds = ret["matched_thresholds"]
unmatched_thresholds = ret["unmatched_thresholds"]
anchors_bv = box_np_ops.rbbox2d_to_near_bbox(
anchors[:, [0, 1, 3, 4, 6]])
anchor_cache = {
"anchors": anchors,
"anchors_bv": anchors_bv,
"matched_thresholds": matched_thresholds,
"unmatched_thresholds": unmatched_thresholds,
"anchors_dict": anchors_dict,
}
self._prep_func = partial(prep_func, anchor_cache=anchor_cache)
def __len__(self):
return len(self._kitti_infos)
@property
def ground_truth_annotations(self):
"""
If you want to evaluate with this class's ``evaluation`` function, you must
provide this property.
ground_truth_annotations format:
{
bbox: [N, 4]. If you fill it with fake data, the boxes MUST have a height > 25 pixels.
alpha: [N], you can use zero.
occluded: [N], you can use zero.
truncated: [N], you can use zero.
name: [N]
location: [N, 3] center of 3d box.
dimensions: [N, 3] dim of 3d box.
rotation_y: [N] angle.
}
all fields must be filled, but some fields can fill
zero.
"""
if "annos" not in self._kitti_infos[0]:
return None
gt_annos = [info["annos"] for info in self._kitti_infos]
return gt_annos
def evaluation(self, dt_annos):
"""dt_annos have same format as ground_truth_annotations.
When you want to eval your own dataset, you MUST set correct
the z axis and box z center.
"""
gt_annos = self.ground_truth_annotations
if gt_annos is None:
return None, None
z_axis = 1 # KITTI camera format use y as regular "z" axis.
z_center = 1.0 # KITTI camera box's center is [0.5, 1, 0.5]
# for regular raw lidar data, z_axis = 2, z_center = 0.5.
result_official = get_official_eval_result(
gt_annos,
dt_annos,
self._class_names,
z_axis=z_axis,
z_center=z_center)
result_coco = get_coco_eval_result(
gt_annos,
dt_annos,
self._class_names,
z_axis=z_axis,
z_center=z_center)
return result_official, result_coco
def __getitem__(self, idx):
"""
You need to create an input dict in this function for network inference.
format: {
anchors
voxels
num_points
coordinates
ground_truth: {
gt_boxes
gt_names
[optional]difficulty
[optional]group_ids
}
[optional]anchors_mask, slow in SECOND v1.5, don't use this.
[optional]metadata, in kitti, image index is saved in metadata
}
"""
info = self._kitti_infos[idx]
kitti.convert_to_kitti_info_version2(info)
pc_info = info["point_cloud"]
if "points" not in pc_info:
velo_path = pathlib.Path(pc_info['velodyne_path'])
if not velo_path.is_absolute():
velo_path = pathlib.Path(self._root_path) / pc_info['velodyne_path']
velo_reduced_path = velo_path.parent.parent / (
velo_path.parent.stem + '_reduced') / velo_path.name
if velo_reduced_path.exists():
velo_path = velo_reduced_path
points = np.fromfile(
str(velo_path), dtype=np.float32,
count=-1).reshape([-1, self._num_point_features])
input_dict = {
'points': points,
}
if "image" in info:
input_dict["image"] = info["image"]
if "calib" in info:
calib = info["calib"]
calib_dict = {
'rect': calib['R0_rect'],
'Trv2c': calib['Tr_velo_to_cam'],
'P2': calib['P2'],
}
input_dict["calib"] = calib_dict
if 'annos' in info:
annos = info['annos']
annos_dict = {}
# we need the other objects to avoid collisions when sampling
annos = kitti.remove_dontcare(annos)
loc = annos["location"]
dims = annos["dimensions"]
rots = annos["rotation_y"]
gt_names = annos["name"]
gt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]],
axis=1).astype(np.float32)
if "calib" in info:
calib = info["calib"]
gt_boxes = box_np_ops.box_camera_to_lidar(
gt_boxes, calib["R0_rect"], calib["Tr_velo_to_cam"])
# Only the center box format is allowed, so we need to convert the
# KITTI [0.5, 0.5, 0] center convention to [0.5, 0.5, 0.5].
box_np_ops.change_box3d_center_(gt_boxes, [0.5, 0.5, 0], [0.5, 0.5, 0.5])
gt_dict = {
'gt_boxes': gt_boxes,
'gt_names': gt_names,
}
if 'difficulty' in annos:
gt_dict['difficulty'] = annos["difficulty"]
if 'group_ids' in annos:
gt_dict['group_ids'] = annos["group_ids"]
input_dict["ground_truth"] = gt_dict
example = self._prep_func(input_dict=input_dict)
example["metadata"] = {}
if "image" in info:
example["metadata"]["image"] = input_dict["image"]
if "anchors_mask" in example:
example["anchors_mask"] = example["anchors_mask"].astype(np.uint8)
return example
| 38 | 89 | 0.57867 | 888 | 7,220 | 4.429054 | 0.266892 | 0.006611 | 0.006102 | 0.006102 | 0.080854 | 0.048309 | 0.045258 | 0.031528 | 0.031528 | 0.031528 | 0 | 0.013832 | 0.329086 | 7,220 | 189 | 90 | 38.201058 | 0.798101 | 0.229501 | 0 | 0.15873 | 0 | 0 | 0.094246 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.071429 | 0.007937 | 0.190476 | 0.007937 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d087726cb32c9e890cb6ee72e5cfbf8363dee31 | 485 | py | Python | chapter_02/coin_flips.py | rkneusel9/MathForDeepLearning | 8db1a85ce3cef4b48aab01ebe156e3fab2dfa271 | [
"MIT"
] | 23 | 2021-10-12T19:53:35.000Z | 2022-03-29T12:41:23.000Z | chapter_02/coin_flips.py | mohit-n-rajput/MathForDeepLearning | 8db1a85ce3cef4b48aab01ebe156e3fab2dfa271 | [
"MIT"
] | null | null | null | chapter_02/coin_flips.py | mohit-n-rajput/MathForDeepLearning | 8db1a85ce3cef4b48aab01ebe156e3fab2dfa271 | [
"MIT"
] | 7 | 2021-06-16T17:21:41.000Z | 2022-03-16T09:22:50.000Z | #
# file: coin_flips.py
#
# Probability of getting 0, 1, ..., M heads
# in M flips of a coin (M = 4 below).
#
# RTK, 05-Jun-2020
# Last update: 05-Jun-2020
#
################################################################
import numpy as np
N = 1000000
M = 4
heads = np.zeros(M+1)
for i in range(N):
flips = np.random.randint(0,2,M)
h, _ = np.bincount(flips, minlength=2)
heads[h] += 1
prob = heads / N
print()
print("Probabilities: %s" % np.array2string(prob))
print()
| 16.166667 | 64 | 0.540206 | 73 | 485 | 3.561644 | 0.60274 | 0.038462 | 0.069231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.076726 | 0.193814 | 485 | 29 | 65 | 16.724138 | 0.588235 | 0.274227 | 0 | 0.166667 | 0 | 0 | 0.061594 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d0efff53cf20687e5c7ee18a8e769a3d7bab64a | 11,199 | py | Python | src/python/pants/rules/core/test.py | rahuliyer95/pants | 50ee5cc8bd9ab40ad13c3c28ccbc4e7f189292ec | [
"Apache-2.0"
] | null | null | null | src/python/pants/rules/core/test.py | rahuliyer95/pants | 50ee5cc8bd9ab40ad13c3c28ccbc4e7f189292ec | [
"Apache-2.0"
] | null | null | null | src/python/pants/rules/core/test.py | rahuliyer95/pants | 50ee5cc8bd9ab40ad13c3c28ccbc4e7f189292ec | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import itertools
import logging
from abc import ABC, abstractmethod
from dataclasses import dataclass
from enum import Enum
from pathlib import PurePath
from typing import Iterable, Optional, Type
from pants.base.exiter import PANTS_FAILED_EXIT_CODE, PANTS_SUCCEEDED_EXIT_CODE
from pants.build_graph.address import Address
from pants.engine import desktop
from pants.engine.console import Console
from pants.engine.fs import Digest, DirectoryToMaterialize, Workspace
from pants.engine.goal import Goal, GoalSubsystem
from pants.engine.interactive_runner import InteractiveProcessRequest, InteractiveRunner
from pants.engine.isolated_process import FallibleExecuteProcessResult
from pants.engine.legacy.graph import HydratedTargetsWithOrigins
from pants.engine.legacy.structs import TargetAdaptorWithOrigin
from pants.engine.objects import union
from pants.engine.rules import UnionMembership, goal_rule, rule
from pants.engine.selectors import Get, MultiGet
# TODO(#6004): use proper Logging singleton, rather than static logger.
logger = logging.getLogger(__name__)
class Status(Enum):
SUCCESS = "SUCCESS"
FAILURE = "FAILURE"
@dataclass(frozen=True)
class TestResult:
status: Status
stdout: str
stderr: str
coverage_data: Optional["CoverageData"] = None
# Prevent this class from being detected by pytest as a test class.
__test__ = False
@staticmethod
def from_fallible_execute_process_result(
process_result: FallibleExecuteProcessResult,
*,
coverage_data: Optional["CoverageData"] = None,
) -> "TestResult":
return TestResult(
status=Status.SUCCESS if process_result.exit_code == 0 else Status.FAILURE,
stdout=process_result.stdout.decode(),
stderr=process_result.stderr.decode(),
coverage_data=coverage_data,
)
@dataclass(frozen=True)
class TestDebugRequest:
ipr: InteractiveProcessRequest
# Prevent this class from being detected by pytest as a test class.
__test__ = False
@union
@dataclass(frozen=True) # type: ignore[misc] # https://github.com/python/mypy/issues/5374
class TestRunner(ABC):
adaptor_with_origin: TargetAdaptorWithOrigin
__test__ = False
@staticmethod
@abstractmethod
def is_valid_target(_: TargetAdaptorWithOrigin) -> bool:
"""Return True if the test runner can meaningfully operate on this target."""
# NB: This is only used for the sake of coordinator_of_tests. Consider inlining that rule so that
# we can remove this wrapper type.
@dataclass(frozen=True)
class WrappedTestRunner:
runner: TestRunner
@dataclass(frozen=True)
class AddressAndTestResult:
address: Address
test_result: TestResult
class CoverageData(ABC):
"""Base class for inputs to a coverage report.
Subclasses should add whichever fields they require - snapshots of coverage output or xml files, etc.
"""
@property
@abstractmethod
def batch_cls(self) -> Type["CoverageDataBatch"]:
pass
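# Illustrative sketch only (names are hypothetical, not part of pants): a language
# backend could subclass CoverageData with whatever fields it needs and point
# batch_cls at its own CoverageDataBatch subclass, e.g.:
#
#   @dataclass(frozen=True)
#   class MyLangCoverageData(CoverageData):
#       digest: Digest
#
#       @property
#       def batch_cls(self) -> Type["CoverageDataBatch"]:
#           return MyLangCoverageDataBatch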
@union
class CoverageDataBatch:
pass
class CoverageReport(ABC):
"""Represents a code coverage report that can be materialized to the terminal or disk."""
def materialize(self, console: Console, workspace: Workspace) -> Optional[PurePath]:
"""Materialize this code coverage report to the terminal or disk.
:param console: A handle to the terminal.
:param workspace: A handle to local disk.
:return: If a report was materialized to disk, the path of the file in the report one might
open first to start examining the report.
"""
...
@dataclass(frozen=True)
class ConsoleCoverageReport(CoverageReport):
"""Materializes a code coverage report to the terminal."""
report: str
def materialize(self, console: Console, workspace: Workspace) -> Optional[PurePath]:
console.print_stdout(f"\n{self.report}")
return None
@dataclass(frozen=True)
class FilesystemCoverageReport(CoverageReport):
"""Materializes a code coverage report to disk."""
result_digest: Digest
directory_to_materialize_to: PurePath
report_file: Optional[PurePath]
def materialize(self, console: Console, workspace: Workspace) -> Optional[PurePath]:
workspace.materialize_directory(
DirectoryToMaterialize(
self.result_digest, path_prefix=str(self.directory_to_materialize_to),
)
)
console.print_stdout(f"\nWrote coverage report to `{self.directory_to_materialize_to}`")
return self.report_file
class TestOptions(GoalSubsystem):
"""Runs tests."""
name = "test"
required_union_implementations = (TestRunner,)
# Prevent this class from being detected by pytest as a test class.
__test__ = False
@classmethod
def register_options(cls, register) -> None:
super().register_options(register)
register(
"--debug",
type=bool,
default=False,
help="Run a single test target in an interactive process. This is necessary, for "
"example, when you add breakpoints in your code.",
)
register(
"--run-coverage",
type=bool,
default=False,
help="Generate a coverage report for this test run.",
)
register(
"--open-coverage",
type=bool,
default=False,
help="If a coverage report file is generated, open it on the local system if the "
"system supports this.",
)
class Test(Goal):
subsystem_cls = TestOptions
__test__ = False
@goal_rule
async def run_tests(
console: Console,
options: TestOptions,
runner: InteractiveRunner,
targets_with_origins: HydratedTargetsWithOrigins,
workspace: Workspace,
union_membership: UnionMembership,
) -> Test:
test_runners: Iterable[Type[TestRunner]] = union_membership.union_rules[TestRunner]
if options.values.debug:
target_with_origin = targets_with_origins.expect_single()
adaptor_with_origin = TargetAdaptorWithOrigin.create(
target_with_origin.target.adaptor, target_with_origin.origin
)
address = adaptor_with_origin.adaptor.address
valid_test_runners = [
test_runner
for test_runner in test_runners
if test_runner.is_valid_target(adaptor_with_origin)
]
if not valid_test_runners:
raise ValueError(f"No valid test runner for {address}.")
if len(valid_test_runners) > 1:
raise ValueError(
f"Multiple possible test runners for {address} "
f"({', '.join(test_runner.__name__ for test_runner in valid_test_runners)})."
)
test_runner = valid_test_runners[0]
logger.info(f"Starting test in debug mode: {address.reference()}")
request = await Get[TestDebugRequest](TestRunner, test_runner(adaptor_with_origin))
debug_result = runner.run_local_interactive_process(request.ipr)
return Test(debug_result.process_exit_code)
adaptors_with_origins = tuple(
TargetAdaptorWithOrigin.create(target_with_origin.target.adaptor, target_with_origin.origin)
for target_with_origin in targets_with_origins
if target_with_origin.target.adaptor.has_sources()
)
results = await MultiGet(
Get[AddressAndTestResult](
WrappedTestRunner, WrappedTestRunner(test_runner(adaptor_with_origin))
)
for adaptor_with_origin in adaptors_with_origins
for test_runner in test_runners
if test_runner.is_valid_target(adaptor_with_origin)
)
did_any_fail = False
for result in results:
if result.test_result.status == Status.FAILURE:
did_any_fail = True
if result.test_result.stdout:
console.write_stdout(
f"{result.address.reference()} stdout:\n{result.test_result.stdout}\n"
)
if result.test_result.stderr:
# NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving
# the two streams.
console.write_stdout(
f"{result.address.reference()} stderr:\n{result.test_result.stderr}\n"
)
console.write_stdout("\n")
for result in results:
console.print_stdout(
f"{result.address.reference():80}.....{result.test_result.status.value:>10}"
)
if did_any_fail:
console.print_stderr(console.red("\nTests failed"))
exit_code = PANTS_FAILED_EXIT_CODE
else:
exit_code = PANTS_SUCCEEDED_EXIT_CODE
if options.values.run_coverage:
# TODO: consider warning if a user uses `--coverage` but the language backend does not
# provide coverage support. This might be too chatty to be worth doing?
results_with_coverage = [x for x in results if x.test_result.coverage_data is not None]
coverage_data_collections = itertools.groupby(
results_with_coverage,
lambda address_and_test_result: address_and_test_result.test_result.coverage_data.batch_cls, # type: ignore[union-attr]
)
coverage_reports = await MultiGet(
Get[CoverageReport](
CoverageDataBatch, coverage_batch_cls(tuple(addresses_and_test_results)) # type: ignore[call-arg]
)
for coverage_batch_cls, addresses_and_test_results in coverage_data_collections
)
coverage_report_files = []
for report in coverage_reports:
report_file = report.materialize(console, workspace)
if report_file is not None:
coverage_report_files.append(report_file)
if coverage_report_files and options.values.open_coverage:
desktop.ui_open(console, runner, coverage_report_files)
return Test(exit_code)
@rule
async def coordinator_of_tests(wrapped_test_runner: WrappedTestRunner) -> AddressAndTestResult:
test_runner = wrapped_test_runner.runner
adaptor_with_origin = test_runner.adaptor_with_origin
adaptor = adaptor_with_origin.adaptor
# TODO(#6004): when streaming to live TTY, rely on V2 UI for this information. When not a
# live TTY, periodically dump heavy hitters to stderr. See
# https://github.com/pantsbuild/pants/issues/6004#issuecomment-492699898.
logger.info(f"Starting tests: {adaptor.address.reference()}")
# NB: This has the effect of "casting" a TargetAdaptorWithOrigin to a member of the TestTarget
# union. If the adaptor is not a member of the union, the engine will fail at runtime with a
# useful error message.
result = await Get[TestResult](TestRunner, test_runner)
logger.info(
f"Tests {'succeeded' if result.status == Status.SUCCESS else 'failed'}: "
f"{adaptor.address.reference()}"
)
return AddressAndTestResult(adaptor.address, result)
def rules():
return [coordinator_of_tests, run_tests]
| 34.671827 | 132 | 0.696044 | 1,322 | 11,199 | 5.707262 | 0.241301 | 0.023857 | 0.021869 | 0.019085 | 0.184228 | 0.135454 | 0.113453 | 0.090126 | 0.090126 | 0.063883 | 0 | 0.004508 | 0.227431 | 11,199 | 322 | 133 | 34.779503 | 0.867545 | 0.180284 | 0 | 0.178571 | 0 | 0 | 0.112128 | 0.040684 | 0 | 0 | 0 | 0.006211 | 0 | 1 | 0.035714 | false | 0.008929 | 0.089286 | 0.008929 | 0.316964 | 0.017857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d0f1a68f120b19635334d1a413af52afe938899 | 966 | py | Python | Cipher/encoder.py | Rekid46/Python-Games | 7b4897ad5c6eccf855bb8c1f3bfa6d9e6596fc63 | [
"Apache-2.0"
] | 1 | 2021-07-15T08:15:51.000Z | 2021-07-15T08:15:51.000Z | Cipher/encoder.py | Rekid46/Python-Games | 7b4897ad5c6eccf855bb8c1f3bfa6d9e6596fc63 | [
"Apache-2.0"
] | null | null | null | Cipher/encoder.py | Rekid46/Python-Games | 7b4897ad5c6eccf855bb8c1f3bfa6d9e6596fc63 | [
"Apache-2.0"
] | null | null | null | alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z','a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
def ceaser(msg,shift,action):
""" Turn a msg into a cipher text"""
end_msg=""
if action == "decode":
shift *= -1
for letter in msg:
if letter in alphabet:
i=alphabet.index(letter)
new_i=i+shift
end_msg += alphabet[new_i]
else:
end_msg += letter
print(f"The {action}d message is: {end_msg}")
while True:
action=input("Type 'encode' to encrypt and 'decode' to decrypt:\n")
msg=input("Type your message:\n")
shift=int(input("Type the shift number:\n"))
shift=shift%26
ceaser(msg,shift,action)
result=input("continue?")
if result=="no":
print("Goodbye")
break
| 35.777778 | 270 | 0.473085 | 149 | 966 | 3.026846 | 0.456376 | 0.053215 | 0.013304 | 0.017738 | 0.115299 | 0.115299 | 0.115299 | 0.115299 | 0.115299 | 0.115299 | 0 | 0.004225 | 0.26501 | 966 | 26 | 271 | 37.153846 | 0.630986 | 0.030021 | 0 | 0 | 0 | 0 | 0.221505 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0 | 0 | 0.043478 | 0.086957 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d1157cd4adc923208f14debdd890ffa23e34a4d | 3,291 | py | Python | src/stations/istation.py | airalab/sensors-connectivity | a7eddb3cd0fa2714ff3b96487ad359fe381881ea | [
"BSD-3-Clause"
] | 5 | 2020-06-24T09:10:05.000Z | 2021-02-10T16:45:27.000Z | src/stations/istation.py | airalab/sensors-connectivity | a7eddb3cd0fa2714ff3b96487ad359fe381881ea | [
"BSD-3-Clause"
] | 13 | 2020-05-01T15:30:13.000Z | 2022-03-16T15:38:18.000Z | src/stations/istation.py | airalab/sensors-connectivity | a7eddb3cd0fa2714ff3b96487ad359fe381881ea | [
"BSD-3-Clause"
] | 7 | 2020-05-26T12:34:57.000Z | 2021-12-29T10:36:38.000Z | # This is an interface for a station
import time
import netifaces
from datetime import timedelta
import json
from dataclasses import dataclass
import rospy
import threading
import copy
STATION_VERSION = "v0.8.0"
thlock = threading.RLock()
class Measurement():
"""
Represents a single measurement
"""
def __init__(self, public: str, model: int, geo_lat: float, geo_lon: float, measurement: dict):
self.public = public
self.model = model
self.geo_lat = geo_lat
self.geo_lon = geo_lon
self.measurement = measurement
def measurement_check(self) -> dict:
with thlock:
data_copy = copy.deepcopy(self.measurement)
for key, value in data_copy.items():
if value is None:
del self.measurement[key]
return self.measurement
def __str__(self):
return f"{{Public: {self.public}, geo: ({self.geo_lat},{self.geo_lon}), measurements: {self.measurement_check()}}}"
@dataclass
class StationData:
"""
It's a wrapper for a measurement with some additional information
"""
version: str
mac: str
uptime: float
measurement: Measurement
def __str__(self):
uptime = str(timedelta(seconds=self.uptime))
return f"{{MAC: {self.mac}, Uptime: {uptime}, M: {self.measurement}}}"
def __repr__(self):
uptime = str(timedelta(seconds=self.uptime))
return f"{{MAC: {self.mac}, Uptime: {uptime}, M: {self.measurement}}}"
def _get_mac() -> str:
for interface in netifaces.interfaces():
if interface != "lo":
if 17 in netifaces.ifaddresses(interface):
_i = netifaces.ifaddresses(interface)
_i = _i[17][0]["addr"]
break
mac = _i.replace(":", "")
return mac
class IStation:
"""
Station is an input/source of data
station1 \ / output1
station2 - sensors-connectivity - output2
station3 / \ output3
Every station must implement the `get_data()` method.
Keep in mind that `get_data()` can be called more often than actual data arrives.
A good practice is to have a thread for data reading and a variable that keeps the
last record; a minimal sketch of this pattern is shown after the class below.
Have a look at the COMStation and HTTPStation implementations.
"""
def __init__(self, config: dict):
"""
The station is responsible for its own settings
:param config: configuration dictionary
"""
self.config = config
self.version = STATION_VERSION
self.start_time = time.time()
self.mac_address = _get_mac()
def __str__(self):
return f"{{Version: {self.version}, Start: {self.start_time}, MAC: {self.mac_address}}}"
def get_data(self) -> [StationData]:
"""
Must return a new record of data or the last measured data.
Depending on the configuration, this method may be called
more often than new data is received.
:return: StationData object
"""
return [StationData(
self.version,
self.mac_address,
time.time() - self.start_time,
Measurement()
)]
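# Minimal sketch (not part of the package) of the pattern recommended in the IStation
# docstring: a background thread keeps reading and stores the latest record, while
# get_data() simply returns whatever was read last. The 1-second poll interval and the
# empty measurement payload are placeholder assumptions.
class _SketchStation(IStation):
    def __init__(self, config: dict):
        super().__init__(config)
        self.last_measurement = Measurement("", 0, 0.0, 0.0, {})
        threading.Thread(target=self._read_loop, daemon=True).start()

    def _read_loop(self):
        while True:
            with thlock:
                # A real station would read its sensor here; this just keeps the loop alive.
                self.last_measurement = Measurement("", 0, 0.0, 0.0, {"placeholder": None})
            time.sleep(1)

    def get_data(self) -> [StationData]:
        return [StationData(self.version, self.mac_address,
                            time.time() - self.start_time, self.last_measurement)]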
__all__ = ["IStation", "Measurement", "StationData"]
| 26.97541 | 123 | 0.610453 | 385 | 3,291 | 5.064935 | 0.363636 | 0.053846 | 0.027692 | 0.013333 | 0.144615 | 0.089231 | 0.089231 | 0.089231 | 0.089231 | 0.089231 | 0 | 0.006001 | 0.291097 | 3,291 | 121 | 124 | 27.198347 | 0.829833 | 0.259192 | 0 | 0.111111 | 0 | 0.031746 | 0.151356 | 0.026247 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.126984 | 0.031746 | 0.492063 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d150ee8e5e66bd6f777d79b38db3b4fdfc7b615 | 8,948 | py | Python | parsons/databases/postgres/postgres_core.py | cmc333333/parsons | 50804a3627117797570f1e9233c9bbad583f7831 | [
"Apache-2.0"
] | 3 | 2019-09-05T16:57:15.000Z | 2019-10-01T19:56:58.000Z | parsons/databases/postgres/postgres_core.py | cmc333333/parsons | 50804a3627117797570f1e9233c9bbad583f7831 | [
"Apache-2.0"
] | 22 | 2019-09-03T13:23:37.000Z | 2019-10-03T20:32:48.000Z | parsons/databases/postgres/postgres_core.py | cmc333333/parsons | 50804a3627117797570f1e9233c9bbad583f7831 | [
"Apache-2.0"
] | 2 | 2019-09-01T18:30:10.000Z | 2019-10-03T20:07:46.000Z | from contextlib import contextmanager
import psycopg2
import psycopg2.extras
from parsons.etl.table import Table
from parsons.utilities import files
import pickle
import petl
import logging
from parsons.databases.postgres.postgres_create_statement import PostgresCreateStatement
# Max number of rows that we query at a time, so we can avoid loading huge
# data sets into memory.
# 100k rows per batch at ~1k bytes each = ~100MB per batch.
QUERY_BATCH_SIZE = 100000
logger = logging.getLogger(__name__)
class PostgresCore(PostgresCreateStatement):
@contextmanager
def connection(self):
"""
Generate a Postgres connection.
The connection is set up as a python "context manager", so it will be closed
automatically (and all queries committed) when the connection goes out of scope.
When using the connection, make sure to put it in a ``with`` block (necessary for
any context manager):
``with pg.connection() as conn:``
`Returns:`
Psycopg2 `connection` object
"""
# Create a psycopg2 connection and cursor
conn = psycopg2.connect(user=self.username, password=self.password,
host=self.host, dbname=self.db, port=self.port,
connect_timeout=self.timeout)
try:
yield conn
except psycopg2.Error:
conn.rollback()
raise
else:
conn.commit()
finally:
conn.close()
@contextmanager
def cursor(self, connection):
cur = connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
try:
yield cur
finally:
cur.close()
def query(self, sql, parameters=None):
"""
Execute a query against the database. Will return ``None`` if the query returns zero rows.
To include python variables in your query, it is recommended to pass them as parameters,
following the `psycopg style <http://initd.org/psycopg/docs/usage.html#passing-parameters-to-sql-queries>`_.
Using the ``parameters`` argument ensures that values are escaped properly, and avoids SQL
injection attacks.
**Parameter Examples**
.. code-block:: python
# Note that the name contains a quote, which could break your query if not escaped
# properly.
name = "Beatrice O'Brady"
sql = "SELECT * FROM my_table WHERE name = %s"
rs.query(sql, parameters=[name])
.. code-block:: python
names = ["Allen Smith", "Beatrice O'Brady", "Cathy Thompson"]
placeholders = ', '.join('%s' for item in names)
sql = f"SELECT * FROM my_table WHERE name IN ({placeholders})"
rs.query(sql, parameters=names)
`Args:`
sql: str
A valid SQL statement
parameters: list
A list of python variables to be converted into SQL values in your query
`Returns:`
Parsons Table
See :ref:`parsons-table` for output options.
""" # noqa: E501
with self.connection() as connection:
return self.query_with_connection(sql, connection, parameters=parameters)
def query_with_connection(self, sql, connection, parameters=None, commit=True):
"""
Execute a query against the database, with an existing connection. Useful for batching
queries together. Will return ``None`` if the query returns zero rows.
`Args:`
sql: str
A valid SQL statement
connection: obj
A connection object obtained from ``redshift.connection()``
parameters: list
A list of python variables to be converted into SQL values in your query
commit: boolean
Whether to commit the transaction immediately. If ``False`` the transaction will
be committed when the connection goes out of scope and is closed (or you can
commit manually with ``connection.commit()``).
`Returns:`
Parsons Table
See :ref:`parsons-table` for output options.
"""
with self.cursor(connection) as cursor:
logger.debug(f'SQL Query: {sql}')
cursor.execute(sql, parameters)
if commit:
connection.commit()
# If the cursor is empty, don't cause an error
if not cursor.description:
logger.debug('Query returned 0 rows')
return None
else:
# Fetch the data in batches, and "pickle" the rows to a temp file.
# (We pickle rather than writing to, say, a CSV, so that we maintain
# all the type information for each field.)
temp_file = files.create_temp_file()
with open(temp_file, 'wb') as f:
# Grab the header
header = [i[0] for i in cursor.description]
pickle.dump(header, f)
while True:
batch = cursor.fetchmany(QUERY_BATCH_SIZE)
if not batch:
break
logger.debug(f'Fetched {len(batch)} rows.')
for row in batch:
pickle.dump(list(row), f)
# Load a Table from the file
final_tbl = Table(petl.frompickle(temp_file))
logger.debug(f'Query returned {final_tbl.num_rows} rows.')
return final_tbl
def _create_table_precheck(self, connection, table_name, if_exists):
"""
Helper to determine what to do when you need a table that may already exist.
`Args:`
connection: obj
A connection object obtained from ``redshift.connection()``
table_name: str
The table to check
if_exists: str
If the table already exists, either ``fail``, ``append``, ``drop``,
or ``truncate`` the table.
`Returns:`
bool
True if the table needs to be created, False otherwise.
"""
if if_exists not in ['fail', 'truncate', 'append', 'drop']:
raise ValueError("Invalid value for `if_exists` argument")
# If the table exists, evaluate the if_exists argument for next steps.
if self.table_exists_with_connection(table_name, connection):
if if_exists == 'fail':
raise ValueError('Table already exists.')
if if_exists == 'truncate':
truncate_sql = f"TRUNCATE TABLE {table_name};"
logger.info(f"Truncating {table_name}.")
self.query_with_connection(truncate_sql, connection, commit=False)
if if_exists == 'drop':
logger.info(f"Dropping {table_name}.")
drop_sql = f"DROP TABLE {table_name};"
self.query_with_connection(drop_sql, connection, commit=False)
return True
return False
else:
return True
def table_exists(self, table_name, view=True):
"""
Check if a table or view exists in the database.
`Args:`
table_name: str
The table name and schema (e.g. ``myschema.mytable``).
view: boolean
Check to see if a view exists by the same name
`Returns:`
boolean
``True`` if the table exists and ``False`` if it does not.
"""
with self.connection() as connection:
return self.table_exists_with_connection(table_name, connection, view)
def table_exists_with_connection(self, table_name, connection, view=True):
# Extract the table and schema from this. If no schema is detected then
# will default to the public schema.
try:
schema, table = table_name.lower().split('.', 1)
except ValueError:
schema, table = "public", table_name.lower()
with self.cursor(connection) as cursor:
# Check in pg tables for the table
sql = f"""select count(*) from pg_tables where schemaname='{schema}' and
tablename='{table}';"""
cursor.execute(sql)
result = cursor.fetchone()[0]
# Check in the pg_views if it is a view
if view:
sql = f"""select count(*) from pg_views where schemaname='{schema}' and
viewname='{table}';"""
cursor.execute(sql)
result += cursor.fetchone()[0]
# If in either, return boolean
if result >= 1:
return True
else:
return False
| 35.367589 | 116 | 0.571636 | 1,028 | 8,948 | 4.904669 | 0.288911 | 0.02499 | 0.015073 | 0.013685 | 0.231257 | 0.223324 | 0.166997 | 0.140817 | 0.08925 | 0.04998 | 0 | 0.004973 | 0.348234 | 8,948 | 252 | 117 | 35.507937 | 0.859568 | 0.418082 | 0 | 0.22 | 0 | 0 | 0.113746 | 0.009223 | 0 | 0 | 0 | 0 | 0 | 1 | 0.07 | false | 0.01 | 0.09 | 0 | 0.26 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d1606d474de459e210bb683d5ab3b391d786192 | 18,515 | py | Python | m_theory/dim4/so8/src/analysis.py | ParikhKadam/google-research | 00a282388e389e09ce29109eb050491c96cfab85 | [
"Apache-2.0"
] | null | null | null | m_theory/dim4/so8/src/analysis.py | ParikhKadam/google-research | 00a282388e389e09ce29109eb050491c96cfab85 | [
"Apache-2.0"
] | 110 | 2021-10-01T18:22:38.000Z | 2021-12-27T22:08:31.000Z | m_theory/dim4/so8/src/analysis.py | admariner/google-research | 7cee4b22b925581d912e8d993625c180da2a5a4f | [
"Apache-2.0"
] | 1 | 2022-02-10T10:43:10.000Z | 2022-02-10T10:43:10.000Z | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SO(8) gauged D=4 supergravity.
Usage: python3 -i -m dim4.so8.src.analysis
"""
# Naming deviates from PEP-8 conventions where this makes mathematics easier
# to read. Also, local variables may name-match module-global definitions.
# pylint:disable=invalid-name
# pylint:disable=redefined-outer-name
import collections
import itertools
import os
from dim4.generic import a123
from m_theory_lib import algebra
from m_theory_lib import m_util as mu
from m_theory_lib import supergravity
import numpy
import tensorflow as tf
### Supergravity ###
class SO8_SUGRA(supergravity.SUGRA):
"""D=4 SO(8) Supergravity.
In addition to the base class attributes, this class adds...:
Attributes:
e7: The e7 algebra that was used to define this supergravity.
"""
signature = supergravity.SUGRASignature(
name='SO8',
gauge_algebra_name='so(8)',
dim=4,
generator_scaling=+1,
dim_scalar_manifold=70,
num_model_params=1, # The 'dyonic angle'.
scalar_masses_dV_from_right=False,
scalar_masses_factor=36.0,
gravitino_masses_factor=6.0,
fermion_masses_factor=6.0,
vector_masses_factor=6.0,
num_spurious_vector_masses=28)
def __init__(self,
use_dwn_stationarity=True,
e7=algebra.g.e7,
squash_stationarity_tf_func=tf.math.asinh,
**kwargs):
"""Initializes the instance.
Args:
use_dwn_stationarity: Whether to use the A1/A2 formula from the
de Wit-Nicolai 'SO(8) Supergravity' paper to compute the
stationarity-violation (rather than taking the naive gradient-squared).
e7: The e7 algebra to use.
squash_stationarity_tf_func: Optional 'squashing' function that is used
to squash the stationarity-violation ([]-tf.Tensor -> []-tf.Tensor).
**kwargs: keyword parameters to be passed on to superclass __init__().
"""
super().__init__(e7.t_a_ij_kl,
squash_stationarity_tf_func=squash_stationarity_tf_func,
**kwargs)
self._use_dwn_stationarity = use_dwn_stationarity
self.e7 = e7
self._tc_28_8_8 = tf.constant(
e7.su8.m_28_8_8.astype(numpy.complex128),
dtype=tf.complex128)
def _expand_ijkl(self, t_ab):
"""Index-expands 28, 28 -> [8, 8] [8, 8]."""
return 0.5 * tf.einsum(
'ijB,BIJ->ijIJ',
tf.einsum('AB,Aij->ijB', t_ab, self._tc_28_8_8),
self._tc_28_8_8)
def _canonicalize_equilibrium_sc(self, v70, diagonalize_8x8s=True,
rng=None, verbose=True):
"""Simplifies a location on the scalar manifold by rotation."""
if rng is None:
rng = numpy.random.RandomState()
m8x8s = mu.nsum('Aij,A->ij', self.e7.su8.m_35_8_8.real, v70[:35])
m8x8c = mu.nsum('Aij,A->ij', self.e7.su8.m_35_8_8.real, v70[35:])
rot = self.e7.spin8.get_diagonalizing_rotation(
m8x8s if diagonalize_8x8s else m8x8c)
decomposed_rot = mu.product_decompose_rotation(rot)
resynthesized_rot = mu.resynthesize_rotation_for_rep(
8, 8, decomposed_rot, 'ab,->ab', numpy.ones([]))
if not numpy.allclose(rot, resynthesized_rot, rtol=1e-3, atol=1e-5):
raise ValueError(
'Resynthesized rotation does not match original rotation.')
generator_mapping_spec = 'sS,sScC->cC' if diagonalize_8x8s else 'cC,sScC->sS'
rep_action = 0.25 * self.e7.spin8.gamma_sscc
rot_other_rep = mu.resynthesize_rotation_for_rep(
8, 8, decomposed_rot, generator_mapping_spec, rep_action)
(rot_s, rot_c) = ((rot, rot_other_rep) if diagonalize_8x8s
else (rot_other_rep, rot))
canon_m8x8s = rot_s.T @ m8x8s @ rot_s
canon_m8x8c = rot_c.T @ m8x8c @ rot_c
if diagonalize_8x8s:
gens_postdiag = mu.get_generators_for_post_diagonalization_reduction(
numpy.diag(canon_m8x8s), 'gsS,sScC->gcC', self.e7.spin8.gamma_sscc)
else:
gens_postdiag = mu.get_generators_for_post_diagonalization_reduction(
numpy.diag(canon_m8x8c), 'gcC,sScC->gsS', self.e7.spin8.gamma_sscc)
tc_rot_gens = mu.tff64(gens_postdiag)
tc_8x8s = mu.tff64(canon_m8x8s)
tc_8x8c = mu.tff64(canon_m8x8c)
@tf.function
def tf_rotated_8x8(t_rot_params):
t_rot = mu.tf_expm(
tf.einsum('gab,g->ab', tc_rot_gens, t_rot_params))
if diagonalize_8x8s:
tc_rotated_8x8 = tf.linalg.matmul(
t_rot @ tc_8x8c, t_rot, transpose_b=True)
else:
tc_rotated_8x8 = tf.linalg.matmul(
t_rot @ tc_8x8s, t_rot, transpose_b=True)
return tc_rotated_8x8
@tf.function
def tf_loss(t_rot_params):
t_8x8 = tf_rotated_8x8(t_rot_params)
ret = tf.reduce_sum(tf.abs(t_8x8))
return ret
if gens_postdiag.shape[0] == 0:
return self.e7.v70_from_35s35c(canon_m8x8s, canon_m8x8c)
_, opt_rot_params = mu.tf_minimize_v2(
tf_loss,
rng.normal(scale=1.0, size=gens_postdiag.shape[0]),
default_gtol=1e-14)
opt_8x8 = tf_rotated_8x8(mu.tff64(opt_rot_params)).numpy()
if diagonalize_8x8s:
return self.e7.v70_from_35s35c(canon_m8x8s, opt_8x8)
else:
return self.e7.v70_from_35s35c(opt_8x8, canon_m8x8c)
def canonicalize_equilibrium(self, v70, **kwargs):
"""Simplifies a location on the scalar manifold by rotation."""
v70 = numpy.asarray(v70)
canon_35s = self._canonicalize_equilibrium_sc(v70, **kwargs)
canon_35c = self._canonicalize_equilibrium_sc(
v70, diagonalize_8x8s=False, **kwargs)
return min([canon_35s, canon_35c], key=lambda xs: (abs(xs) > 1e-5).sum())
def tf_T(self, t_vielbein, t_omega=None):
"""Computes the SO(8) T-tensor."""
t_omega = mu.tff64(0.0) if t_omega is None else t_omega
t_u_ijIJ = self._expand_ijkl(t_vielbein[:28, :28])
t_u_klKL = tf.math.conj(t_u_ijIJ)
t_v_ijKL = self._expand_ijkl(t_vielbein[:28, 28:])
t_v_klIJ = tf.math.conj(t_v_ijKL)
t_cw = tf.math.cos(t_omega)
t_sw = tf.math.sin(t_omega)
tc_exp_w = tf.complex(t_cw, t_sw)
tc_exp_nw = tf.complex(t_cw, -t_sw)
t_uv = tc_exp_nw * t_u_klKL + tc_exp_w * t_v_klIJ
t_uuvv = (
tf.einsum('lmJK,kmKI->lkIJ', t_u_ijIJ, t_u_klKL) -
tf.einsum('lmJK,kmKI->lkIJ', t_v_ijKL, t_v_klIJ))
return tf.einsum('ijIJ,lkIJ->lkij', t_uv, t_uuvv)
def tf_A123(self, t_T, want_A1=True, want_A2=True, want_A3=True):
"""See base class."""
t_A1 = t_A2 = t_A3 = None
if want_A1:
t_A1 = mu.tfc128(-4 / 21) * tf.einsum('mijm->ij', t_T)
if want_A2 or want_A3:
t_A2 = mu.tfc128(-4 / (3 * 3)) * (
# Antisymmetrize in last 3 indices, but using antisymmetry in last 2.
# Note factor 1/3 above (in -4/(3*3) rather than -4/3).
t_T + tf.einsum('lijk->ljki', t_T) + tf.einsum('lijk->lkij', t_T))
if want_A3:
t_A3 = a123.tf_A3_from_A2(t_A2)
return t_A1, t_A2, t_A3
def tf_ext_sugra_tensors(self, t_scalars, with_stationarity=True, **kwargs):
"""See base class."""
if not self._use_dwn_stationarity or not with_stationarity:
# If we are not using de Wit-Nicolai stationarity, or are not interested
# in stationarity, just delegate to the base class method.
return super().tf_ext_sugra_tensors(t_scalars,
with_stationarity=with_stationarity,
**kwargs)
sugra_tensors = super().tf_ext_sugra_tensors(
t_scalars, with_stationarity=False, **kwargs)
*_, t_A1, t_A2, _ = sugra_tensors
t_dwn_stationarity = self.dwn_stationarity(t_A1, t_A2, **kwargs)
tf_squash = self._squash_stationarity_tf_func
t_squashed_stationarity = (
t_dwn_stationarity if tf_squash is None
else tf_squash(t_dwn_stationarity))
return sugra_tensors[:-1] + (t_squashed_stationarity,)
def dwn_stationarity(self, t_A1, t_A2, **kwargs):
"""Computes stationarity-violation 'in the local frame'."""
# Attention: The stationarity that we get from
# self.position_and_stationarity() will typically be squashed
# by the default asinh-squash!
return a123.tf_dwn_stationarity(t_A1, t_A2)
def show_position_tex(self, position, digits=6):
"""Returns a text-string that shows the position."""
m35s = mu.nsum('Iij,I->ij', self.e7.su8.m_35_8_8, position[:35])
m35c = mu.nsum('Iij,I->ij', self.e7.su8.m_35_8_8, position[35:70])
fmt_num = lambda x: f'{x:.0{digits}f}'
def dotified_m(sc, ij, is_positive):
sign_str = '' if is_positive else '-'
indices = (r'\dot{}\dot{}' if sc else '{}{}').format(*ij)
return '%sM_{%s}' % (sign_str, indices)
pos_sign_by_text = collections.defaultdict(list)
for sc, m35 in ((0, m35s), (1, m35c)):
for ij in itertools.product(range(8), range(8)):
if not ij[0] <= ij[1]:
# We only report entries of the upper-triangular part of these
# symmetric matrices.
continue
num = m35[ij]
abs_num_str = fmt_num(abs(num))
if float(abs_num_str) == 0: # Skip zeroes.
continue
pos_sign_by_text[abs_num_str].append((sc, ij, num > 0))
groups = sorted(
[(sorted(locations), abs_num_str)
for abs_num_str, locations in pos_sign_by_text.items()])
tex_pieces = []
for raw_locations, abs_num_str in groups:
is_plus_first = raw_locations[0][-1]
if is_plus_first:
locations = raw_locations
num_str = abs_num_str
else:
# 'is_plus' gets replaced by relative sign w.r.t. first such entry.
locations = [(sc, ij, not is_plus) for sc, ij, is_plus in raw_locations]
num_str = '-' + abs_num_str
tex_pieces.append(
r'$\scriptstyle %s\approx%s$' % (
r'{\approx}'.join(dotified_m(*sc_ij_relative_sign)
for sc_ij_relative_sign in locations),
num_str))
return (r'{\begin{minipage}[t]{10cm}'
r'\begin{flushleft}%s\end{flushleft}\end{minipage}}\\' %
', '.join(tex_pieces))
def tf_sugra_tensors_from_vielbein(self, t_vielbein, t_omega=None):
"""See base class."""
t_T = self.tf_T(t_vielbein, t_omega=t_omega)
t_A1, t_A2, _ = self.tf_A123(t_T, want_A3=False)
t_potential_A1 = mu.tff64(-3 / 4) * tf.math.real(
tf.einsum('ij,ij->', t_A1, tf.math.conj(t_A1)))
t_potential_A2 = mu.tff64(1 / 24) * tf.math.real(
tf.einsum('ijkl,ijkl->', t_A2, tf.math.conj(t_A2)))
t_potential = t_potential_A1 + t_potential_A2
return t_potential, t_T, t_A1, t_A2
def tf_fermion_massmatrix(self, ts_A123, t_potential):
"""See base class."""
*_, t_A3 = ts_A123
return a123.tf_fermion_massmatrix(
t_A3, t_potential,
mu.tfc128(-self.signature.fermion_masses_factor))
def tf_vector_massmatrix(self, ts_A123, t_potential):
"""See base class."""
_, t_A2, _ = ts_A123
return a123.tf_vector_massmatrix(
t_A2, t_potential,
mu.tfc128(-self.signature.vector_masses_factor))
def get_residual_symmetry(self, v70, **kwargs):
"""See base class."""
del kwargs # Unused.
unbroken_gg, d_unbroken_gg, u1s = mu.decompose_residual_symmetry(
v70,
self.e7.f_abC,
numpy.pad(numpy.eye(28), [(0, 0), (105, 0)]))
# TODO(tfish): Do we actually need the compact-group f_abC here?
rank = mu.get_lie_algebra_rank(unbroken_gg, self.e7.f_abC)
return supergravity.ResidualSymmetry(
rank=rank,
all_gens=unbroken_gg,
semisimple_subgroup_gens=d_unbroken_gg,
u1_gens=u1s)
def show_position_text(self, position):
"""Returns a text-string that shows the position."""
m288 = mu.nsum('a,axij->xij', position, self.e7.v70_as_sc8x8)
m8x8s = m288[0].round(5)
m8x8c = m288[1].round(5)
def fmt8x8(m):
# Pylint wrongly complains about `row` not being defined.
# pylint: disable=undefined-loop-variable
return '\n'.join(
', '.join('%+.5f' % x if x else ' 0 ' for x in row)
for row in m)
# pylint: enable=undefined-loop-variable
return (f'=== 8x8s ===\n{fmt8x8(m8x8s)}\n'
f'=== 8x8c ===\n{fmt8x8(m8x8c)}\n')
def v70o_goldstone_basis_and_projector(self, v70o, ev_threshold=1e-5):
"""Computes a basis and a projector onto the 'goldstone directions'.
Given a vector of 70 scalar field parameters in the orthonormal basis,
determines a basis and projector for the subspace of directions that
we get by applying so(8) generators to the vector (again, referring
to the orthonormal basis).
Args:
v70o: Optional [70]-numpy.ndarray, the scalar field parameters
in the orthonormal basis.
ev_threshold: Threshold for SVD singular values.
Returns:
A tuple (dim_goldstone_basis, basis, goldstone_projector)
that gives us the dimensionality D == dim_goldstone_basis
of the vector space V spanned by applying so(8) generators
to the input vector, plus a [70, 70]-array B that provides
an orthonormal basis for the scalars where B[:, :D] is an
orthonormal basis of the D-dimensional subspace V, plus
a [70, 70]-projector matrix that performs orthogonal projection
onto V.
"""
e7 = self.e7
so8_rotated_v70o = mu.nsum('abC,b->Ca',
e7.fo_abC[105:, :70, :70], v70o)
svd_so8_rot_u, svd_so8_rot_s, svd_so8_rot_vh = (
numpy.linalg.svd(so8_rotated_v70o, full_matrices=True))
del svd_so8_rot_vh # Unused, named for documentation only.
num_goldstone_directions = (svd_so8_rot_s > ev_threshold).sum()
goldstone_directions = svd_so8_rot_u[:, :num_goldstone_directions]
proj_goldstone = goldstone_directions.dot(goldstone_directions.T)
return num_goldstone_directions, svd_so8_rot_u, proj_goldstone
def get_subspace_aligner(self, target_subspace_an, rcond=1e-10):
"""Returns a closure that aligns a scalar vector with a target space."""
target_subspace_an = numpy.asarray(target_subspace_an)
if target_subspace_an.shape[0] != 70 or len(target_subspace_an.shape) != 2:
raise ValueError(
'Target subspace must be a [70, D]-array, '
f'shape is: {target_subspace_an.shape}')
tc_f_abC = mu.tff64(self.e7.f_abC)
v70o_target_subspace_an = mu.nsum(
'an,Aa->An', target_subspace_an, self.e7.v70o_from_v70)
svd_u, svd_s, svd_vh = numpy.linalg.svd(v70o_target_subspace_an,
full_matrices=False)
del svd_vh # Unused, named for documentation purposes only.
v70o_target_subspace_an_basis = svd_u[:, svd_s > rcond]
tc_v70o_proj_complement = mu.tff64(
numpy.eye(70) -
v70o_target_subspace_an_basis.dot(v70o_target_subspace_an_basis.T))
tc_v70o_from_v70 = mu.tff64(self.e7.v70o_from_v70)
#
def f_do_align(v70, **kwargs):
tc_v70 = mu.tff64(v70)
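      # Loss: rotate v70 by exp(so(8) generator) and penalize the component of
      # the rotated vector that falls outside the target subspace.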
def tf_loss(t_rot_params):
t_gen_so8 = tf.einsum('abC,a->Cb', tc_f_abC[-28:, :70, :70],
t_rot_params)
t_rot_so8 = tf.linalg.expm(t_gen_so8)
t_rotated = tf.einsum('ab,b->a', t_rot_so8, tc_v70)
t_deviation = tf.einsum(
'a,Aa,BA->B', t_rotated, tc_v70o_from_v70, tc_v70o_proj_complement)
return tf.reduce_sum(tf.math.square(t_deviation))
return mu.tf_minimize_v2(tf_loss, v70, **kwargs)
#
return f_do_align
def known_solutions(potential_min, potential_max, csv_path=None):
"""Returns [potential, stationarity, *coords] rows for known solutions."""
if csv_path is None:
csv_path = os.path.join(os.path.dirname(__file__),
'../equilibria/SO8_SOLUTIONS.csv')
return [numpy.asarray(row) for row in mu.csv_numdata(csv_path)
if potential_min <= row[0] <= potential_max]
### Demo Functions ###
# demo_*() functions may come and go, and are generally not tested.
# They show some general use patterns of the above code and provide good
# starting points for exploring the capabilities of this code. They are
# not considered part of the stable API, however.
def demo_show_physics_so7():
"""Text-prints the physics of the known 'SO(7)+' solution."""
sugra = SO8_SUGRA()
rows = list(itertools.islice(mu.csv_numdata(
'dim4/so8/equilibria/SO8_SOLUTIONS.csv'), 10))
# phys = sugra.get_physics(rows[4][-70:],
# dict(name='some_solution')) # SU(3)xU(1) N=2
# phys = sugra.get_physics(rows[8][-70:], dict(name='SO(3)xSO(3) S0880733'))
phys = sugra.get_physics(rows[1][-70:], dict(name='SO(7)+'))
print(sugra.show_physics_text(phys))
def demo_scan_sl2x7(seed=10, scale=1.0, verbosity=''):
"""Scans for equilibria on the (SL(2)/U(1))**7 submanifold."""
sl2x7_embedding = algebra.g.e7.sl2x7[:2][Ellipsis, :70].reshape(-1, 70)
sugra = SO8_SUGRA()
x0s = sugra.get_generator_x0s(scale=scale, dim=14, seed=seed)
for n, (pot, stat, a_pos) in enumerate(
sugra.scan(x0s=x0s, submanifold_embedding=sl2x7_embedding,
verbosity=verbosity)):
print(f'{(n, pot, stat, a_pos.round(7).tolist())!r}')
def demo_show_canonicalized():
"""Discovers and shows some canonicalized solutions."""
sugra = SO8_SUGRA()
rng = numpy.random.RandomState(seed=0)
for n, sol in zip(range(10),
sugra.scan(x0s=sugra.get_generator_x0s(scale=0.2),
verbosity='SF')):
pot, stat, pos = sol
pos_canon = sugra.canonicalize_equilibrium(pos)
print(f'### N={n}, P={pot}, S={stat}\n pos={pos_canon.round(6)}')
pot_good, stat_good, pos_good = sugra.find_equilibrium(
pos, minimize_kwargs=dict(strategy='N', mdnewton_maxsteps=2))
del pot_good, stat_good # Unused.
pos_canon = sugra.canonicalize_equilibrium(pos_good, rng=rng)
phys = sugra.get_physics(pos_canon, dict(name='some_solution'))
print(sugra.show_physics_text(phys))
| 41.70045 | 81 | 0.664434 | 2,802 | 18,515 | 4.132762 | 0.213419 | 0.009845 | 0.01658 | 0.004145 | 0.210449 | 0.153627 | 0.098964 | 0.078756 | 0.061313 | 0.032124 | 0 | 0.046862 | 0.21858 | 18,515 | 443 | 82 | 41.794582 | 0.753525 | 0.241912 | 0 | 0.086667 | 0 | 0.003333 | 0.060599 | 0.019351 | 0 | 0 | 0 | 0.002257 | 0 | 1 | 0.086667 | false | 0 | 0.03 | 0.003333 | 0.206667 | 0.013333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d16c1860d3a97199e52c42a7198ad4b92ddbbfd | 13,840 | py | Python | examples/mainx.py | aicanhelp/ai-harness | 112303b6d41ba023052863bb716bfa870ede6eee | [
"MIT"
] | null | null | null | examples/mainx.py | aicanhelp/ai-harness | 112303b6d41ba023052863bb716bfa870ede6eee | [
"MIT"
] | null | null | null | examples/mainx.py | aicanhelp/ai-harness | 112303b6d41ba023052863bb716bfa870ede6eee | [
"MIT"
] | null | null | null | import time
import sys
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from torch.multiprocessing import set_start_method
def prepare_for_multiple_workers():
'''
It is also important to set the multiprocessing start method to *spawn*,
as the default is *fork* which may cause deadlocks when using multiple
worker threads for dataloading.
:return:
'''
try:
set_start_method('spawn')
except RuntimeError:
pass
class AverageMeter(object):
"""
The ``AverageMeter`` class tracks training statistics like accuracy and iteration count.
Computes and stores the average and current value.
"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""
The ``accuracy`` function computes and returns the top-k accuracy of the model so we can track learning
progress.
    Computes the precision@k for the specified values of k.
    """
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
            # use reshape rather than view: correct[:k] may be non-contiguous
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def train(train_loader, model, criterion, optimizer, epoch):
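    """Runs one training epoch, printing running loss and accuracy statistics."""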
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
input = input.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradients in a backward pass
optimizer.zero_grad()
loss.backward()
# Average gradients across all workers
# average_gradients(model)
# Call step of optimizer to update model params
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % 10 == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (input, target) in enumerate(val_loader):
input = input.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % 100 == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def adjust_learning_rate(initial_lr, optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = initial_lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
"""
#########################################################################3
## INPUTS
Here is where we will define the inputs for the run. Some of the inputs are standard
model training inputs such as batch size and number of training epochs, and some are
specific to our distributed training task.
- batch_size - batch size for *each* process in the distributed training group. The effective total
  batch size across the distributed model is batch_size * world_size.
- workers - number of worker threads used with the dataloaders
- num_epochs - number of epochs to train for
- starting_lr - starting learning rate for training
- world_size - number of processes in the distributed training environment
- dist_backend - backend to use for distributed training communication (i.e. NCCL, Gloo, MPI, etc.)
- dist_url - url to specify the initialization method of the process group. This may
contain the IP address and port of the rank0 process or be a non-existent file on a shared file system.
"""
print("Collect Inputs...")
# Batch Size for training and testing
batch_size = 32
# Number of worker threads for dataloading
workers = 2
# Number of epochs to train for
num_epochs = 2
# Starting Learning Rate
starting_lr = 0.1
# Number of distributed processes
world_size = 4
# Distributed backend type
dist_backend = 'nccl'
# dist_backend = 'gloo'
# Url used to setup distributed training
# v1
# dist_url = "tcp://18.205.21.252:23456"
dist_url = "tcp://172.31.22.234:23456"
# dist_url = "tcp://172.17.0.1:23456"
# v2
# dist_url = "file:///home/ubuntu/distributed_tutorial/trainfile"
"""
#########################################################################3
### Initialize process group
One of the most important parts of distributed training in PyTorch is to properly setup
the process group, which is the **first** step in initializing the torch.distributed package.
To do this, we will use the `torch.distributed.init_process_group` function which takes
several inputs. First, a *backend* input which specifies the backend to use (i.e. NCCL, Gloo, MPI, etc.).
An *init_method* input which is either a url containing the address and port
of the rank0 machine or a path to a non-existent file on the shared file system. Note,
to use the file init_method, all machines must have access to the file, similarly for the
url method, all machines must be able to communicate on the network so make sure to configure
any firewalls and network settings to accommodate. The init_process_group function also takes
*rank* and *world_size* arguments which specify the rank of this process when run and the number
of processes in the collective, respectively. It is important to note that this is a blocking
function, meaning program execution will wait at this function until *world_size* processes have
joined the process group.
Another important step, especially when each node has multiple gpus is to set the *local_rank* of
this process. For example, if you have two nodes, each with 8 GPUs and you wish to train with all
of them then world_size = 16 and each node will have a process with local rank 0-7. This local_rank
is used to set the device (i.e. which GPU to use) for the process and later used to set the device when
creating a distributed data parallel model. It is also recommended to use NCCL backend in this
hypothetical environment as NCCL is preferred for multi-gpu nodes.
"""
print("Initialize Process Group...")
# v1 - init with url
dist.init_process_group(backend=dist_backend, init_method=dist_url, rank=int(sys.argv[1]), world_size=world_size)
# v2 - init with file
# dist.init_process_group(backend="nccl", init_method="file:///home/ubuntu/pt-distributed-tutorial/trainfile", rank=int(sys.argv[1]), world_size=world_size)
local_rank = int(sys.argv[2])
dp_device_ids = [local_rank]
torch.cuda.set_device(local_rank)
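# Example launch (hypothetical 2-node x 2-GPU setup matching world_size = 4; the first
# argument is the global rank, the second the local rank on that node):
#   node0: python mainx.py 0 0    and    python mainx.py 1 1
#   node1: python mainx.py 2 0    and    python mainx.py 3 1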
"""
#########################################################################3
### Initialize Model
(?) - Does DDP also handle averaging of gradients? Yes: DistributedDataParallel
all-reduces (averages) gradients across processes during the backward pass, so no
manual average_gradients() step is needed.
The next major step is to initialize the model to be trained. Here, we will use
a resnet18 model from `torchvision.models` but any model may be used.
First, we initialize the model and place it in GPU memory.
The next step, which is very important for our distributed training example, is to
make the model `DistributedDataParallel`, which handles the distribution of the data
to and from the model. Also notice we pass our device ids list as a parameter which contains
the local rank (i.e. GPU) we are using. Finally, we specify the loss function and
optimizer to train with.
"""
print("Initialize Model...")
model = models.resnet18(pretrained=False).cuda()
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=dp_device_ids, output_device=local_rank)
# model = torch.nn.parallel.DistributedDataParallelC10D(model, device_ids=dp_device_ids, output_device=local_rank)
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(), starting_lr, momentum=0.9, weight_decay=1e-4)
"""
#########################################################################3
### Initialize Dataloaders
A note on the pin_memory arg in the dataloaders: pin_memory=True stages batches in
page-locked (pinned) host memory, which speeds up host-to-GPU copies; it is set to
False here.
The last step in preparation for the training is to specify which dataset to use. Here
we use the STL10 dataset from torchvision.datasets.STL10. The STL10 dataset is a
10 class dataset of 96x96px images. For use with our model, notice we resize the images
to 224x224px in the transform. One distributed training specific item in this section
is the use of the `DistributedSampler` for the training set, which is designed to be
used in conjunction with `DistributedDataParallel` models. This object handles the
partitioning of the dataset across the distributed environment so that not all models are training
on the same subset of data, which would be counterproductive. Finally, we create the `DataLoader`s
which are responsible for feeding the data to the processes.
"""
print("Initialize Dataloaders...")
transform = transforms.Compose(
[transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = datasets.STL10(root='./data', split='train', download=True, transform=transform)
valset = datasets.STL10(root='./data', split='test', download=True, transform=transform)
train_sampler = torch.utils.data.distributed.DistributedSampler(trainset)
# train_sampler = None
train_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=(train_sampler is None),
num_workers=workers, pin_memory=False, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(valset, batch_size=batch_size, shuffle=False, num_workers=workers,
pin_memory=False)
"""
#########################################################################
### Training Loop
Finally, the last step is to execute the training loop. We have already done most
of the work for setting up the distributed training so this training loop has
almost no artifacts of distributed training. The one detail specific to our task
is the setting the current epoch count in the `DistributedSampler`, as the sampler
shuffles the data going to each process deterministically based on epoch. After
updating the sampler, the loop runs a full training epoch, runs a full validation
step then prints the performance of the current model against the best performing
so far. After training for num_epochs, the loop exits and the tutorial is complete.
Notice, since this is an exercise we are not saving models but one may wish to
keep track of the best performing model then save it at the end of training.
"""
best_prec1 = 0
for epoch in range(num_epochs):
train_sampler.set_epoch(epoch)
adjust_learning_rate(starting_lr, optimizer, epoch)
# train for one epoch
print("\nBegin Training Epoch {}".format(epoch + 1))
train(train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
print("Begin Validation @ Epoch {}".format(epoch + 1))
prec1 = validate(val_loader, model, criterion)
# remember best prec@1 and save checkpoint if desired
# is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
print("Epoch Summary: ")
print("\tEpoch Accuracy: {}".format(prec1))
print("\tBest Accuracy: {}".format(best_prec1))
| 39.430199 | 156 | 0.674277 | 1,956 | 13,840 | 4.695808 | 0.222393 | 0.012738 | 0.006532 | 0.002177 | 0.230376 | 0.173762 | 0.153076 | 0.136962 | 0.136962 | 0.12172 | 0 | 0.022058 | 0.207298 | 13,840 | 350 | 157 | 39.542857 | 0.815149 | 0.131358 | 0 | 0.264901 | 0 | 0.006623 | 0.109818 | 0.014704 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05298 | false | 0.006623 | 0.086093 | 0 | 0.15894 | 0.07947 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d17c2e0a24f1fbe0f37cf7a1a6fd74cecd4a675 | 1,634 | py | Python | tests/test_core.py | danrs/r2-d7 | d1f7a839f0bcb490954477c592245b5107b8a6aa | [
"MIT"
] | 13 | 2016-07-06T12:47:15.000Z | 2022-02-03T21:18:06.000Z | tests/test_core.py | danrs/r2-d7 | d1f7a839f0bcb490954477c592245b5107b8a6aa | [
"MIT"
] | 35 | 2017-01-31T17:36:07.000Z | 2022-03-30T02:52:42.000Z | tests/test_core.py | danrs/r2-d7 | d1f7a839f0bcb490954477c592245b5107b8a6aa | [
"MIT"
] | 13 | 2016-09-22T05:22:30.000Z | 2022-03-14T23:18:08.000Z | import threading
import pytest
from r2d7.core import DroidCore
categories = [
'condition',
'damage',
'pilot',
'ship',
'upgrade',
]
def test_data(testbot):
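    """Checks that every data category was loaded and spot-checks a few entries."""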
for filename in categories:
assert filename in testbot.data
assert testbot.data_version is not None
assert testbot.data['ship']['starviperclassattackplatform']['name'] == "StarViper-class Attack Platform"
assert testbot.data['upgrade']['genius']['name'] == "\"Genius\""
assert type(testbot.data['ship']['hwk290lightfreighter']['pilots']) is dict
partial_canonicalize_tests = {
'X-Wing': 'xwing',
'T-70 X-Wing': 't70xwing',
'Veteran instincts': 'veteraninstincts',
}
@pytest.mark.parametrize('before, after', partial_canonicalize_tests.items())
def test_partial_canonicalize(before, after):
assert DroidCore.partial_canonicalize(before) == after
def test_needs_update(testbot):
assert testbot.data_version is not None
assert not testbot.needs_update()
def test_threaded(testbot):
def threadtest(signal):
# If a new event loop isn't created for the thread, this will crash
try:
assert threading.current_thread() != threading.main_thread()
testbot.load_data()
except Exception as error:
# Pytest will catch this stdout and print it and the signal will
# fail the test
print(error)
signal.clear()
else:
signal.set()
signal = threading.Event()
thread = threading.Thread(target=threadtest, args=(signal, ))
thread.start()
thread.join()
assert signal.is_set()
| 26.786885 | 108 | 0.662179 | 189 | 1,634 | 5.62963 | 0.481481 | 0.06203 | 0.06391 | 0.045113 | 0.073308 | 0.073308 | 0.073308 | 0.073308 | 0 | 0 | 0 | 0.007092 | 0.223378 | 1,634 | 60 | 109 | 27.233333 | 0.831363 | 0.086903 | 0 | 0.046512 | 0 | 0 | 0.149294 | 0.01883 | 0 | 0 | 0 | 0 | 0.232558 | 1 | 0.116279 | false | 0 | 0.069767 | 0 | 0.186047 | 0.023256 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d19638a09e808187a7766760758a12c4a907286 | 1,284 | py | Python | setup.py | nedap/django-pypuppetdb | f3c4009f7ff1830fef6363ee123ba0ddc8e22092 | [
"Apache-2.0"
] | null | null | null | setup.py | nedap/django-pypuppetdb | f3c4009f7ff1830fef6363ee123ba0ddc8e22092 | [
"Apache-2.0"
] | 3 | 2015-04-08T09:44:08.000Z | 2017-05-18T14:31:45.000Z | setup.py | nedap/django-pypuppetdb | f3c4009f7ff1830fef6363ee123ba0ddc8e22092 | [
"Apache-2.0"
] | null | null | null | import sys
import os
import codecs
from setuptools import setup, find_packages
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
with codecs.open('README.rst', encoding='utf-8') as f:
README = f.read()
with codecs.open('CHANGELOG.rst', encoding='utf-8') as f:
CHANGELOG = f.read()
setup(
name='django_pypuppetdb',
version='0.2.2',
url='https://github.com/nedap/django-pypuppetdb',
license='Apache License 2.0',
description='Handles authorization for Django by using puppetdb users',
long_description='\n'.join((README, CHANGELOG)),
keywords='puppet puppetdb django authorization tastypie',
author='Ronald van Zon',
author_email='rvzon84+django-pypuppetdb@gmail.com',
packages=find_packages(),
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Topic :: Software Development :: Libraries'
],
)
| 29.860465 | 75 | 0.645639 | 149 | 1,284 | 5.530201 | 0.57047 | 0.092233 | 0.121359 | 0.036408 | 0.043689 | 0.043689 | 0 | 0 | 0 | 0 | 0 | 0.016815 | 0.212617 | 1,284 | 42 | 76 | 30.571429 | 0.79822 | 0 | 0 | 0 | 0 | 0 | 0.507009 | 0.027259 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.114286 | 0 | 0.114286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d1bcaaae9f06f9ff6b61ebd29da0eb8808acc1c | 3,611 | py | Python | Med_Cabinet/sketch_Flask_API.py | RMDircio/Med_Cabinet_Data_Science | a8d33785a9bfe8d9beaa03c014c897d8d54a91b0 | [
"MIT"
] | null | null | null | Med_Cabinet/sketch_Flask_API.py | RMDircio/Med_Cabinet_Data_Science | a8d33785a9bfe8d9beaa03c014c897d8d54a91b0 | [
"MIT"
] | null | null | null | Med_Cabinet/sketch_Flask_API.py | RMDircio/Med_Cabinet_Data_Science | a8d33785a9bfe8d9beaa03c014c897d8d54a91b0 | [
"MIT"
] | null | null | null | # app_API.py
# NOT WORKING CODE, MOCKUP, NEEDS WORK, see test_Flask_API.py for functioning test
#Imports
import pandas as pd
import requests
from flask import Flask, json, Blueprint, request, jsonify, flash, redirect, render_template # all from twitoff, only Flask, json, and request used here
from sklearn.linear_model import LogisticRegression # for example
from sklearn.ensemble import RandomForestClassifier # consider using if no neural net pickle
# import pickle model
# Import Leafly csv # import from github URL for production
file_name = r"C:\Users\johnj\OneDrive\Documents\Lambda\BuildWeek3\data-science\Med_Cabinet\data\Leafly.csv"
df = pd.read_csv(file_name)
strains = df['Strain']
# Mock data for quasi params
MOCK_DATA = {"First Name": "John", "Last Name": "Doe", "Effects": "Euphoric, Happy, Relaxed, Focused"}
# Placeholder values pulled from the mock data until real request data is wired up
first_name = MOCK_DATA["First Name"]
last_name = MOCK_DATA["Last Name"]
effects = MOCK_DATA["Effects"]
# Defining a params dict for parameters to be "sent"(GET) to the front end API. ie: params for First Name, Last Name, and Effects
PARAMS = {"First Name": first_name, "Last Name": last_name, "effects": effects}
# REQUEST for a JSON object in the following format:
{
"First Name": ["John"],
"Last Name": ['Doe'],
"Effects": ["Euphoric, Happy, Relaxed, Focused"]
}
# RESPONSE is in JSON as a list of dictionaries, each with 3 keys: First Name, Last Name, and Recommendation
{
"First Name": "John",
"Last Name": "Doe",
"Recommendattion": "Purple Kush"
}
# Flask API
api = Flask(__name__)
# API endpoints
GET_URL = "getaddress"
API_ENDPOINT = "postaddress"
API_KEY = "is this even necessary?"
# GET route
@api.route('/effects', methods=['GET'])
def get_effects_recommend(): # Try using a class here
if(request.method == 'GET'):
# Sending GET request and saving the response as (r)esponse object
r = requests.get(url = GET_URL, params = PARAMS)
# Extracting data in json format
data = r.json()
# Extracting first name, last name, and effects of the first matching
first_name = data['results'][0]['First Name']
last_name = data['results'][0]['Last Name']
effects = data['results'][0]['Effects']
# Loading the saved pickle model
#model = load_model()
#X, y = load_cannabis(return_X_y=True) # mock-up but just to have some data to use when predicting
#result = model.predict(X[:2 :]) # mock-up from twitoff iris example to be tweaked
#return str(result)
# This was training the model on the fly from twitoff
classifier = LogisticRegression() # for example
#classifier = RandomForestClassifier() # back up model to try on the fly or train and pickle before
        classifier.fit(effects, strains)  # get list of strains from df['Strain']
results = classifier.predict(strains)
# POST route
@api.route('/results', methods=['POST'])
def post_results(): # Try using a class here
# Data to be sent to API
data = {"api_dev_key":API_KEY, #?
"First Name":first_name,
"Last Name":last_name,
"Reccommendation":results}
# Sending post request and saving response as response object
r = requests.post(url = API_ENDPOINT, data = data)
# Extracting response text
DB_URL = r.text
# For fun, to get some output and confirmation
print("The DB URL is:%s"%DB_URL)
# Run API
if __name__ == '__main__':
api.run()
# 13 unique effects, being used for predictions of strains
#Euphoric
#Happy
#Relaxed
#Focused
#Energetic
#Sleepy
#Talkative
#Tingly
#Aroused
#Giggly
#Creative
#Hungry
#Uplifted
| 24.398649 | 152 | 0.677098 | 498 | 3,611 | 4.819277 | 0.385542 | 0.045 | 0.04 | 0.0425 | 0.1325 | 0.109167 | 0.076667 | 0.076667 | 0.048333 | 0.048333 | 0 | 0.002833 | 0.217945 | 3,611 | 147 | 153 | 24.564626 | 0.847026 | 0.453337 | 0 | 0 | 0 | 0.021739 | 0.25756 | 0.047967 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.108696 | 0 | 0.152174 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d1e286f0ac67546a9847d2fa6dac047d092c04a | 7,072 | py | Python | visualization.py | xironw/NBAproject | 630b0842363795f81e2988770ee1620d3bc49ef7 | [
"MIT"
] | null | null | null | visualization.py | xironw/NBAproject | 630b0842363795f81e2988770ee1620d3bc49ef7 | [
"MIT"
] | null | null | null | visualization.py | xironw/NBAproject | 630b0842363795f81e2988770ee1620d3bc49ef7 | [
"MIT"
] | null | null | null | import altair as alt
import os
def scatter_plot(data):
"""
This function takes into a dataframe. Based on this dataframe, it will
plot 4 scatter plots for the players' height vs average points per game,
, players' height vs rebounds, players' height vs assists and players'
height vs usage percentage. The plot is then saved as a file named
scatterchart.html in the current directory.
"""
# create the first scatter plot for players' height vs
# average points per game.
    chart1 = alt.Chart(
        data, title="Players' height vs Average points per game"
    ).mark_point().encode(
        alt.X('player_height',
              axis=alt.Axis(title='Player height(cm)'),
              scale=alt.Scale(domain=(150, 240))),
        alt.Y('pts', axis=alt.Axis(title='Average points per game'))
    ).properties(width=450, height=350)
# create the second scatter plot for players' height vs rebounds
chart2 = alt.Chart(data, title="Players' height vs Rebounds"
).mark_point().encode(
alt.X('player_height',
axis=alt.Axis(title='Player height(cm)'),
scale=alt.Scale(domain=(150, 240)),
), alt.Y('reb',
axis=alt.Axis(title='Rebounds')
)).properties(width=450, height=350)
    # create the third scatter plot for players' height vs assists.
chart3 = alt.Chart(data, title="Players' height vs Assists"
).mark_point().encode(
alt.X('player_height',
axis=alt.Axis(title='Player height(cm)'),
scale=alt.Scale(domain=(150, 240)),
), alt.Y('ast',
axis=alt.Axis(title='Assists')
)).properties(width=450, height=350)
# create the fourth scatter plot for players' height vs usage percentage.
chart4 = alt.Chart(data, title="Players' height vs Usage percentage"
).mark_point().encode(
alt.X('player_height',
axis=alt.Axis(title='Player height(cm)'),
scale=alt.Scale(domain=(150, 240)),
), alt.Y('usg_pct',
axis=alt.Axis(title='Usage percentage')
)).properties(width=450,
height=350)
# create the interaction (zoom in and out) function for the plots
selection = alt.selection_interval(bind='scales')
# create the compound charts
chart = alt.vconcat(chart1, chart2, chart3, chart4, title="Players'" +
" Height vs Different Performances",
padding=100, spacing=80).configure_mark(
opacity=0.5, color='pink').add_selection(
selection
).configure_title(align='right', fontSize=23
).configure_axis(
titleFontSize=20)
# saving and output the compound scatter charts.
path = os.getcwd()
chart.save(path + '/scatterchart.html')
def boxplots(data):
"""
This function takes in a data frame as a parameter. Based on the data
in the dataframe, four box plots will be generated. The box plots will
compare a basketball players age to points, rebounds, assists, and usage
percentage. The plot is then finally saved as 'age_statistics.html' in
the file directory.
:param data: Data frame of NBA player statistics
"""
age_range = [21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]
age_mask_lower = data['age'] >= 21
age_mask_upper = data['age'] <= 31
columns_interest = ['age', 'pts', 'reb', 'ast', 'usg_pct']
data_clean = data.loc[age_mask_lower & age_mask_upper, columns_interest]
# Generate box plot of Age vs Points
age_vs_points = alt.Chart(
data_clean
).mark_boxplot(size=50, extent=2).encode(
alt.X('age', title='Age', axis=alt.Axis(values=age_range),
scale=alt.Scale(domain=(20, 32))),
y=alt.Y('pts', title='Points'),
color=alt.Color('age', scale=alt.Scale(scheme='accent'), legend=None)
).properties(width=850, title='Age VS Points')
# Generate box plot of Age vs Rebounds
age_vs_rebounds = alt.Chart(
data_clean
).mark_boxplot(size=50, extent=2.5).encode(
alt.X('age', title='Age', axis=alt.Axis(values=age_range),
scale=alt.Scale(domain=(20, 32))),
y=alt.Y('reb', title='Rebounds'),
color=alt.Color('age', scale=alt.Scale(scheme='accent'), legend=None)
).properties(width=850, title='Age VS Rebounds')
# Generate box plot of Age vs Assists
age_vs_ast = alt.Chart(data_clean).mark_boxplot(size=50, extent=3).encode(
alt.X('age', title='Age', axis=alt.Axis(values=age_range),
scale=alt.Scale(domain=(20, 32))),
y=alt.Y('ast', title='Assists'),
color=alt.Color('age', scale=alt.Scale(scheme='accent'), legend=None)
).properties(width=850, title='Age VS Assists')
# Generate box plot of Age vs Usage Percentage
age_vs_usg = alt.Chart(data_clean).mark_boxplot(size=50, extent=2).encode(
alt.X('age', title='Age', axis=alt.Axis(values=age_range),
scale=alt.Scale(domain=(20, 32))),
y=alt.Y('usg_pct', title='Usage Percentage'),
color=alt.Color('age', scale=alt.Scale(scheme='accent'), legend=None)
).properties(width=850, title='Age VS Usage Percentage')
# create the interaction (zoom in and out) function for the plots
selection = alt.selection_interval(bind='scales')
# Compound the charts with one another and save the chart
charts = alt.vconcat(
age_vs_points, age_vs_rebounds,
age_vs_ast, age_vs_usg,
title="Players'" + " Age vs Different Performances",
padding=100, spacing=80).configure_mark(
opacity=0.5, color='red').add_selection(
selection
).configure_title(
align='right', fontSize=23
).configure_axis(
titleFontSize=20
)
charts.save('age_statistics.html')
| 49.454545 | 78 | 0.524887 | 803 | 7,072 | 4.542964 | 0.206725 | 0.0233 | 0.053454 | 0.035088 | 0.634594 | 0.60773 | 0.54057 | 0.443805 | 0.443805 | 0.43284 | 0 | 0.032459 | 0.363971 | 7,072 | 142 | 79 | 49.802817 | 0.778568 | 0.197115 | 0 | 0.382353 | 0 | 0 | 0.11935 | 0 | 0.009804 | 0 | 0 | 0 | 0 | 1 | 0.019608 | false | 0 | 0.019608 | 0 | 0.039216 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d22cf20d4d8233653657e8818c2e38877be4f0f | 3,201 | py | Python | poly_m2.py | coherent17/PM2.5_new | 3b7a583bbebe316f91de0b172253f09815c73cda | [
"MIT"
] | null | null | null | poly_m2.py | coherent17/PM2.5_new | 3b7a583bbebe316f91de0b172253f09815c73cda | [
"MIT"
] | null | null | null | poly_m2.py | coherent17/PM2.5_new | 3b7a583bbebe316f91de0b172253f09815c73cda | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import math
import random
#data preprocessing:
#feature data
dataX=np.genfromtxt("dataset_X.csv",delimiter=',')
dataX=np.delete(dataX,[0],axis=1)
temp=np.array([1]*len(dataX))
dataX=np.c_[temp,dataX]
#append the dataX to match the theta (171 features)
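# 171 = 18 base columns (bias + 17 features) + 153 pairwise products x_i*x_j with j <= i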
k=18
for i in range(1,18):
for j in range(1,i+1):
if k in range(18,171):
dataX=np.insert(dataX,k,values=dataX[:,i]*dataX[:,j],axis=1)
k+=1
#target data
dataT=np.genfromtxt("dataset_T.csv",delimiter=',')
dataT=np.delete(dataT,[0],axis=1)
#shuffle the data to avoid the strange distribution
#concatenate the feature and target matrix and shuffle together
data_temp=np.c_[dataT,dataX]
# np.random.shuffle(data_temp)
dataT=data_temp[:,0]
dataX=np.delete(data_temp,[0],axis=1)
#split the data into training set and testing set
def train_test_split(X,Y,test_size):
X_train=np.array(X[:math.floor(len(X)*(1-test_size))])
Y_train=np.array(Y[:math.floor(len(Y)*(1-test_size))])
X_test=np.array(X[math.floor(len(X)*(1-test_size)):])
Y_test=np.array(Y[math.floor(len(Y)*(1-test_size)):])
Y_train=Y_train.reshape(len(Y_train),1)
Y_test=Y_test.reshape(len(Y_test),1)
return X_train, X_test, Y_train, Y_test
def linear_regression(X,Y):
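    # Closed-form least squares via the normal equation: w = (X^T X)^+ X^T y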
w=np.matmul(np.matmul(np.linalg.pinv(np.matmul(X.T,X)),X.T),Y)
return w
def rmse(a,b):
return math.sqrt(np.sum((a-b)**2)/len(a))
def hypothesis(w,X):
return np.matmul(w,np.transpose(X))
X_train,X_test,T_train,T_test = train_test_split(dataX,dataT,0.2)
w=linear_regression(X_train,T_train)
#plot the value of the model predict and the actual model (train part)
x=np.arange(0,len(X_train))
y=hypothesis(w.reshape(1,171),X_train).reshape(len(X_train),)
T_train=T_train.reshape(len(X_train),)
plt.plot(x,y,color='red',lw=1.0,ls='-',label="training_predict_value")
plt.plot(x,T_train,color='blue',lw=1.0,ls='-',label="target_value")
plt.text(0,1,"RMSE=%.3lf" %(rmse(T_train,y)))
plt.xlabel("the nth data")
plt.ylabel("PM2.5")
plt.title("Linear regression (M=2) training")
plt.legend()
plt.show()
#plot the value of the model predict and the actual model (test part)
x=np.arange(0,len(X_test))
y=hypothesis(w.reshape(1,171),X_test).reshape(len(X_test),)
T_test=T_test.reshape(len(X_test),)
plt.plot(x,y,color='red',lw=1.0,ls='-',label="testing_predict_value")
plt.plot(x,T_test,color='blue',lw=1.0,ls='-',label="target_value")
plt.text(0,1,"RMSE=%.3lf" %(rmse(T_test,y)))
plt.xlabel("the nth data")
plt.ylabel("PM2.5")
plt.title("Linear regression (M=2) testing")
plt.legend()
plt.show()
#change the percentage of the training dataset to see the rmse
test_sizes=[0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8]
rmse_per=[]
for i in test_sizes:
X_train,X_test,T_train,T_test = train_test_split(dataX,dataT,i)
w=linear_regression(X_train,T_train)
T_test=T_test.reshape(len(X_test),)
y_per=hypothesis(w.reshape(1,171),X_test).reshape(len(X_test),)
rmse_per.append(rmse(T_test,y_per))
#visualize
x=test_sizes
plt.plot(x,rmse_per,color='red',lw=1.0,ls='-')
plt.title("testing rmse in different percentage of training set(M=2)")
plt.xlabel("the percentage of testing dataset")
plt.ylabel("rmse")
plt.show() | 32.333333 | 72 | 0.710403 | 622 | 3,201 | 3.525723 | 0.189711 | 0.02508 | 0.030096 | 0.01368 | 0.434565 | 0.407205 | 0.381669 | 0.327405 | 0.304606 | 0.304606 | 0 | 0.030261 | 0.101843 | 3,201 | 99 | 73 | 32.333333 | 0.732522 | 0.15214 | 0 | 0.180556 | 0 | 0 | 0.121347 | 0.015908 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.055556 | 0.027778 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d23d289b39391f8189b59f134a72120a0f0f8ef | 15,552 | py | Python | tests/test_definitions.py | d3rp/fissle | 770a140e42e6d8f7d55b3211a6ba691d2a915a2d | [
"Apache-2.0"
] | 1 | 2021-05-21T12:54:32.000Z | 2021-05-21T12:54:32.000Z | tests/test_definitions.py | d3rp/fissle | 770a140e42e6d8f7d55b3211a6ba691d2a915a2d | [
"Apache-2.0"
] | 4 | 2020-03-24T17:37:35.000Z | 2020-12-03T13:22:35.000Z | tests/test_definitions.py | d3rp/fissle | 770a140e42e6d8f7d55b3211a6ba691d2a915a2d | [
"Apache-2.0"
] | null | null | null | import sys
from functools import partial
from functools import wraps
from pathlib import Path
from pathlib import PurePosixPath as PosixPath
# Using Pure versions of paths allows testing cross platform code
# Can't use just Path, because that will get rendered to a platform specific
# subclass when validating (e.g. on windows Path('...') -> WindowsPath('...') )
from pathlib import PureWindowsPath as WindowsPath
from unittest import TestCase
from tests import SysArgvRestore
# from clima import Schema
# TODO: sane exception for this scenario
# def test_schema_without_default():
# class C(Schema):
# a: int
def wrap_cc(func):
wraps(func)
from clima import c, Schema
def wrapped(*args, c=c, Schema=Schema, **kwargs):
result = func(*args, c=c, Schema=Schema, **kwargs)
c._clear()
return result
return wrapped
def wrap_methods_with_c(cls):
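    # Wraps every test_* method with wrap_cc on attribute access so the global
    # clima config is cleared after each test; returns the patched class.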
orig = cls.__getattribute__
def new_getattr(self, name):
# print(f'wrapped with {c}')
if name.startswith('test_'):
method = getattr(cls, name)
new_method = wrap_cc(method)
setattr(cls, name, new_method)
return orig(self, name)
cls.__getattribute__ = new_getattr
return cls
class TestSchemaNoType(TestCase, SysArgvRestore):
default = 42
def setUp(self) -> None:
from clima import c, Schema
self.c = c
self.save_sysargv()
class C(Schema):
a = self.default
def test_default(self):
c = self.c
sys.argv = ['test', 'x']
assert (c.a == self.default)
def test_override(self):
c = self.c
sys.argv = ['test', 'x', '--a', '1']
@c
class Cli:
def x(self):
"""docstring"""
pass
assert (c.a == 1)
class TestSchemaX(TestCase, SysArgvRestore):
def test_schema_without_type(self):
from clima import c, Schema
self.c = c
sys.argv = ['test', 'x']
class C(Schema):
a = 1
L = [1, 2, 3]
@c
class D:
def x(self):
assert c.a == 1
assert c.L == [1, 2, 3]
assert c.a == 1
class TestSchemaY(TestCase, SysArgvRestore):
def test_schema_post_init(self):
from clima import c, Schema
self.c = c
sys.argv = ['test', 'x']
class C(Schema):
a = 1
def post_init(self, *args):
self.a = 2
@c
class D:
def x(self):
assert c.a == 2
assert c.a == 2
def test_schema_post_init_adding_attr(self):
from clima import c, Schema
self.c = c
sys.argv = ['test', 'x']
class C(Schema):
a = 1
def post_init(self, *args):
self.b = 2
@c
class D:
def x(self):
assert c.a == 1
assert c.b == 2
assert c.a == 1
assert c.b == 2
class TestSchemaArgs(TestCase, SysArgvRestore):
defaults = {
'str': 'foobar',
'bool': False,
}
changed = {
'str': 'changed',
'bool': True,
}
def setUp(self) -> None:
from clima import c, Schema
self.c = c
self.save_sysargv()
class C(Schema):
a: str = self.defaults['str']
b: bool = self.defaults['bool']
def test_defaults(self):
c = self.c
sys.argv = ['test', 'x']
@c
class Cli:
def x(self):
"""docstring"""
pass
assert c.a == self.defaults['str'], 'Schema definition should stick if not overridden'
assert c.b == self.defaults['bool'], 'Schema definition should stick if not overridden'
def test_schema_order_args(self):
c = self.c
sys.argv = ['test', 'x', '--a', self.changed['str'], '--b']
@c
class Cli:
def x(self):
"""docstring"""
pass
assert c.a == self.changed['str'], 'Args given in same order as schema def should override values'
assert c.b == self.changed['bool'], 'Args given in same order as schema def should override values'
def test_non_schema_order_args(self):
c = self.c
sys.argv = ['test', 'x', '--b', '--a', self.changed['str']]
@c
class Cli:
def x(self):
"""docstring"""
pass
print(sys.argv)
assert c.a == self.changed['str'], 'Args given in a different order as schema def should override values'
assert c.b == self.changed['bool'], 'Args given in a different order as schema def should override values'
# class TestWeirdSchema(TestCase, SysArgvRestore):
# def setUp(self) -> None:
# self.defaults = {
# 'test_int': [84, int],
# 'test_str': ['oh hi', str],
# }
#
# from clima import Schema
# super().save_sysargv()
#
# class C(Schema):
# test_int: int = self.defaults['test_int'][0] # comment for the int
# # thishere
# # foo: asdf
# # foo: asdf = 0
# # foo: asdf = 0 # sth
# test_str: str = self.defaults['test_str'][0] # comment for the str
#
# def test_commented_default(self):
# from clima import c
# sys.argv = ['test', 'x']
#
# @c
# class Cli:
# def x(self):
# """docstring"""
# pass
#
# for k, v in self.defaults.items():
# assert (getattr(c, k) == v[0])
# assert (type(getattr(c, k)) == v[1])
class TestSchema(TestCase, SysArgvRestore):
def setUp(self) -> None:
self.defaults = {
'test_int': [42, int],
'test_str': ['oh hi', str],
'test_posix_path': [PosixPath('/tmp'), PosixPath],
'test_win_path': [WindowsPath('/tmp'), WindowsPath],
}
from clima import c, Schema
self.c = c
super().save_sysargv()
class C(Schema):
test_int: int = self.defaults['test_int'][0]
test_str: str = self.defaults['test_str'][0]
test_posix_path: PosixPath = self.defaults['test_posix_path'][0]
test_win_path: WindowsPath = self.defaults['test_win_path'][0]
def test_default(self):
c = self.c
sys.argv = ['test', 'x']
for k, v in self.defaults.items():
assert (getattr(c, k) == v[0])
assert (type(getattr(c, k)) == v[1])
def test_override(self):
c = self.c
sys.argv = ['test', 'x', '--test_int', '1']
@c
class Cli:
def x(self):
"""docstring"""
pass
assert (c.test_int == 1)
assert (type(c.test_int) == int)
def test_no_docstring(self):
c = self.c
"""Not defining a docstring in Configuration(Schema) should not crash the script"""
sys.argv = ['test', 'x', '--test_str', 'no moi']
@c
class Cli:
def x(self):
pass
assert c.test_int == 42, 'Schema should enable default value as int'
assert c.test_str == 'no moi', 'cli args not read'
def test_positional(self):
c = self.c
"""Positional arguments should be parsed from Configuration(Schema) layout order"""
_int = 96
_str = 'numberwang'
sys.argv = ['test', 'x', _int, _str]
@c
class Cli:
def x(self):
"""docstring"""
pass
assert (c.test_int == _int)
assert (c.test_str == _str)
class TestTypeCasting(TestCase, SysArgvRestore):
def setUp(self) -> None:
from clima import c, Schema
self.c = c
sys.argv = ['test', 'x']
class TypeGalore(Schema):
a: bool = 0
b: bytearray = 0
c: bytes = 0
d: complex = 0
e: dict = tuple(zip('aa', 'bb'))
f: float = 0
g: frozenset = {}
h: int = 0.0
i: list = 'aa'
# k: property = 0
l: set = [1, 2]
m: str = 0
n: tuple = []
o: int = '1'
def test_builtins(self):
c = self.c
@c
class Cli:
def x(self):
"""docstring"""
pass
for k, valid in zip('abcdefghilmno', [
bool,
bytearray,
bytes,
complex,
dict,
float,
frozenset,
int,
list,
# property,
set,
str,
tuple,
int
]):
assert type(getattr(c, k)) == valid
class TestTypeCastingWith(TestCase, SysArgvRestore):
def setUp(self) -> None:
sys.argv = ['test', 'x']
def test_win_path(self):
from clima import c, Schema
class TestTypes(Schema):
p: WindowsPath = '.'
@c
class Cli:
def x(self):
"""docstring"""
pass
assert (type(c.p) == WindowsPath)
def test_posix_path(self):
from clima import c, Schema
class TestTypes(Schema):
p: PosixPath = '.'
@c
class Cli:
def x(self):
"""docstring"""
pass
assert (type(c.p) == PosixPath)
def test_builtins_post_init(self):
from clima import c, Schema
values = {
'a': True,
'f': 1.0,
'i': ['bbb'],
'm': '1',
}
class TypeGalore(Schema):
a: bool = 0
b: bytearray = 0
c: bytes = 0
d: complex = 0
e: dict = tuple(zip('aa', 'bb'))
f: float = 0
g: frozenset = {}
h: int = 0.0
i: list = 'aa'
# k: property = 0
l: set = [1, 2]
m: str = 0
n: tuple = []
def post_init(self, *args):
# overriding
self.a = 1
self.f = 1
self.i = 'bbb'
self.m = 1
@c
class Cli:
def x(self):
"""docstring"""
pass
for k, valid in zip('afim', [
bool,
float,
list,
str,
]):
field = getattr(c, k)
value = values[k]
assert type(getattr(c, k)) is valid, 'Fields not cast correctly after schema.post_init'
assert field == value, f'After casting and schema.post_init, values are incorrect ("{field}"!="{value}")'
class TestTypeCastingWithArgs(TestCase, SysArgvRestore):
def setUp(self) -> None:
sys.argv = ['test', 'x', '--a', '0', '--path', 'foobar']
def test_casting_cli_args(self):
from clima import c, Schema
class TestTypes(Schema):
a: bool = True
path: PosixPath = ''
@c
class Cli:
@staticmethod
def post_init(s):
pass
def x(self):
"""docstring"""
pass
# for attr, cls in TestTypes.__annotations__.items():
# if hasattr(c, attr):
# setattr(c, attr, cls(getattr(c, attr)))
assert (type(c.a) == bool), f'Casting of cli args should follow schema {type(c.a)} != bool'
assert (type(c.path) == PosixPath), f'Casting of cli args should follow schema {type(c.path)} != Path'
# TODO: TBD
def test_postinit_after_cli_args(self):
from clima import c, Schema
class TestTypes(Schema):
a: bool = True
path: PosixPath = ''
@c
class Cli:
@staticmethod
def post_init(s):
assert type(s.a) == bool, 'post_init args should have been cast correctly'
assert not s.a, f'post_init should have access to cli args ({s.a} != False)'
assert type(
s.path) == PosixPath, f'Casting of cli args should follow schema {type(s.path)} != Path'
assert s.path.name == 'foobar'
s.path = Path('baz')
def x(self):
"""docstring"""
pass
assert c.path.name == 'baz', 'post_init should override values'
class TestConfigurable(TestCase, SysArgvRestore):
def setUp(self) -> None:
from clima import c, Schema
self.c = c
self.save_sysargv()
class C(Schema):
a: int = 1 # description
def test_cli(self):
c = self.c
"""Basic Cli definition"""
sys.argv = ['test', '--a', 13]
@partial(c, noprepare=True)
class Cli:
def x(self):
"""docstring"""
pass
def test_cli_without_ds(self):
c = self.c
"""Cli definition without a docstring"""
@partial(c, noprepare=True)
class Cli:
def x(self):
pass
class TestIterables(TestCase, SysArgvRestore):
_int = 123
_str = 'abc'
def setUp(self) -> None:
from clima import c, Schema
self.c = c
self.save_sysargv()
class C(Schema):
a: tuple = self._int # description
b: tuple = self._str # description
c: list = self._int # description
d: list = self._str # description
e: set = self._int # description
f: set = self._str # description
def test_cli_with_args(self):
c = self.c
"""Basic Cli definition with iterables and cli args parsing"""
sys.argv = ['test', 'x',
'--a', '13',
'--b', 'cd',
'--c', '13',
'--d', 'cd',
'--e', '13',
'--f', 'cd',
]
@c
class Cli:
def x(self):
"""docstring"""
pass
assert c.a == tuple([13]), 'Should wrap in iterable when parsing cli args'
assert c.b == tuple(['cd']), 'Should wrap in iterable when parsing cli args'
assert c.c == list([13]), 'Should wrap in iterable when parsing cli args'
assert c.d == list(['cd']), 'Should wrap in iterable when parsing cli args'
assert c.e == set([13]), 'Should wrap in iterable when parsing cli args'
assert c.f == set(['cd']), 'Should wrap in iterable when parsing cli args'
def test_cli(self):
c = self.c
"""Basic Cli with iterables"""
sys.argv = ['test', 'x']
@c
class Cli:
def x(self):
"""docstring"""
pass
assert c.a == tuple([self._int]), 'Should wrap in iterable when configured in schema'
assert c.b == tuple([self._str]), 'Should wrap in iterable when configured in schema'
assert c.c == list([self._int]), 'Should wrap in iterable when configured in schema'
assert c.d == list([self._str]), 'Should wrap in iterable when configured in schema'
assert c.e == set([self._int]), 'Should wrap in iterable when configured in schema'
assert c.f == set([self._str]), 'Should wrap in iterable when configured in schema'
| 26.767642 | 117 | 0.491319 | 1,830 | 15,552 | 4.096175 | 0.126776 | 0.02468 | 0.022412 | 0.028815 | 0.598052 | 0.553095 | 0.535619 | 0.513874 | 0.492663 | 0.460512 | 0 | 0.009744 | 0.386317 | 15,552 | 580 | 118 | 26.813793 | 0.775671 | 0.108282 | 0 | 0.548969 | 0 | 0 | 0.138137 | 0.001641 | 0 | 0 | 0 | 0.001724 | 0.126289 | 1 | 0.152062 | false | 0.046392 | 0.059278 | 0 | 0.347938 | 0.002577 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d24cb71d0546e3e5ca71e3ff4d0e786b7de366b | 2,549 | py | Python | src/tracecode/utils.py | pombredanne/https-github.com-nexB-tracecode-toolkit | 5bb5741cd051f991c4d1944035dc00211dbcac6d | [
"Apache-2.0"
] | 21 | 2018-01-21T18:31:08.000Z | 2021-04-10T07:25:56.000Z | src/tracecode/utils.py | pombredanne/https-github.com-nexB-tracecode-toolkit | 5bb5741cd051f991c4d1944035dc00211dbcac6d | [
"Apache-2.0"
] | 4 | 2019-10-07T02:09:22.000Z | 2019-12-02T15:12:15.000Z | src/tracecode/utils.py | pombredanne/https-github.com-nexB-tracecode-toolkit | 5bb5741cd051f991c4d1944035dc00211dbcac6d | [
"Apache-2.0"
] | 8 | 2018-07-20T11:04:13.000Z | 2022-01-29T19:20:48.000Z | #
# Copyright (c) nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/tracecode-toolkit/
# The TraceCode software is licensed under the Apache License version 2.0.
# Data generated with TraceCode require an acknowledgment.
# TraceCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with TraceCode or any TraceCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with TraceCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# TraceCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# TraceCode is a free and open source software analysis tool from nexB Inc. and others.
# Visit https://github.com/nexB/tracecode-toolkit/ for support and download.
#
from __future__ import absolute_import
from __future__ import division
import os
import simplejson
from commoncode import filetype
from commoncode import fileutils
def get_notice():
"""
Retrieve the notice text from the NOTICE file for display in the JSON output.
"""
notice_path = os.path.join(os.path.abspath(
os.path.dirname(__file__)), 'NOTICE')
notice_text = open(notice_path).read()
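    # The NOTICE text is split on blank-line delimiters: text after the first
    # triple newline is set aside, then the indented acknowledgment paragraph is
    # extracted from what remains; the cleaned acknowledgment is what gets returned.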
delimiter = '\n\n\n'
[notice_text, extra_notice_text] = notice_text.split(delimiter, 1)
extra_notice_text = delimiter + extra_notice_text
delimiter = '\n\n '
[notice_text, acknowledgment_text] = notice_text.split(delimiter, 1)
acknowledgment_text = delimiter + acknowledgment_text
notice = acknowledgment_text.strip().replace(' ', '')
return notice
def is_json_path(location):
"""
Test if the input location file is a valid json file.
"""
if filetype.is_file(location):
try:
with open(location) as jsonfile:
result = simplejson.load(jsonfile)
if result:
return True
        except Exception:
return False
return False
| 36.414286 | 88 | 0.724598 | 353 | 2,549 | 5.135977 | 0.413598 | 0.049641 | 0.024821 | 0.01765 | 0.150028 | 0.150028 | 0.08053 | 0.08053 | 0.08053 | 0.08053 | 0 | 0.002959 | 0.204394 | 2,549 | 69 | 89 | 36.942029 | 0.891026 | 0.572381 | 0 | 0.071429 | 0 | 0 | 0.019305 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.214286 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d2763cdfedb26fbdf58fd1eb516a53a4a9df262 | 7,935 | py | Python | act/plotting/plot.py | scollis/ACT | e0067254a84573c5baad43d8c87a2ddd4763ea83 | [
"BSD-3-Clause"
] | 1 | 2019-04-11T19:21:59.000Z | 2019-04-11T19:21:59.000Z | act/plotting/plot.py | scollis/ACT | e0067254a84573c5baad43d8c87a2ddd4763ea83 | [
"BSD-3-Clause"
] | null | null | null | act/plotting/plot.py | scollis/ACT | e0067254a84573c5baad43d8c87a2ddd4763ea83 | [
"BSD-3-Clause"
] | null | null | null | # Import third party libraries
import matplotlib.pyplot as plt
import datetime as dt
import astral
import numpy as np
# Import Local Libs
from . import common
from ..utils import datetime_utils as dt_utils
from ..utils import data_utils
class display(object):
"""
    A class for handling the display of ARM Datasets. The class stores
    the dataset to be plotted.
Attributes
----------
fields: dict
The dictionary containing the fields inside the ARM dataset. Each field
has a key that links to an xarray DataArray object.
ds: str
The name of the datastream.
file_dates: list
        The dates of each file being displayed.
plots: list
The list of plots handled (currently not supported).
plot_vars: list
The list of variables being plotted.
cbs: list
The list of colorbar handles.
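
    Examples
    --------
    A minimal usage sketch, assuming ``arm_obj`` is an ARM dataset object that
    has already been read in (the field name below is illustrative)::

        disp = display(arm_obj)
        disp.plot('temp_mean')
        plt.show()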
"""
def __init__(self, arm_obj):
"""Initialize Object"""
self._arm = arm_obj
self.fields = arm_obj.variables
self.ds = str(arm_obj['ds'].values)
self.file_dates = arm_obj.file_dates.values
self.plots = []
self.plot_vars = []
self.cbs = []
def day_night_background(self, ax=None, fig=None):
"""
Colorcodes the background according to sunrise/sunset
Parameters
----------
ax: matplotlib axis handle
            Axis handle to plot the background on. Set to None to use the
current axis.
fig: matplotlib figure handle
Figure to plot the background on. Set to None to use the current
figure handle.
"""
# Get File Dates
file_dates = self._arm.file_dates.data
all_dates = dt_utils.dates_between(file_dates[-1], file_dates[0])
# Get ax and fig for plotting
ax, fig = common.parse_ax_fig(ax, fig)
# initialize the plot to a gray background for total darkness
rect = ax.patch
rect.set_facecolor('0.85')
# Initiate Astral Instance
a = astral.Astral()
if self._arm.lat.data.size > 1:
lat = self._arm.lat.data[0]
lon = self._arm.lon.data[0]
else:
lat = float(self._arm.lat.data)
lon = float(self._arm.lon.data)
for f in all_dates:
sun = a.sun_utc(f, lat, lon)
# add yellow background for specified time period
ax.axvspan(sun['sunrise'], sun['sunset'], facecolor='#FFFFCC')
# add local solar noon line
ax.axvline(x=sun['noon'], linestyle='--', color='y')
def set_xrng(self, xrng, ax=None, fig=None):
"""
Sets the x range of the plot.
Parameters
----------
xrng: 2 number array
The x limits of the plot.
ax: matplotlib axis handle
            Axis handle to plot the background on. Set to None to use the
current axis.
fig: matplotlib figure handle
Figure to plot the background on. Set to None to use the current
figure handle.
"""
# Get ax and fig for plotting
ax, fig = common.parse_ax_fig(ax, fig)
ax.set_xlim(xrng)
self.xrng = xrng
def set_yrng(self, yrng, ax=None, fig=None):
"""
Sets the y range of the plot.
Parameters
----------
yrng: 2 number array
The y limits of the plot.
ax: matplotlib axis handle
            Axis handle to plot the background on. Set to None to use the
current axis.
fig: matplotlib figure handle
Figure to plot the background on. Set to None to use the current
figure handle.
"""
# Get ax and fig for plotting
ax, fig = common.parse_ax_fig(ax, fig)
ax.set_ylim(yrng)
self.yrng = yrng
def add_colorbar(self, mappable, title=None, ax=None, fig=None):
"""
Adds a colorbar to the plot
Parameters
----------
mappable: matplotlib mappable
The mappable to base the colorbar on.
title: str
The title of the colorbar. Set to None to have no title.
ax: matplotlib axis handle
            Axis handle to plot the background on. Set to None to use the
current axis.
fig: matplotlib figure handle
Figure to plot the background on. Set to None to use the current
figure handle.
Returns
-------
cbar: matplotlib colorbar handle
The handle to the matplotlib colorbar.
"""
# Get ax and fig for plotting
ax, fig = common.parse_ax_fig(ax, fig)
        # Give the colorbar its own axis so the 2D plots line up with 1D
box = ax.get_position()
pad, width = 0.01, 0.01
cax = fig.add_axes([box.xmax + pad, box.ymin, width, box.height])
cbar = plt.colorbar(mappable, cax=cax)
cbar.ax.set_ylabel(title, rotation=270, fontsize=8, labelpad=3)
cbar.ax.tick_params(labelsize=6)
return cbar
def plot(self, field, ax=None, fig=None,
cmap=None, cbmin=None, cbmax=None, set_title=None,
add_nan=False, **kwargs):
"""
Makes the plot
Parameters
----------
        field: str
            The name of the field in the dataset to plot.
ax: matplotlib axis handle
Axis handle to plot the background on. Set to None to use the
current axis.
fig: matplotlib figure handle
Figure to plot the background on. Set to None to use the current
figure handle.
cmap: matplotlib colormap
The colormap to use.
cbmin: float
The minimum for the colorbar.
cbmax: float
The maximum for the colorbar.
set_title: str
The title for the plot.
add_nan: bool
Set to True to fill in data gaps with NaNs.
kwargs: dict
The keyword arguments for plt.plot
"""
# Get data and dimensions
data = self._arm[field]
dim = list(self._arm[field].dims)
xdata = self._arm[dim[0]]
ytitle = ''.join(['(', data.attrs['units'], ')'])
if len(dim) > 1:
ydata = self._arm[dim[1]]
units = ytitle
ytitle = ''.join(['(', ydata.attrs['units'], ')'])
else:
ydata = None
# Get the current plotting axis, add day/night background and plot data
ax, fig = common.parse_ax_fig(ax, fig)
if ydata is None:
self.day_night_background()
ax.plot(xdata, data, '.')
else:
# Add in nans to ensure the data are not streaking
if add_nan is True:
xdata, data = data_utils.add_in_nan(xdata, data)
mesh = ax.pcolormesh(xdata, ydata, data.transpose(),
cmap=cmap, vmax=cbmax,
vmin=cbmin, edgecolors='face')
# Set Title
if set_title is None:
set_title = ' '.join([self.ds, field, 'on', self.file_dates[0]])
plt.title(set_title)
# Set YTitle
ax.set_ylabel(ytitle)
# Set X Limit
if hasattr(self, 'xrng'):
self.set_xrng(self.xrng)
else:
self.xrng = [xdata.min().values, xdata.max().values]
self.set_xrng(self.xrng)
# Set Y Limit
if hasattr(self, 'yrng'):
self.set_yrng(self.yrng)
# Set X Format
days = (self.xrng[1] - self.xrng[0]) / np.timedelta64(1, 'D')
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
if ydata is not None:
self.add_colorbar(mesh, title=units)
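# Example usage (minimal sketch; the display class defined above is assumed to be
# instantiated with an ACT/xarray-style dataset object. `DisplayClass`, `ds_object`
# and the 'temp_mean' field name below are hypothetical placeholders):
#
#     display = DisplayClass(ds_object)        # the class defined above
#     display.plot('temp_mean', add_nan=True)  # 1D series get the day/night background
#     plt.show()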
| 31.488095 | 79 | 0.565091 | 1,044 | 7,935 | 4.216475 | 0.225096 | 0.017038 | 0.024534 | 0.029986 | 0.335075 | 0.315538 | 0.306452 | 0.306452 | 0.300545 | 0.300545 | 0 | 0.006377 | 0.347826 | 7,935 | 251 | 80 | 31.613546 | 0.844251 | 0.421046 | 0 | 0.123596 | 0 | 0 | 0.016723 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067416 | false | 0 | 0.078652 | 0 | 0.168539 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d28274e4079f33480cebe64aab0b45c2a185593 | 8,853 | py | Python | src/comms/comms.py | google-code-export/evennia | f458265bec64909d682736da6a118efa21b42dfc | [
"BSD-3-Clause"
] | null | null | null | src/comms/comms.py | google-code-export/evennia | f458265bec64909d682736da6a118efa21b42dfc | [
"BSD-3-Clause"
] | null | null | null | src/comms/comms.py | google-code-export/evennia | f458265bec64909d682736da6a118efa21b42dfc | [
"BSD-3-Clause"
] | null | null | null | """
Default Typeclass for Comms.
See objects.objects for more information on Typeclassing.
"""
from src.comms import Msg, TempMsg, ChannelDB
from src.typeclasses.typeclass import TypeClass
from src.utils import logger
from src.utils.utils import make_iter
class Channel(TypeClass):
"""
This is the base class for all Comms. Inherit from this to create different
types of communication channels.
"""
def __init__(self, dbobj):
super(Channel, self).__init__(dbobj)
def channel_prefix(self, msg=None, emit=False):
"""
How the channel should prefix itself for users. Return a string.
"""
return '[%s] ' % self.key
def format_senders(self, senders=None):
"""
Function used to format a list of sender names.
This function exists separately so that external sources can use
it to format source names in the same manner as normal object/player
names.
"""
if not senders:
return ''
return ', '.join(senders)
def pose_transform(self, msg, sender_string):
"""
Detects if the sender is posing, and modifies the message accordingly.
"""
pose = False
message = msg.message
message_start = message.lstrip()
if message_start.startswith((':', ';')):
pose = True
message = message[1:]
if not message.startswith((':', "'", ',')):
if not message.startswith(' '):
message = ' ' + message
if pose:
return '%s%s' % (sender_string, message)
else:
return '%s: %s' % (sender_string, message)
def format_external(self, msg, senders, emit=False):
"""
Used for formatting external messages. This is needed as a separate
operation because the senders of external messages may not be in-game
objects/players, and so cannot have things like custom user
preferences.
senders should be a list of strings, each containing a sender.
msg should contain the body of the message to be sent.
"""
if not senders:
emit = True
if emit:
return msg.message
senders = ', '.join(senders)
return self.pose_transform(msg, senders)
def format_message(self, msg, emit=False):
"""
Formats a message body for display.
If emit is True, it means the message is intended to be posted detached
from an identity.
"""
# We don't want to count things like external sources as senders for
# the purpose of constructing the message string.
senders = [sender for sender in msg.senders if hasattr(sender, 'key')]
if not senders:
emit = True
if emit:
return msg.message
else:
senders = [sender.key for sender in msg.senders]
senders = ', '.join(senders)
return self.pose_transform(msg, senders)
def message_transform(self, msg, emit=False, prefix=True,
sender_strings=None, external=False):
"""
Generates the formatted string sent to listeners on a channel.
"""
if sender_strings or external:
body = self.format_external(msg, sender_strings, emit=emit)
else:
body = self.format_message(msg, emit=emit)
if prefix:
body = "%s%s" % (self.channel_prefix(msg, emit=emit), body)
msg.message = body
return msg
def at_channel_create(self):
"""
Run at channel creation.
"""
def pre_join_channel(self, joiner):
"""
Run right before a channel is joined. If this returns a false value,
channel joining is aborted.
"""
return True
def post_join_channel(self, joiner):
"""
Run right after an object or player joins a channel.
"""
return True
def pre_leave_channel(self, leaver):
"""
Run right before a user leaves a channel. If this returns a false
value, leaving the channel will be aborted.
"""
return True
def post_leave_channel(self, leaver):
"""
Run right after an object or player leaves a channel.
"""
def pre_send_message(self, msg):
"""
Run before a message is sent to the channel.
This should return the message object, after any transformations.
If the message is to be discarded, return a false value.
"""
return msg
def post_send_message(self, msg):
"""
Run after a message is sent to the channel.
"""
def at_init(self):
"""
This is always called whenever this channel is initiated --
that is, whenever its typeclass is cached from memory. This
happens on demand the first time the channel is used or activated
in some way after being created, but also after each server
restart or reload.
"""
def distribute_message(self, msg, online=False):
"""
Method for grabbing all listeners that a message should be sent to on
this channel, and sending them a message.
"""
# get all players connected to this channel and send to them
for conn in ChannelDB.objects.get_all_connections(self, online=online):
try:
conn.player.msg(msg.message, from_obj=msg.senders)
except AttributeError:
try:
conn.to_external(msg.message,
senders=msg.senders, from_channel=self)
except Exception:
logger.log_trace("Cannot send msg to connection '%s'" % conn)
def msg(self, msgobj, header=None, senders=None, sender_strings=None,
persistent=False, online=False, emit=False, external=False):
"""
Send the given message to all players connected to channel. Note that
no permission-checking is done here; it is assumed to have been
done before calling this method. The optional keywords are not used if
persistent is False.
msgobj - a Msg/TempMsg instance or a message string. If one of the
former, the remaining keywords will be ignored. If a string,
this will either be sent as-is (if persistent=False) or it
will be used together with header and senders keywords to
create a Msg instance on the fly.
senders - an object, player or a list of objects or players.
Optional if persistent=False.
sender_strings - Name strings of senders. Used for external
connections where the sender is not a player or object. When
this is defined, external will be assumed.
external - Treat this message agnostic of its sender.
persistent (default False) - ignored if msgobj is a Msg or TempMsg.
If True, a Msg will be created, using header and senders
keywords. If False, other keywords will be ignored.
online (bool) - If this is set true, only message players who are
online. Otherwise, message all connected players. This can
make things faster, but may not trigger listeners on players
that are offline.
emit (bool) - Signals to the message formatter that this message is
not to be directly associated with a name.
"""
if senders:
senders = make_iter(senders)
else:
senders = []
if isinstance(msgobj, basestring):
# given msgobj is a string
msg = msgobj
if persistent and self.db.keep_log:
msgobj = Msg()
msgobj.save()
else:
# Use TempMsg, so this message is not stored.
msgobj = TempMsg()
msgobj.header = header
msgobj.message = msg
msgobj.channels = [self.dbobj] # add this channel
if not msgobj.senders:
msgobj.senders = senders
msgobj = self.pre_send_message(msgobj)
if not msgobj:
return False
msgobj = self.message_transform(msgobj, emit=emit,
sender_strings=sender_strings,
external=external)
self.distribute_message(msgobj, online=online)
self.post_send_message(msgobj)
return True
def tempmsg(self, message, header=None, senders=None):
"""
A wrapper for sending non-persistent messages.
"""
self.msg(message, senders=senders, header=header, persistent=False)
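# Example usage (minimal sketch; `channel` is a hypothetical Channel-typeclassed
# object created elsewhere, and `caller` a hypothetical sending object or player):
#
#     channel.msg("Hello, world!", senders=caller, persistent=False)
#     channel.tempmsg("A throw-away announcement", senders=caller)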
| 37.041841 | 81 | 0.593358 | 1,078 | 8,853 | 4.815399 | 0.237477 | 0.012136 | 0.010788 | 0.008476 | 0.122905 | 0.097476 | 0.058178 | 0.036987 | 0.036987 | 0.036987 | 0 | 0.00017 | 0.336609 | 8,853 | 238 | 82 | 37.197479 | 0.883705 | 0.41726 | 0 | 0.247619 | 0 | 0 | 0.015685 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.171429 | false | 0 | 0.038095 | 0 | 0.371429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d2aa93efe76b2d54b03c62ea6f5df7272c15ed4 | 6,975 | py | Python | src/utils/uiElements.py | amadea-system/void | 8904456ba028ed9b2b40e73e894a50124381ff3c | [
"Apache-2.0"
] | null | null | null | src/utils/uiElements.py | amadea-system/void | 8904456ba028ed9b2b40e73e894a50124381ff3c | [
"Apache-2.0"
] | null | null | null | src/utils/uiElements.py | amadea-system/void | 8904456ba028ed9b2b40e73e894a50124381ff3c | [
"Apache-2.0"
] | null | null | null | """
"""
import asyncio
import logging
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Optional, Dict, List, Union, Tuple, NamedTuple, Callable, Any
import discord
from discord.ext import commands
class DiscordPermissionsError(Exception):
pass
class CannotAddReactions(DiscordPermissionsError):
def __init__(self):
super().__init__(f"Insufficient permissions to add reactions to user interface!\n"
f"Please have an admin add the **Add Reactions** and **Read Message History** permissions to this bot and make sure that the channel you are using commands in is configured to allow those permissions as well.")
class CannotEmbedLinks(DiscordPermissionsError):
def __init__(self):
super().__init__('Bot does not have embed links permission in this channel.')
class CannotSendMessages(DiscordPermissionsError):
def __init__(self):
super().__init__('Bot cannot send messages in this channel.')
class CannotAddExtenalReactions(DiscordPermissionsError):
def __init__(self):
super().__init__(f"Gabby Gums is missing the **Use External Emojis** Permission!\n"
f"Please have an admin add the **Use External Emojis** permissions to this bot and make sure that the channel you are using commands in is configured to allow External Emojis as well.")
async def do_nothing(*args, **kwargs):
pass
@dataclass
class PageResponse:
"""Data Storage class for returning the user response (if any) and the UI Message(es) that the Page sent out."""
response: Optional[Any]
ui_message: Optional[discord.Message]
# user_messages: List[discord.Message] = field(default_factory=[])
def __str__(self):
return str(self.content())
def content(self):
if isinstance(self.response, str):
return self.response
elif isinstance(self.response, discord.Message):
return self.response.content
else:
return self.response
def c(self):
return self.content()
class Page:
"""
An interactive form that can be interacted with in a variety of ways, including Boolean reactions, string input, non-interactive response messages, and more to come.
Calls a Callback with the channel and response data to enable further response and appropriate handling of the data.
"""
LOG = logging.getLogger("GGBot.Page")
def __init__(self, page_type: str, name: Optional[str] = None, body: Optional[str] = None,
callback: Callable = do_nothing, additional: str = None, embed: Optional[discord.Embed] = None, previous_msg: Optional[Union[discord.Message, PageResponse]] = None, timeout: int = 120.0):
self.name = name
self.body = body
self.additional = additional
self.embed = embed
self.timeout = timeout
self.page_type = page_type.lower()
self.callback = callback
self.prev = previous_msg.ui_message if isinstance(previous_msg, PageResponse) else previous_msg
self.response = None
self.page_message: Optional[discord.Message] = None
self.user_message: Optional[discord.Message] = None
async def run(self, ctx: commands.Context):
pass
def construct_std_page_msg(self) -> str:
page_msg = ""
if self.name is not None:
page_msg += "**{}**\n".format(self.name)
if self.body is not None:
page_msg += "{}\n".format(self.body)
if self.additional is not None:
page_msg += "{}\n".format(self.additional)
# self.page_message = page_message
return page_msg
@staticmethod
async def cancel(ctx, self):
await self.remove()
await ctx.send("Canceled!")
async def remove(self, user: bool = True, page: bool = True):
# if self.previous is not None:
# await self.previous.remove(user, page)
try:
if user and self.user_message is not None:
await self.user_message.delete(delay=1)
except Exception:
pass
try:
if page and self.page_message is not None:
await self.page_message.delete(delay=1)
except Exception:
pass
class BoolPage(Page):
def __init__(self, name: Optional[str] = None, body: Optional[str] = None,
callback: Callable = do_nothing, additional: str = None, embed: Optional[discord.Embed] = None, previous_msg: Optional[Union[discord.Message, PageResponse]] = None, timeout: int = 120.0):
"""
Callback signature: page: reactMenu.Page, _client: commands.Bot, ctx: commands.Context, response: bool
"""
self.ctx = None
self.match = None
self.canceled = False
super().__init__(page_type="n/a", name=name, body=body, callback=callback, additional=additional, embed=embed, previous_msg=previous_msg, timeout=timeout)
async def run(self, ctx: commands.Context):
"""
Callback signature: page: reactMenu.Page, _client: commands.Bot, ctx: commands.Context, response: bool
"""
self.ctx = ctx
channel: discord.TextChannel = ctx.channel
author: discord.Member = ctx.author
message: discord.Message = ctx.message
if self.embed is None:
self.page_message = await channel.send(self.construct_std_page_msg())
else:
self.page_message = await channel.send(self.construct_std_page_msg(), embed=self.embed)
try:
await self.page_message.add_reaction("✅")
await self.page_message.add_reaction("❌")
except discord.Forbidden as e:
await ctx.send(
f"CRITICAL ERROR!!! \n{ctx.guild.me.name} does not have the `Add Reactions` permissions!. Please have an Admin fix this issue and try again.")
raise e
def react_check(_reaction: discord.Reaction, _user):
self.LOG.info("Checking Reaction: Reacted Message: {}, orig message: {}".format(_reaction.message.id,
self.page_message.id))
return _user == ctx.author and (str(_reaction.emoji) == '✅' or str(_reaction.emoji) == '❌')
try:
reaction, react_user = await self.ctx.bot.wait_for('reaction_add', timeout=self.timeout, check=react_check)
if str(reaction.emoji) == '✅':
self.response = True
await self.remove()
await self.callback(self, self.ctx.bot, ctx, True)
return True
elif str(reaction.emoji) == '❌':
self.response = False
await self.remove()
await self.callback(self, self.ctx.bot, ctx, False)
return False
except asyncio.TimeoutError:
await self.remove()
return None
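# Example usage (minimal sketch inside a discord.py command; `ctx` and the callback
# body are hypothetical and not part of this module):
#
#     async def on_confirm(page, bot, ctx, response):
#         await ctx.send(f"You answered: {response}")
#
#     page = BoolPage(name="Confirm", body="Proceed?", callback=on_confirm)
#     answer = await page.run(ctx)   # True, False, or None on timeout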
| 36.328125 | 235 | 0.634265 | 846 | 6,975 | 5.107565 | 0.230496 | 0.024994 | 0.031243 | 0.031474 | 0.35987 | 0.34043 | 0.314511 | 0.239991 | 0.209674 | 0.209674 | 0 | 0.001962 | 0.269391 | 6,975 | 191 | 236 | 36.518325 | 0.84478 | 0.094624 | 0 | 0.221311 | 0 | 0.02459 | 0.140317 | 0.003426 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090164 | false | 0.040984 | 0.04918 | 0.016393 | 0.311475 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d2e5aca6a3643b56a99abd040c66e564d7ea66c | 1,424 | py | Python | bin/assign-locus-tags.py | lcoombe/gfftools | 9da14f986e7c0935ddf0df39f1468b8b28831edf | [
"MIT"
] | null | null | null | bin/assign-locus-tags.py | lcoombe/gfftools | 9da14f986e7c0935ddf0df39f1468b8b28831edf | [
"MIT"
] | 1 | 2021-03-09T14:20:37.000Z | 2021-03-09T14:20:37.000Z | bin/assign-locus-tags.py | lcoombe/gfftools | 9da14f986e7c0935ddf0df39f1468b8b28831edf | [
"MIT"
] | 1 | 2021-03-09T00:20:58.000Z | 2021-03-09T00:20:58.000Z | #!/usr/bin/env python2
import sys
def gen_tag(curr_count, width, pre):
newnum = str(int(curr_count) + 10)
newnum_filled = newnum.zfill(width)
tag = '_'.join([pre, newnum_filled])
return (newnum, tag)
def main():
# presets
PRE = 'AB205'
WIDTH = 7
LOC = 222240 # next available locus_tag suffix
updates = 'mapped-predictions-genes-uniq2-dedup2-better-maps-locus-tagged-fixed.txt'
#updates = 'mapped-predictions-genes-uniq2-locustags.txt'
#updates = 'v2-locus-tags-mapped-to-v3.txt'
all_proteins = 'version3-gene-IDs.txt'
outname = 'version3-gene-locus-tags.tsv'
# read in pre-mapped sequences
update_dict = dict()
with open(updates, 'r') as infile:
for line in infile:
sid, loc = line.strip().split(' ')
update_dict[sid] = loc
# read all sequences and either use the mapped locus_tag or generate a new one
curr_tag = LOC
with open(outname, 'w') as outfile:
with open(all_proteins,'r') as infile:
for line in infile:
sid = line.strip()
if sid not in update_dict:
curr_tag, tag = gen_tag(curr_tag, WIDTH, PRE)
outbuff = ''.join([sid,'\t',tag,'\n'])
else:
outbuff = ''.join([sid, '\t', update_dict[sid], '\n'])
outfile.write(outbuff)
if __name__ == '__main__':
main()
| 29.666667 | 88 | 0.58427 | 186 | 1,424 | 4.333333 | 0.462366 | 0.049628 | 0.024814 | 0.07196 | 0.151365 | 0.066998 | 0.066998 | 0.066998 | 0 | 0 | 0 | 0.019666 | 0.285815 | 1,424 | 47 | 89 | 30.297872 | 0.772861 | 0.182584 | 0 | 0.064516 | 0 | 0.032258 | 0.127053 | 0.104581 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0.032258 | 0 | 0.129032 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d2f849b962f9ce6a9acd28163f158fb94eb8116 | 1,874 | py | Python | src/visualize.py | christosgalano/COVID_19-Reporter | d019aac9b2894a6b53c4781087a54c761e8e1a74 | [
"BSD-2-Clause"
] | null | null | null | src/visualize.py | christosgalano/COVID_19-Reporter | d019aac9b2894a6b53c4781087a54c761e8e1a74 | [
"BSD-2-Clause"
] | null | null | null | src/visualize.py | christosgalano/COVID_19-Reporter | d019aac9b2894a6b53c4781087a54c761e8e1a74 | [
"BSD-2-Clause"
] | null | null | null | import pandas as pd
import matplotlib.pyplot as plt
from typing import List
def general_visualize(data: pd.DataFrame, name: str, countries: List[str]):
"""
Creates a graph regarding the data of the countries that are provided.
:param data: DataFrame to be plotted
:param name: Name to be used in the title of the graph
:param countries: Countries of the DataFrame to be plotted
:return: None
"""
fig, ax = plt.subplots(figsize=(10, 5))
ax.set_facecolor('black')
ax.figure.set_facecolor('#121212')
ax.ticklabel_format(useOffset=False, style='plain')
ax.tick_params(axis='x', colors='white')
ax.tick_params(axis='y', colors='white')
ax.set_title(f'COVID-19 - Total {name}', color='white')
for country in countries:
data[country][0:].plot(label=country)
fig.tight_layout()
plt.legend(loc='upper left')
plt.show()
def visualize_rate(rate: pd.DataFrame, name: str, country: str, freq='W', date=None):
"""
Creates a bar chart regarding the rate of the country which is provided.
:param rate: DataFrame with data regarding rates
:param name: Name to be used in the title of the graph
:param country: Name of the country
:param freq: Frequency on which the graph is based, default is weekly
:param date: Only data from that day and forward are used
:return: None
"""
fig, ax = plt.subplots(figsize=(12, 6))
ax.set_facecolor('black')
ax.figure.set_facecolor('#121212')
ax.ticklabel_format(useOffset=False, style='plain')
ax.tick_params(axis='x', colors='white')
ax.tick_params(axis='y', colors='white')
ax.set_title(f'COVID-19 - {name} [{country}]', color='white')
rate = rate.resample(freq).sum()
rate = rate[country][date if date else 0:]
rate.index = rate.index.date
rate.plot.bar()
fig.tight_layout()
plt.show()
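# Example usage (minimal sketch; `confirmed` is a hypothetical pandas DataFrame
# indexed by date with one column per country, as prepared elsewhere in this project):
#
#     general_visualize(confirmed, 'Cases', ['Greece', 'Italy'])
#     visualize_rate(confirmed.diff(), 'New Cases', 'Greece', freq='W')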
| 36.745098 | 85 | 0.676094 | 281 | 1,874 | 4.451957 | 0.366548 | 0.023981 | 0.038369 | 0.051159 | 0.366107 | 0.329337 | 0.329337 | 0.329337 | 0.329337 | 0.329337 | 0 | 0.015936 | 0.196371 | 1,874 | 50 | 86 | 37.48 | 0.814741 | 0.312166 | 0 | 0.466667 | 0 | 0 | 0.107553 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.1 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d2fb9a7bd62c9c4e6e3ea92d3873ae9ef226b5f | 6,350 | py | Python | env/player.py | alxwdm/TichuAgent | d498d1050264d13c920018006e3dcc2a04bc61df | [
"MIT"
] | null | null | null | env/player.py | alxwdm/TichuAgent | d498d1050264d13c920018006e3dcc2a04bc61df | [
"MIT"
] | null | null | null | env/player.py | alxwdm/TichuAgent | d498d1050264d13c920018006e3dcc2a04bc61df | [
"MIT"
] | null | null | null | """ This module contains a class to represent a Tichu Player. """
import random
from env.cards import Cards
class Player():
"""
A class to represent a Player in a Tichu game.
Attributes
----------
hand: Cards
A Cards instance containing the hand cards of this Player.
points: int
The points this Player has achieved so far.
tichu_flag: bool
Whether the Player has called Tichu.
hand_size: int
The number of hand Cards of this Player.
hand_power: int
The power of the hand (only > 0 if it is a single combination).
finished: bool
Whether this Player has finished (i.e. no more hand Cards).
hand_rating: float
A rating of how good the hand is.
Methods
-------
assign_hand(cards):
Adds a Cards instance to the Player's hand.
remove_cards(cards):
Removes Cards from the Player's hand.
Returns True if successful.
add_points(points):
Adds points to the points the Player has achieved so far.
set_points(points):
Sets the Player's points and overrides the points achieved so far.
move(cards):
Checks whether the hand contains the cards (i.e. if the move is possible).
random_move():
Makes a random choice from all available combinations of the hand.
call_tichu():
Sets tichu_flag if the Player did not play any hand cards yet.
has_finished():
Returns True if the Player has played all hand cards.
"""
def __init__(self):
"""
Constructs a Player waiting to receive a hand via assign_hand().
"""
self.points = 0
self.tichu_flag = False
self.hand_size = 0
self.hand_power = 0
self.finished = False
self.hand = None
self.hand_rating = 0
def assign_hand(self, cards):
""" Assigns a Cards instance to the Players' hand. """
self.hand = cards
self._update()
self._set_hand_rating()
return True
def remove_cards(self, cards):
""" Removes all Card instances in Cards from Players' hand. """
if self.hand.contains(cards):
for crd in cards.cards:
self.hand.remove(crd)
self._update()
return True
else:
return False
def add_points(self, points):
""" Add points to Players' points. """
self.points += points
def set_points(self, points):
""" Sets (overrides) Players' points. """
self.points = points
def move(self, cards):
""" Returns true if Cards is a valid move. """
return bool(self.hand.contains(cards))
def random_move(self):
""" Randomly play one available combination. """
available_comb = self.hand.get_available_combinations()
flattened = [item for sublist in available_comb for item in sublist]
random_comb = random.choice(flattened)
suc = self.move(random_comb)
if suc: # double-check, move should always return True
return random_comb
else:
return False
def call_tichu(self):
""" Calls Tichu if Player did not play any Cards yet. """
if self.hand_size == 14:
self.tichu_flag = True
return True
else:
return False
def has_finished(self):
""" Checks whether the Player played all its Cards. """
return self.finished
def _update(self):
""" Update Players' Attributes after a Card has been played. """
self.hand_size = self.hand.size
self.hand_power = self.hand.power
if self.hand_size == 0:
self.finished = True
def _set_hand_rating(self):
"""
Set the rating of the Player's hand based on a heuristic.
The hand rating is based on the individual cards
and available combinations of the Player's hand.
A high rating can be achieved if the Player has
a lot of high cards (Kings, Aces and Dragon or Phoenix)
and a lot of good combinations (bomb, straight, triple, full).
"""
good_cards = ['A', 'K', 'Dragon', 'Phoenix']
bad_cards = {'Dog'}
comb_types = {'solo': 0,
'pair': 1,
'triple': 2,
'four_bomb': 3,
'full': 4,
'straight': 5,
'straight_bomb': 6,
'pair_seq': 7}
# initialize cards and score
card_list = self.hand.cards
cards = Cards(card_list)
score = 0
# update score based on individual cards
good_cards_list = [elem for elem in cards.cards
if elem.name in good_cards]
for crd in good_cards_list:
if crd.name == 'Dragon' or crd.name == 'A':
score += 20
else:
score += 10
bad_cards_list = [elem for elem in cards.cards
if elem.name in bad_cards]
if bad_cards_list:
score -= 40
# update score based on combinations
avail_combs = cards.get_available_combinations()
if avail_combs[comb_types['four_bomb']]:
for crds in avail_combs[comb_types['four_bomb']]:
score += 40
for crd in crds.cards:
cards.remove(crd)
avail_combs = cards.get_available_combinations()
if avail_combs[comb_types['straight_bomb']]:
for crds in avail_combs[comb_types['straight_bomb']]:
score += 40
if avail_combs[comb_types['full']]:
max_fulls = sum([full.power for full
in avail_combs[comb_types['full']]]
)/len(avail_combs[comb_types['full']])
score += max_fulls
if avail_combs[comb_types['triple']]:
max_triple = sum([triple.power
for triple in avail_combs[comb_types['triple']]]
)/len(avail_combs[comb_types['triple']])
score += max_triple
if avail_combs[comb_types['straight']]:
max_len = max([strt.size
for strt in avail_combs[comb_types['straight']]])
score += max_len
self.hand_rating = score
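# Example usage (minimal sketch; `dealt_cards` and `chosen_cards` are hypothetical
# Cards instances built from env.cards, which is not shown here):
#
#     player = Player()
#     player.assign_hand(dealt_cards)
#     if player.move(chosen_cards):
#         player.remove_cards(chosen_cards)
#     print(player.hand_rating, player.has_finished())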
| 34.89011 | 76 | 0.570079 | 791 | 6,350 | 4.439949 | 0.207332 | 0.038724 | 0.047836 | 0.06492 | 0.252278 | 0.166287 | 0.094533 | 0.077449 | 0.059226 | 0.059226 | 0 | 0.006486 | 0.344409 | 6,350 | 181 | 77 | 35.082873 | 0.837137 | 0.340787 | 0 | 0.156863 | 0 | 0 | 0.044061 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107843 | false | 0 | 0.019608 | 0 | 0.22549 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d2fdd0140b9ffc8057ee83ac8333d070e35be9e | 18,298 | py | Python | noncausal.py | dmitry-brizhinev/honours-code | 1d8535eadc3f9620d234af49425087b6cac67be2 | [
"MIT"
] | null | null | null | noncausal.py | dmitry-brizhinev/honours-code | 1d8535eadc3f9620d234af49425087b6cac67be2 | [
"MIT"
] | null | null | null | noncausal.py | dmitry-brizhinev/honours-code | 1d8535eadc3f9620d234af49425087b6cac67be2 | [
"MIT"
] | 1 | 2018-11-25T22:50:29.000Z | 2018-11-25T22:50:29.000Z | import tensorflow as tf
import numpy as np
from datetime import datetime
from PIL import Image
import time
def get_weights(shape):
return tf.Variable(tf.contrib.layers.xavier_initializer()(shape))
def get_bias_weights(shape):
return tf.Variable(tf.zeros_initializer()(shape))
class NonCausal:
def gate(self, p):
p1, p2 = tf.split(p, 2, -1)
return tf.multiply(tf.tanh(p1), tf.sigmoid(p2))
def conv2d(self, input, shape):
assert len(shape) == 4
W = get_weights(shape)
b = get_bias_weights(shape[-1])
return tf.nn.conv2d(input, W, strides=[1, 1, 1, 1], padding='SAME') + b
def fully_connected(self, input, in_features, out_features, bias=True):
W = get_weights([1, 1, in_features, out_features])
out = tf.nn.conv2d(input, W, strides=[1, 1, 1, 1], padding='VALID')
if bias:
out += get_bias_weights([out_features])
return out
def layer(self, input, first_layer=False, last_layer=False):
assert not (first_layer and last_layer)
out = self.gate(self.conv2d(input, [self.filter_size, self.filter_size, self.channels if first_layer else self.features, 2*self.features]))
residual = self.fully_connected(out, self.features, self.end_features if last_layer else self.features)
skip = self.fully_connected(out, self.features, self.end_features)
if first_layer:
input = self.fully_connected(input, self.channels, self.features, bias=False)
elif last_layer:
input = self.fully_connected(input, self.features, self.end_features, bias=False)
return input + residual, skip
def end(self, outs):
out = tf.nn.relu(tf.reduce_sum(outs, 0))
out = tf.nn.relu(self.fully_connected(out, self.end_features, self.end_features))
out = self.fully_connected(out, self.end_features, self.channels * self.values)
return tf.reshape(out, shape=[self.batch_size, self.height, self.width, self.channels, self.values])
def noncausal(self):
X = self.X_in
outs = []
for i in range(self.layers):
X, skip = self.layer(X, first_layer=(i==0), last_layer=(i==self.layers-1))
outs.append(skip)
outs.append(X)
logits = self.end(outs)
predictions = self.sample(logits)
return logits, predictions
def sample(self, logits):
probabilities = logits / self.temperature
return tf.reshape(tf.cast(tf.multinomial(tf.reshape(probabilities, shape=[self.batch_size*self.height*self.width*self.channels, self.values]), 1), tf.float32), shape=[self.batch_size,self.height,self.width,self.channels])
def logitise(self, images):
return tf.stop_gradient(tf.one_hot(tf.to_int32(images * (self.values-1)), self.values))
def generate_ten_thousand_samples_graph(self, sess):
print('Generating graph for noncausal')
outer_samples = []
test_data = self.data.get_noise_values()
for b in range(self.data.total_test_batches):
print('batch', b)
inner_samples = []
X_corrupted = sess.run(test_data)
inner_samples.append(X_corrupted)
for _ in range(40):#self.test_iterations):
X_corrupted = sess.run(self.predictions, feed_dict={self.X_in:X_corrupted})
inner_samples.append(X_corrupted)
outer_samples.append(inner_samples)
samples = np.concatenate(outer_samples, 1)
print(samples.shape)
return samples
def generate_ten_thousand_samples(self, sess):
from data.dataset import noise
print('Generating 10000 samples for noncausal')
start = time.perf_counter()
samples = []
test_data = self.data.get_noise_values()
for j in range(self.data.total_test_batches):
print('batch', j)
X_corrupted = sess.run(test_data)
for i in range(self.test_iterations):
X_corrupted = sess.run(self.predictions, feed_dict={self.X_in:X_corrupted})
if self.markov_type == 'renoise_per_two':
X_corrupted = sess.run(self.predictions, feed_dict={self.X_in:X_corrupted})
if self.markov_type != 'only_denoise':
noise_prop = (self.test_iterations - i - 1) / self.test_iterations
X_corrupted = np.array([noise(arr, noise_prop, noise_prop)[0] for arr in X_corrupted])
samples.append(X_corrupted)
samples = np.concatenate(samples)
print(time.perf_counter() - start)
return samples
def update_clamp(self, data, newdata, filename):
if filename == 'topgap':
data = data.copy()
data[:,:data.shape[1]//2,:,:] = newdata[:,:data.shape[1]//2,:,:]
return data
elif filename == 'bottomgap':
data = data.copy()
data[:,data.shape[1]//2:,:,:] = newdata[:,data.shape[1]//2:,:,:]
return data
else:
return newdata
def generate_diversity_samples(self, sess, filename, X_corrupted, X_true, horz_samples=10, vert_samples=10):
from data.dataset import noise
print('Generating samples for', filename)
predictions = [X_true[:vert_samples,:,:,:].reshape(1, vert_samples, self.height, self.width), X_corrupted[:vert_samples,:,:,:].reshape(1, vert_samples, self.height, self.width)]
X_corrupted = np.repeat(X_corrupted[:vert_samples,:,:,:], horz_samples, axis=0)
X_true = np.repeat(X_true[:vert_samples,:,:,:], horz_samples, axis=0)
start = time.perf_counter()
for i in range(self.test_iterations // 2 if filename == 'denoise' else self.test_iterations):
X_corrupted = self.update_clamp(X_corrupted, sess.run(self.predictions, feed_dict={self.X_in:X_corrupted}), filename)
if self.markov_type == 'renoise_per_two':
X_corrupted = self.update_clamp(X_corrupted, sess.run(self.predictions, feed_dict={self.X_in:X_corrupted}), filename)
if self.markov_type != 'only_denoise':
if filename == 'denoise':
noise_prop = ((self.test_iterations//2 - i - 1) / self.test_iterations)
else:
noise_prop = (self.test_iterations - i - 1) / self.test_iterations
X_corrupted = self.update_clamp(X_corrupted, np.array([noise(arr, noise_prop, noise_prop)[0] for arr in X_corrupted]), filename)
print(time.perf_counter() - start)
predictions.append(X_corrupted.reshape(vert_samples, horz_samples, self.height, self.width).transpose(1,0,2,3))
images = np.concatenate(tuple(predictions), axis=0)
images = images.transpose(1, 2, 0, 3)
images = images.reshape((self.height * vert_samples, self.width * (horz_samples + 2)))
filename = datetime.now().strftime('samples/%Y_%m_%d_%H_%M_noncausal_diversity_' + filename)+".png"
Image.fromarray((images*255).astype(np.int32)).convert('RGB').save(filename)
def generate_one_group_of_samples(self, sess, filename, X_corrupted, X_true=None, horz_samples=10):
from data.dataset import noise
print('Generating samples for', filename)
X_corrupted = X_corrupted[:horz_samples,:,:,:]
predictions = [X_true[:horz_samples,:,:,:]] if X_true is not None else []
predictions.append(X_corrupted)
start = time.perf_counter()
for i in range(self.test_iterations // 2 if filename == 'denoise' else self.test_iterations):
X_corrupted = self.update_clamp(X_corrupted, sess.run(self.predictions, feed_dict={self.X_in:X_corrupted}), filename)
if self.markov_type == 'renoise_per_two':
X_corrupted = self.update_clamp(X_corrupted, sess.run(self.predictions, feed_dict={self.X_in:X_corrupted}), filename)
predictions.append(X_corrupted)
if self.markov_type != 'only_denoise':
if filename == 'denoise':
noise_prop = ((self.test_iterations//2 - i - 1) / self.test_iterations)
else:
noise_prop = (self.test_iterations - i - 1) / self.test_iterations
X_corrupted = self.update_clamp(X_corrupted, np.array([noise(arr, noise_prop, noise_prop)[0] for arr in X_corrupted]), filename)
print(time.perf_counter() - start)
predictions = tuple(p.reshape((horz_samples, 1, self.height, self.width)) for p in predictions)
images = np.concatenate(predictions, axis=1)
images = images.transpose(1, 2, 0, 3)
images = images.reshape((self.height * ((self.test_iterations//2 if filename == 'denoise' else self.test_iterations) + 1 + (1 if X_true is not None else 0)), self.width * horz_samples))
filename = datetime.now().strftime('samples/%Y_%m_%d_%H_%M_noncausal_' + filename)+".png"
Image.fromarray((images*255).astype(np.int32)).convert('RGB').save(filename)
def generate_grid_of_samples(self, sess, X_corrupted):
from data.dataset import noise
print('Generating samples for grid')
horz_samples = 10
vert_samples = 10
X_corrupted = X_corrupted[:horz_samples*vert_samples,:,:,:]
start = time.perf_counter()
for i in range(self.test_iterations):
X_corrupted = sess.run(self.predictions, feed_dict={self.X_in:X_corrupted})
if self.markov_type == 'renoise_per_two':
X_corrupted = sess.run(self.predictions, feed_dict={self.X_in:X_corrupted})
if self.markov_type != 'only_denoise':
noise_prop = (self.test_iterations - i - 1) / self.test_iterations
X_corrupted = np.array([noise(arr, noise_prop, noise_prop)[0] for arr in X_corrupted])
print(time.perf_counter() - start)
images = X_corrupted.reshape((vert_samples, horz_samples, self.height, self.width))
images = images.transpose(0, 2, 1, 3)
images = images.reshape((self.height * vert_samples, self.width * horz_samples))
filename = datetime.now().strftime('samples/%Y_%m_%d_%H_%M_noncausal_grid')+".png"
Image.fromarray((images*255).astype(np.int32)).convert('RGB').save(filename)
def generate_samples(self, sess):
denoising_values = self.data.get_denoising_values()
X_corrupted, X_true, _ = sess.run(denoising_values)
self.generate_one_group_of_samples(sess, 'denoise', X_corrupted, X_true)
X_corrupted, X_true, _ = sess.run(denoising_values)
self.generate_diversity_samples(sess, 'denoise', X_corrupted, X_true)
topgap_values = self.data.get_topgap_values()
X_corrupted, X_true, _ = sess.run(topgap_values)
self.generate_one_group_of_samples(sess, 'topgap', X_corrupted, X_true)
X_corrupted, X_true, _ = sess.run(topgap_values)
self.generate_diversity_samples(sess, 'topgap', X_corrupted, X_true)
bottomgap_values = self.data.get_bottomgap_values()
X_corrupted, X_true, _ = sess.run(bottomgap_values)
self.generate_one_group_of_samples(sess, 'bottomgap', X_corrupted, X_true)
X_corrupted, X_true, _ = sess.run(bottomgap_values)
self.generate_diversity_samples(sess, 'bottomgap', X_corrupted, X_true)
noise_values = self.data.get_noise_values()
X_corrupted = sess.run(noise_values)
self.generate_one_group_of_samples(sess, 'purenoise', X_corrupted)
X_corrupted = sess.run(noise_values)
self.generate_grid_of_samples(sess, X_corrupted)
def samples(self):
assert self.restore
saver = tf.train.Saver()
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state('ckpts', latest_filename='noncausal')
saver.restore(sess, ckpt.model_checkpoint_path)
self.generate_samples(sess)
def get_test_samples(self):
assert self.restore
saver = tf.train.Saver()
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state('ckpts', latest_filename='noncausal')
saver.restore(sess, ckpt.model_checkpoint_path)
return self.generate_ten_thousand_samples(sess)
def get_test_samples_graph(self):
assert self.restore
saver = tf.train.Saver()
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state('ckpts', latest_filename='noncausal')
saver.restore(sess, ckpt.model_checkpoint_path)
return self.generate_ten_thousand_samples_graph(sess)
def run(self):
saver = tf.train.Saver()
train_data = self.data.get_corrupted_values()
test_data = self.data.get_corrupted_test_values()
summary_writer = tf.summary.FileWriter('logs/noncausal')
with tf.Session() as sess:
if self.restore:
ckpt = tf.train.get_checkpoint_state('ckpts', latest_filename='noncausal')
saver.restore(sess, ckpt.model_checkpoint_path)
else:
sess.run(tf.global_variables_initializer())
print('Started training at time', time.perf_counter())
starttime = time.perf_counter()
global_step = sess.run(self.global_step)
print("Started Model Training...")
while global_step < self.iterations:
#print('running', global_step, self.iterations)
X_corrupted, X_true, _, proportion = sess.run(train_data)
predictions, train_summary, _, _ = sess.run([self.predictions, self.train_summary, self.loss, self.train_step], feed_dict={self.X_in:X_corrupted, self.X_true:X_true, self.proportion:proportion})
summary_writer.add_summary(train_summary, global_step)
global_step = sess.run(self.global_step)
train_summary, _, _ = sess.run([self.train_summary, self.loss, self.train_step], feed_dict={self.X_in:predictions, self.X_true:X_true, self.proportion:proportion})
summary_writer.add_summary(train_summary, global_step)
global_step = sess.run(self.global_step)
if global_step%1000 == 0 or global_step >= self.iterations:
saver.save(sess, 'ckpts/noncausal.ckpt', global_step=global_step, latest_filename='noncausal')
sess.run([self.reset_loss_mean])
for i in range(self.data.total_test_batches):
X_corrupted, X_true, _, proportion = sess.run(test_data)
predictions, _ = sess.run([self.predictions, self.update_loss_mean], feed_dict={self.X_in:X_corrupted, self.X_true:X_true, self.proportion:proportion})
sess.run([self.update_loss_mean], feed_dict={self.X_in:predictions, self.X_true:X_true, self.proportion:proportion})
test_summary, test_loss = sess.run([self.test_summary, self.loss_mean])
summary_writer.add_summary(test_summary, global_step)
print("iteration %d, test loss %g"%(global_step, test_loss))
print('Finished training at time', time.perf_counter())
print('Time elapsed', time.perf_counter() - starttime)
def run_tests(self):
print('Noncausal basic test:')
train_data = self.data.get_corrupted_values()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
X_corrupted, X_true, _, proportion = sess.run(train_data)
loss, predictions, _ = sess.run([self.loss, self.predictions, self.train_step], feed_dict={self.X_in:X_corrupted, self.X_true:X_true, self.proportion:proportion})
print('Loss', loss)
print('Predictions', [p.shape for p in predictions])
print('Test completed')
def __init__(self, conf, data):
self.channels = data.channels
assert data.channels == 1
self.height = data.height
self.width = data.width
self.values = data.values
self.labels = data.labels
self.data = data
self.filter_size = conf.filter_size
self.features = conf.features
self.end_features = conf.end_features
self.layers = conf.layers
self.temperature = conf.temperature
self.iterations = conf.iterations
self.learning_rate = conf.learning_rate
self.restore = conf.restore
self.markov_type = conf.markov_type
self.test_iterations = conf.test_iterations
self.global_step = tf.Variable(0, trainable=False)
self.X_in = tf.placeholder(tf.float32, [None,self.height,self.width,self.channels])
self.batch_size = tf.shape(self.X_in)[0]
self.X_true = tf.placeholder(tf.float32, [None,self.height,self.width,self.channels])
self.proportion = tf.placeholder(tf.float32, [None])
self.expanded_proportion = tf.tile(tf.expand_dims(tf.expand_dims(tf.expand_dims(tf.reciprocal(self.proportion), -1), -1), -1), [1,self.height,self.width,self.channels])
logits, self.predictions = self.noncausal()
self.loss = tf.reduce_mean(tf.multiply(self.expanded_proportion, tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=self.logitise(self.X_true))))
self.train_summary = tf.summary.scalar('train_loss', self.loss)
with tf.name_scope('loss_mean_calc'):
self.loss_mean, self.update_loss_mean = tf.metrics.mean(self.loss)
self.test_summary = tf.summary.scalar('test_loss', self.loss_mean)
self.reset_loss_mean = tf.initialize_variables([i for i in tf.local_variables() if 'loss_mean_calc' in i.name])
self.train_step = tf.train.RMSPropOptimizer(self.learning_rate).minimize(self.loss, global_step=self.global_step)
print('trainable variables:', np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()]))
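# Example usage (minimal sketch; `conf` and `data` are hypothetical objects exposing
# the attributes read in __init__, e.g. filter_size, features, layers, height, ...):
#
#     model = NonCausal(conf, data)
#     model.run_tests()   # one training step as a sanity check
#     model.run()         # full training loop with checkpointing
#     model.samples()     # requires conf.restore=True and an existing checkpoint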
| 52.429799 | 229 | 0.639961 | 2,352 | 18,298 | 4.743622 | 0.102466 | 0.06543 | 0.035493 | 0.022856 | 0.633773 | 0.597024 | 0.549789 | 0.505243 | 0.475666 | 0.445998 | 0 | 0.009681 | 0.243579 | 18,298 | 348 | 230 | 52.58046 | 0.796402 | 0.003716 | 0 | 0.375862 | 0 | 0 | 0.045592 | 0.0062 | 0 | 0 | 0 | 0 | 0.02069 | 1 | 0.07931 | false | 0 | 0.031034 | 0.010345 | 0.172414 | 0.075862 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d3107a1475701068bb67affc4766805ec601f64 | 674 | py | Python | paraVerComoFuncionaAlgumasCoisas/opcua-servidor/paraColocarNaMaquinaVirtual/client.py | jonasht/pythonEstudos | 5e7d28e7bd82b9d1b08e795867fdbaa743f4b747 | [
"MIT"
] | null | null | null | paraVerComoFuncionaAlgumasCoisas/opcua-servidor/paraColocarNaMaquinaVirtual/client.py | jonasht/pythonEstudos | 5e7d28e7bd82b9d1b08e795867fdbaa743f4b747 | [
"MIT"
] | null | null | null | paraVerComoFuncionaAlgumasCoisas/opcua-servidor/paraColocarNaMaquinaVirtual/client.py | jonasht/pythonEstudos | 5e7d28e7bd82b9d1b08e795867fdbaa743f4b747 | [
"MIT"
] | null | null | null |
import time
from opcua import Client
from colorama.ansi import Fore, Back
url = 'opc.tcp://192.168.15.128/24:4840'
client = Client(url)
client.connect()
print(Fore.GREEN, 'cliente conectado')
print(' client connected ', Fore.RESET)
while True:
Temp = client.get_node('ns=2;i=2')
Temperature = Temp.get_value()
print(Temperature, '', end='')
Press = client.get_node('ns=2;i=3')
Pressure = Press.get_value()
print(Pressure, end='')
TIME = client.get_node('ns=2;i=4')
TIME_value = TIME.get_value()
print(TIME_value, Back.GREEN, ' ', Back.RESET, '\n')
time.sleep(1) | 20.424242 | 56 | 0.630564 | 96 | 674 | 4.333333 | 0.447917 | 0.076923 | 0.09375 | 0.108173 | 0.122596 | 0.122596 | 0 | 0 | 0 | 0 | 0 | 0.045113 | 0.210682 | 674 | 33 | 57 | 20.424242 | 0.736842 | 0 | 0 | 0.1 | 0 | 0 | 0.139466 | 0.047478 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.15 | 0 | 0.15 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d3124b7d0ec0a2c78af6aff4b5a22b8a280a5e7 | 381 | py | Python | data-hub-api/apps/cdms_api/tests/integration/test_client.py | uktrade/data-hub-api-old | 5ecf093d88692870982a638ced45de6a82d55672 | [
"MIT"
] | null | null | null | data-hub-api/apps/cdms_api/tests/integration/test_client.py | uktrade/data-hub-api-old | 5ecf093d88692870982a638ced45de6a82d55672 | [
"MIT"
] | 18 | 2016-04-04T12:42:45.000Z | 2016-09-01T07:21:05.000Z | data-hub-api/apps/cdms_api/tests/integration/test_client.py | uktrade/data-hub-api-old | 5ecf093d88692870982a638ced45de6a82d55672 | [
"MIT"
] | 1 | 2016-06-01T15:45:21.000Z | 2016-06-01T15:45:21.000Z | from .client_test_case import ClientTestCase
class TestClient(ClientTestCase):
def test_happy(self):
"""
Client can List Organisations returns single entry: Test organisation
"""
result = self.client.list('Organization')
self.assertEqual(len(result), 1)
entry = result[0]
self.assertEqual(entry['Name'], 'UKTI Test')
| 25.4 | 77 | 0.648294 | 41 | 381 | 5.95122 | 0.634146 | 0.081967 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006969 | 0.246719 | 381 | 14 | 78 | 27.214286 | 0.843206 | 0.181102 | 0 | 0 | 0 | 0 | 0.086806 | 0 | 0 | 0 | 0 | 0 | 0.285714 | 1 | 0.142857 | false | 0 | 0.142857 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d329c49a371fcc7268eecd84db861c92c516519 | 1,145 | py | Python | organiseFiles.py | oasiscse/Data-Analysis-with-Pandas-and-Python | 124906177ccce1a3386afee8e550a4377e11c6c1 | [
"MIT"
] | 1 | 2019-12-26T15:11:05.000Z | 2019-12-26T15:11:05.000Z | organiseFiles.py | oasiscse/Data-Analysis-with-Pandas-and-Python-Automations | 124906177ccce1a3386afee8e550a4377e11c6c1 | [
"MIT"
] | null | null | null | organiseFiles.py | oasiscse/Data-Analysis-with-Pandas-and-Python-Automations | 124906177ccce1a3386afee8e550a4377e11c6c1 | [
"MIT"
] | null | null | null | import os
from pathlib import Path #pip install pathlib
folder_dict = {
"Images": [".jpeg", ".jpg", ".tiff", ".gif", ".bmp", ".png", ".bpg", ".svg",
".heif", ".psd"],
"Videos": [".avi", ".flv", ".wmv", ".mov", ".mp4", ".webm", ".vob", ".mng",
".qt", ".mpg", ".mpeg", ".3gp", ".mkv"],
"Documents": [".doc", ".pptx", ".pdf", ".docx", ".xla"],
"Audio": [".aac", ".aa", ".dvf", ".m4a", ".m4b", ".m4p", ".mp3",
".msv", ".ogg", ".oga", ".raw", ".vox", ".wav", ".wma"],
"Text": [".txt", ".in", ".out"],
"PDF": [".pdf"],
"Python": [".py"],
"XML": [".xml"],
"EXE": [".exe"]
}
FILE_FORMATS = {file_format: directory
for directory, file_formats in folder_dict.items()
for file_format in file_formats}
def organize():
for entry in os.scandir():
if entry.is_dir():
continue
file_path = Path(entry)
file_format = file_path.suffix.lower()
if file_format in FILE_FORMATS:
directory_path = Path(FILE_FORMATS[file_format])
directory_path.mkdir(exist_ok=True)
file_path.rename(directory_path.joinpath(file_path))
for dir in os.scandir():
try:
os.rmdir(dir)
except OSError:
pass
organize()
| 27.261905 | 76 | 0.570306 | 151 | 1,145 | 4.18543 | 0.589404 | 0.087025 | 0.047468 | 0.066456 | 0.167722 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00627 | 0.164192 | 1,145 | 41 | 77 | 27.926829 | 0.654127 | 0.016594 | 0 | 0 | 0 | 0 | 0.221333 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0.028571 | 0.057143 | 0 | 0.085714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d3583e0111673a3b1c512d983f8f3488748b203 | 5,894 | py | Python | test/test_Recommender.py | apostolis1/Movie-recommender | 8ef25e45305352533b6a54d62c8a03b90e0f5fdd | [
"MIT"
] | null | null | null | test/test_Recommender.py | apostolis1/Movie-recommender | 8ef25e45305352533b6a54d62c8a03b90e0f5fdd | [
"MIT"
] | 1 | 2020-09-13T17:14:14.000Z | 2020-09-13T17:14:14.000Z | test/test_Recommender.py | apostolis1/Movie-recommender | 8ef25e45305352533b6a54d62c8a03b90e0f5fdd | [
"MIT"
] | null | null | null | import random
import unittest
import pandas as pd
from movierecommender.recommender.Recommender import Recommender, get_titles_from_tconst_list
import pathlib
import os
from movierecommender.datahandler.DbHandler import DbHandler
from sqlalchemy import text
file_dir = pathlib.Path(__file__).parent.parent.resolve()
DATA_PATH = os.path.join(file_dir, "data/")
class TestRecommender(unittest.TestCase):
def test_recommender_init(self):
try:
rec = Recommender()
except Exception as e:
assert False, e
def test_get_recommendation_with_create_count_vectorizer(self):
rec = Recommender()
rec.create_cosine_sim()
sample_tconst = random.choice(list(rec.cosine_sim.tconst.values)) # grab a random movie to test
print(f"Running test_get_recommendation for tconst: {sample_tconst}")
sample_recommendations = rec.get_recommendation_from_tconst(sample_tconst)
assert sample_recommendations[0] == sample_tconst # the best fit should be the movie itself
def test_get_recommendation_with_import(self):
rec = Recommender()
rec.import_cosine_sim_from_pkl()
sample_tconst = random.choice(list(rec.cosine_sim.tconst.values)) # grab a random movie to test
print(f"Running test_get_recommendation for tconst: {sample_tconst}")
sample_recommendations = rec.get_recommendation_from_tconst(sample_tconst)
assert sample_recommendations[0] == sample_tconst # the best fit should be the movie itself
def test_cosine_sim_export(self):
"""
Test that the export functionality works after the create_cosine_sim() method is called
:return:
"""
temp_test_path = os.path.join(DATA_PATH, 'cosine_sim/test_file.pkl')
rec = Recommender()
rec.create_cosine_sim() # Initialize self.cosine_sim
rec.export_cosine_sim_to_pkl(temp_test_path)
assert os.path.exists(temp_test_path)
os.remove(temp_test_path) # Remove test temp file
return
def test_cosine_sim_import(self):
"""
Test that the import correctly initializes the self.cosine_sim dataframe
:return:
"""
rec = Recommender()
rec.import_cosine_sim_from_pkl()
print(rec.cosine_sim.head())
assert type(rec.cosine_sim) == pd.DataFrame
def test_cosine_sim_import_error(self):
"""
Test that correct Exception is thrown in case of wrong filepath import
:return:
"""
rec = Recommender()
wrong_path = os.path.join(DATA_PATH, 'cosine_sim/wrong_file.pkl')
try:
rec.import_cosine_sim_from_pkl(pkl_path=wrong_path)
assert False
except FileNotFoundError:
assert True
def test_cosine_sim_import_auto_create(self):
"""
Test that if auto_create is set then the file is successfully created
:return:
"""
rec = Recommender()
wrong_path = os.path.join(DATA_PATH, 'cosine_sim/wrong_file_auto_create.pkl')
rec.import_cosine_sim_from_pkl(pkl_path=wrong_path, auto_create=True)
assert os.path.exists(wrong_path)
os.remove(wrong_path)
def test_get_titles_from_tconst(self):
"""
Test the get_titles_from_tconst_list function
Grab the recommendation titles for a random movie and cross-check them with the titles in the title_basics table
:return:
"""
rec = Recommender()
rec.import_cosine_sim_from_pkl()
sample_tconst = random.choice(list(rec.cosine_sim.tconst.values)) # grab a random movie to test
sample_recommendations_tconst = rec.get_recommendation_from_tconst(sample_tconst)
sample_titles = get_titles_from_tconst_list(sample_recommendations_tconst)
recommendation_titles = [result[1] for result in sample_titles]
# Make sure the titles returned are a subset of the titles in title_basics table
dbhandler = DbHandler()
dbhandler.connect()
# Use a join to limit the results only on those that interest us
all_titles = dbhandler.conn.execute(text(
"SELECT tconst,primaryTitle from title_basics NATURAL JOIN title_keywords"))
# Convert the result of the query to a df
all_titles_df = pd.DataFrame(data=[row for row in all_titles], columns=['tconst','primaryTitle'])
# Find the titles that exist in both the df from the db and the results from the recommender
same_titles = all_titles_df.loc[all_titles_df['tconst'].isin(sample_recommendations_tconst)]['primaryTitle']
# Make sure we found all the movie titles
assert len(recommendation_titles) == same_titles.size
def test_recommendation_limit_works(self):
"""
Test that the limit param in get_recommendation_from_tconst works as intended
:return:
"""
limit = random.randint(1, 50)
print(f"Testing for limit: {limit}")
rec = Recommender()
rec.import_cosine_sim_from_pkl()
sample_tconst = random.choice(list(rec.cosine_sim.tconst.values)) # grab a random movie to test
sample_recommendations_tconst = rec.get_recommendation_from_tconst(sample_tconst, limit=limit)
assert len(sample_recommendations_tconst) == limit
def test_wrong_tconst_str(self):
"""
Test that if a wrong tconst value is provided in the get_recommendation_from_tconst method
an Exception is raised
:return:
"""
rec = Recommender()
rec.import_cosine_sim_from_pkl()
try:
rec.get_recommendation_from_tconst('wrong_tconst')
# If Exception is raised then True
except Exception as e:
print(e)
assert True
return
assert False, "Exception not thrown for wrong tconst"
| 42.402878 | 116 | 0.687988 | 760 | 5,894 | 5.071053 | 0.207895 | 0.058381 | 0.030877 | 0.04904 | 0.405293 | 0.349766 | 0.33316 | 0.33316 | 0.325117 | 0.300208 | 0 | 0.001341 | 0.240753 | 5,894 | 138 | 117 | 42.710145 | 0.859888 | 0.219715 | 0 | 0.404494 | 0 | 0 | 0.090074 | 0.030331 | 0 | 0 | 0 | 0 | 0.134831 | 1 | 0.11236 | false | 0 | 0.213483 | 0 | 0.359551 | 0.05618 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d361e2df722e284e43a4f7b882786df9881551f | 4,879 | py | Python | pele/controllers/api_v01/default.py | hysds/pele | bbb779038f99b8f37bebb0376a4b36ab8820d10c | [
"Apache-2.0"
] | null | null | null | pele/controllers/api_v01/default.py | hysds/pele | bbb779038f99b8f37bebb0376a4b36ab8820d10c | [
"Apache-2.0"
] | 8 | 2018-06-07T23:57:46.000Z | 2022-03-24T21:59:37.000Z | pele/controllers/api_v01/default.py | hysds/pele | bbb779038f99b8f37bebb0376a4b36ab8820d10c | [
"Apache-2.0"
] | null | null | null | from builtins import str
import traceback
import jwt
import uuid
from datetime import datetime, timedelta
from flask import current_app, g, url_for
from flask_restx import Resource, fields, inputs
from flask_mail import Message
from pele import db, cache, limiter, mail
from pele.extensions import auth
from pele.models.user import User
from pele.controllers.api_v01.config import api
@api.route('/register', endpoint='register')
@api.doc(responses={201: "Success",
400: "Invalid parameters",
500: "Registration failed"},
description="User registration.")
class Register(Resource):
"""Register."""
parser = api.parser()
parser.add_argument('email', required=True, type=inputs.email(),
help='email address', location='form')
parser.add_argument('password', required=True, type=str,
help='password', location='form')
decorators = [limiter.limit("1/minute")]
model = api.model('Register', {
'success': fields.Boolean(description="success flag"),
'message': fields.String(description="message"),
'email': fields.String(description="email"),
'id': fields.Integer(description="id"),
})
@api.marshal_with(model)
@api.doc(parser=parser)
def post(self):
data = self.parser.parse_args()
data['verification_code'] = str(uuid.uuid4())
user = User(**data)
db.session.add(user)
try:
db.session.commit()
except Exception as e:
current_app.logger.error(traceback.format_exc())
return {
'success': False,
'message': "Registration failed. Please contact support."
}, 500
msg = Message("Verify your Pele API account", recipients=[user.email])
msg.body = "Use your verification code below to verify your Pele API " + \
"account at {}:\n\n{}".format(url_for('api_v0-1.doc', _external=True),
data['verification_code'])
mail.send(msg)
user_dict = user.to_dict()
user_dict['success'] = True
user_dict['message'] = "Verification email sent. Verify before using the API."
return user_dict, 201
@api.route('/verify', endpoint='verify')
@api.doc(responses={200: "Success",
400: "Invalid parameters",
401: "Unathorized",
500: "Verification failed"},
description="Verify registered account.")
class Verify(Resource):
"""Verify."""
parser = api.parser()
parser.add_argument('email', required=True, type=inputs.email(),
help='email address', location='form')
parser.add_argument('verification_code', required=True, type=str,
help='verification code', location='form')
model = api.model('Verify', {
'success': fields.Boolean(description="success flag"),
'message': fields.String(description="message"),
})
decorators = [limiter.limit("3/minute")]
@api.marshal_with(model)
@api.doc(parser=parser)
def post(self):
data = self.parser.parse_args()
user = User.verify(**data)
if not user:
return {'message': 'Invalid verification code'}, 401
return {
'success': True,
'message': 'Mahalo for verifying. You may now login to receive an API token.'
}
@api.route('/login', endpoint='login')
@api.doc(responses={200: "Success",
400: "Invalid parameters",
401: "Unathorized",
500: "Login failed"},
description="Login and receive API token.")
class Login(Resource):
"""Login."""
model = api.model('Login', {
'success': fields.Boolean(description="success flag"),
'message': fields.String(description="message"),
'token': fields.String(description="API token"),
})
decorators = [limiter.limit("3/minute")]
@auth.login_required
@api.marshal_with(model)
def post(self):
user = g.user
if not user:
return {
'message': 'Invalid credentials'
}, 401
try:
token = jwt.encode({
'sub': user.email,
                'iat': datetime.utcnow(),
'exp': datetime.utcnow() + timedelta(seconds=current_app.config['TOKEN_EXPIRATION_SECS'])},
current_app.config['SECRET_KEY'])
except Exception as e:
current_app.logger.error(traceback.format_exc())
return {
'success': False,
'message': "Login failed. Please contact support."
}, 500
return {
'success': True,
'token': token.decode('UTF-8')
}
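# Added note (not part of the original module): the value returned above is a standard JWT
# signed with the app's SECRET_KEY, so a protected endpoint could verify it with PyJWT's
# decode call, roughly:
#   jwt.decode(token, current_app.config['SECRET_KEY'], algorithms=['HS256'])
# This is only a sketch; the verification actually used by pele presumably lives in the
# auth extension imported above, not in this file.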
| 34.118881 | 107 | 0.576553 | 514 | 4,879 | 5.404669 | 0.285992 | 0.034557 | 0.041397 | 0.029158 | 0.415047 | 0.339453 | 0.318575 | 0.318575 | 0.318575 | 0.318575 | 0 | 0.016469 | 0.290633 | 4,879 | 142 | 108 | 34.359155 | 0.786189 | 0.004919 | 0 | 0.445378 | 0 | 0 | 0.2174 | 0.00434 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02521 | false | 0.016807 | 0.10084 | 0 | 0.277311 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d38f4750687cfe7822364bc6138badeb0389f45 | 996 | py | Python | bot/main.py | jandolezal/energy-mix | da99e2c34751e45981ae2ed11da4077b12385ddd | [
"MIT"
] | 3 | 2021-05-19T10:31:53.000Z | 2021-05-19T17:15:46.000Z | bot/main.py | jandolezal/energy-mix | da99e2c34751e45981ae2ed11da4077b12385ddd | [
"MIT"
] | null | null | null | bot/main.py | jandolezal/energy-mix | da99e2c34751e45981ae2ed11da4077b12385ddd | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
from dotenv import load_dotenv
import tweepy # type: ignore
from bot import emojis
from bot import entsoe
load_dotenv()
def main():
# Request electricity production data from Entsoe for past hour
data = entsoe.get_data()
if data:
# Make a string from emojis based on data
percentages = emojis.calculate_percentages_better(data)
tweet = emojis.prepare_tweet(production=percentages)
else:
raise SystemExit
# Twitter app authentication and setup
consumer_key = os.getenv('CONSUMER_KEY')
consumer_secret = os.getenv('CONSUMER_SECRET')
access_token = os.getenv('ACCESS_TOKEN')
access_token_secret = os.getenv('ACCESS_TOKEN_SECRET')
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# Tweet the emoji string
api.update_status(status=tweet)
if __name__ == '__main__':
main()
| 23.162791 | 67 | 0.717871 | 129 | 996 | 5.294574 | 0.457364 | 0.112738 | 0.074671 | 0.096633 | 0.081991 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001259 | 0.202811 | 996 | 42 | 68 | 23.714286 | 0.858942 | 0.196787 | 0 | 0 | 0 | 0 | 0.083123 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.217391 | 0 | 0.26087 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d39c09893e70f95a853038cb4cc07828a8bf1e5 | 1,364 | py | Python | exercises/chapter5/exercise_5_1.py | JapoDeveloper/think-python | 1e6fc4fd635a5bdf1ea38eb93a1e67db9ad99587 | [
"MIT"
] | null | null | null | exercises/chapter5/exercise_5_1.py | JapoDeveloper/think-python | 1e6fc4fd635a5bdf1ea38eb93a1e67db9ad99587 | [
"MIT"
] | null | null | null | exercises/chapter5/exercise_5_1.py | JapoDeveloper/think-python | 1e6fc4fd635a5bdf1ea38eb93a1e67db9ad99587 | [
"MIT"
] | null | null | null | """
Think Python, 2nd Edition
Chapter 5
Exercise 5.1
Description:
The time module provides a function, also named time, that returns the current
Greenwich Mean Time in “the epoch”, which is an arbitrary time used as a
reference point. On UNIX systems, the epoch is 1 January 1970.
>>> import time
>>> time.time()
1437746094.5735958
Write a script that reads the current time and converts it to a time of
day in hours, minutes, and seconds, plus the number of days since the epoch.
"""
import time
day_secs = 24 * 60 * 60
hour_secs = 60 * 60
min_secs = 60
seconds_since_epoch = time.time()
# Option 1: print the time as a combination of units
days = int(seconds_since_epoch // day_secs)
remaining = seconds_since_epoch - (days * day_secs)
hours = int(remaining // hour_secs)
remaining = remaining - (hours * hour_secs)
minutes = int(remaining // min_secs)
remaining = remaining - (minutes * min_secs)
print(f'Since epoch elapsed {days} days, {hours} hours, {minutes} minutes and {remaining:.2f} seconds')
# Option 2: print the time in every unit independently
days = int(seconds_since_epoch // day_secs)
hours = int((seconds_since_epoch // hour_secs))
minutes = int((seconds_since_epoch // min_secs))
print('Since epoch elapsed:')
print(f'{days} days')
print(f'{hours} hours')
print(f'{minutes} minutes')
print(f'{int(seconds_since_epoch)} seconds') | 28.416667 | 103 | 0.739736 | 214 | 1,364 | 4.593458 | 0.345794 | 0.091556 | 0.121058 | 0.101729 | 0.063072 | 0.063072 | 0.063072 | 0 | 0 | 0 | 0 | 0.035652 | 0.156892 | 1,364 | 48 | 104 | 28.416667 | 0.81913 | 0.431818 | 0 | 0.1 | 0 | 0.05 | 0.245111 | 0.033898 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.05 | 0 | 0.05 | 0.3 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
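# Added variant (not part of the original exercise): the same breakdown can be computed
# more compactly with divmod, reusing seconds_since_epoch from above.
total_minutes, secs = divmod(seconds_since_epoch, 60)
total_hours, mins = divmod(int(total_minutes), 60)
days, hours = divmod(total_hours, 24)
print(f'(divmod) {days} days, {hours} hours, {mins} minutes and {secs:.2f} seconds')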
7d3db8d96aba90b578b96d9a4d7134a521bba998 | 819 | py | Python | leetcode/342.py | GihwanKim/Baekjoon | 52eb2bf80bb1243697858445e5b5e2d50d78be4e | [
"MIT"
] | null | null | null | leetcode/342.py | GihwanKim/Baekjoon | 52eb2bf80bb1243697858445e5b5e2d50d78be4e | [
"MIT"
] | null | null | null | leetcode/342.py | GihwanKim/Baekjoon | 52eb2bf80bb1243697858445e5b5e2d50d78be4e | [
"MIT"
] | null | null | null | """
File: 342.py
Title: Power of Four
Difficulty: Easy
URL: https://leetcode.com/problems/power-of-four/
"""
import unittest
class Solution:
def isPowerOfFour(self, num: int) -> bool:
while num > 1:
if (num % 4) != 0:
return False
num //= 4
return num == 1
class SolutionTestCase(unittest.TestCase):
def test_example1(self):
# Input
num = 16
# Output
output = True
solution = Solution()
self.assertEqual(solution.isPowerOfFour(num), output)
def test_example2(self):
# Input
num = 5
# Output
output = False
solution = Solution()
self.assertEqual(solution.isPowerOfFour(num), output)
if __name__ == "__main__":
unittest.main()
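# Added note (alternative approach, not used by the solution or tests above): a positive
# integer is a power of four exactly when it is a power of two and num - 1 is divisible
# by 3, so the loop can be replaced by a constant-time check such as
#   num > 0 and num & (num - 1) == 0 and (num - 1) % 3 == 0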
| 19.5 | 61 | 0.556777 | 86 | 819 | 5.186047 | 0.511628 | 0.03139 | 0.049327 | 0.139013 | 0.273543 | 0.273543 | 0.273543 | 0.273543 | 0 | 0 | 0 | 0.023941 | 0.336996 | 819 | 41 | 62 | 19.97561 | 0.797422 | 0.155067 | 0 | 0.190476 | 0 | 0 | 0.01203 | 0 | 0 | 0 | 0 | 0 | 0.095238 | 1 | 0.142857 | false | 0 | 0.047619 | 0 | 0.380952 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d3e42c0f2708076b73f0558a20fc183f281b6a1 | 593 | py | Python | day1/main_part2.py | kzabashta/advent-of-code-17 | e37ff0a211791b7c120c78269b7927d8d40583fe | [
"MIT"
] | null | null | null | day1/main_part2.py | kzabashta/advent-of-code-17 | e37ff0a211791b7c120c78269b7927d8d40583fe | [
"MIT"
] | null | null | null | day1/main_part2.py | kzabashta/advent-of-code-17 | e37ff0a211791b7c120c78269b7927d8d40583fe | [
"MIT"
] | null | null | null | #!/usr/bin/env python
f = open('input.txt')
contents = list(f.readline())
def inverse_captcha(chars):
cumulative = 0
cur = chars[0]
for idx, cur in enumerate(chars):
if idx < int(len(chars) / 2) - 1:
next = chars[int(len(chars) / 2) + idx]
else:
next = chars[idx - int(len(chars) / 2)]
if cur == next:
cumulative+=cur
return cumulative
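# Added note (equivalent formulation, not part of the original solution): with modular
# indexing the whole part-2 sum can be written in one line, where s is the digit string:
#   sum(int(c) for i, c in enumerate(s) if c == s[(i + len(s) // 2) % len(s)])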
if __name__ == '__main__':
f = open('input.txt')
#print(inverse_captcha([1,2,3,1,2,3]))
    print(inverse_captcha([int(i) for i in list(f.readline().strip())]))  # strip the trailing newline before converting to digits
7d410f002d5c4b8b09e1dc583583d33cbd2ae1dc | 724 | py | Python | contrib/generate_secret_key.py | abnerpc/flask_tutorial | a7c02f9f2afb9727ff4b12df2a3b1d4c90ac5e4a | [
"MIT"
] | 7 | 2016-12-27T01:20:39.000Z | 2019-06-20T12:50:23.000Z | contrib/generate_secret_key.py | rafaelhenrique/flask_tutorial | eb8f1fcc4b1b442c6bfe7657cc83f8e4c678e6b9 | [
"MIT"
] | 6 | 2018-10-25T21:22:20.000Z | 2019-05-19T16:03:35.000Z | contrib/generate_secret_key.py | rafaelhenrique/flask_tutorial | eb8f1fcc4b1b442c6bfe7657cc83f8e4c678e6b9 | [
"MIT"
] | 6 | 2018-04-15T01:49:55.000Z | 2018-10-27T15:09:46.000Z | import random
"""
Some parts of this code were extracted from Django framework
https://github.com/django/django/blob/master/django/utils/crypto.py
"""
random = random.SystemRandom()
def get_random_string(length=12,
allowed_chars='abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
"""
Returns a securely generated random string.
The default length of 12 with the a-z, A-Z, 0-9 character set returns
a 71-bit value. log_2((26+26+10)^12) =~ 71 bits
"""
return ''.join(random.choice(allowed_chars) for i in range(length))
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
print(get_random_string(50, chars))
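# Added note: with the 50-symbol alphabet above and length 50, the generated key carries
# roughly 50 * log2(50) =~ 282 bits of entropy (same reasoning as the 71-bit example in
# the docstring of get_random_string).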
| 27.846154 | 76 | 0.672652 | 89 | 724 | 5.382022 | 0.662921 | 0.075157 | 0.06263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.071553 | 0.208564 | 724 | 25 | 77 | 28.96 | 0.764398 | 0.222376 | 0 | 0 | 0 | 0 | 0.279302 | 0.279302 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.375 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d450f78e13698d70c7590d58a14bd80ecb2d4b0 | 931 | py | Python | CHAPTER 14 (graph algorithm)/topological_sort.py | ahammadshawki8/Data-Structures-Algorithms-in-Python- | fc18b54128cd5bc7639a14999d8f990190b524eb | [
"MIT"
] | null | null | null | CHAPTER 14 (graph algorithm)/topological_sort.py | ahammadshawki8/Data-Structures-Algorithms-in-Python- | fc18b54128cd5bc7639a14999d8f990190b524eb | [
"MIT"
] | null | null | null | CHAPTER 14 (graph algorithm)/topological_sort.py | ahammadshawki8/Data-Structures-Algorithms-in-Python- | fc18b54128cd5bc7639a14999d8f990190b524eb | [
"MIT"
] | null | null | null | from graph_class import *
def topological_sort(g):
"""Reurn a list of vertices of directed acyclic graph g in topological order.
If graph g has a cycle, the result will be incomplete.
"""
topo = [] # a list of vertices placed in topological order.
ready = [] # list of vertices that have no remaining constraints.
incount = {} # keep track of in-degree for each vertex.
for u in g.vertices():
        incount[u] = g.degree(u, False) # parameter requests incoming degree
        if incount[u] == 0: # if u has no incoming edges
            ready.append(u) # it is free of constraints
while len(ready) > 0 :
        u = ready.pop() # u is free of constraints
topo.append(u) # add u to topological order
for e in g.incident_edges(u): # consider all outgoing neighbours of u
v = e.opposite(u)
incount[v] -= 1 # v has one less constraint without u
if incount[v] == 0:
ready.append(v)
return topo | 35.807692 | 78 | 0.663802 | 148 | 931 | 4.155405 | 0.47973 | 0.029268 | 0.068293 | 0.04878 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005682 | 0.243824 | 931 | 26 | 79 | 35.807692 | 0.867898 | 0.522019 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.055556 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
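# Added remark (not from the original author): this is Kahn's algorithm. Assuming the
# graph_class methods vertices(), degree() and incident_edges() run in time proportional
# to their output, the sort costs O(n + m) for n vertices and m edges, and len(topo) == n
# exactly when g is acyclic.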
7d4ee3f942d193ee180ca66557844163bf0af30d | 5,599 | py | Python | saint_venant_reflective boundary.py | hwangpo/Navier-Stokes-Simulation-in-1D | 80af48562f7d540f4c79f31634a15377473b8fb4 | [
"MIT"
] | 2 | 2021-07-23T07:06:31.000Z | 2022-02-01T20:42:16.000Z | saint_venant_reflective boundary.py | hwangpo/Navier-Stokes-Simulation-in-1D | 80af48562f7d540f4c79f31634a15377473b8fb4 | [
"MIT"
] | null | null | null | saint_venant_reflective boundary.py | hwangpo/Navier-Stokes-Simulation-in-1D | 80af48562f7d540f4c79f31634a15377473b8fb4 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------------------------------------------
"""
Author : Mohamed
Title : Implementation of the Saint-Venant (shallow water) equations
Description : Simulation of waves on the domain [0,1] with the Saint-Venant equations
"""
#-----------------------------------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
#Params
g = 9.8
def F(U):
"""
    U[0] : water height
    U[1] : discharge (flow rate)
"""
return np.array([U[1],(U[1]**2)/U[0] + g*(U[0]**2)/2])
def f(U_g,U_d):
"""
    U_g/d : state vector on the left/right
"""
    # velocities
v_g = U_g[1]/U_g[0]
v_d = U_d[1]/U_d[0]
c = np.amax([np.abs(v_g)+np.sqrt(g*U_g[0]), np.abs(v_d)+np.sqrt(g*U_d[0])])
return (F(U_g)+F(U_d))/2 - (c/2)*(U_d - U_g)
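# Added remark (a reading of the two functions above, not a comment by the original author):
# F(U) is the physical flux of the 1D shallow-water (Saint-Venant) system
#     dU/dt + dF(U)/dx = 0,    U = (h, q),    F(U) = (q, q**2/h + g*h**2/2)
# and f(U_g, U_d) is the local Lax-Friedrichs (Rusanov) numerical flux between two cells,
#     f(U_g, U_d) = (F(U_g) + F(U_d))/2 - (c/2)*(U_d - U_g),
# where c is the largest local wave speed |v| + sqrt(g*h) of the two neighbouring states.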
#----------------------------------------------------------------------------------------------------------
# Mesh
#-------------------------
N = 50
dx = 1.0/N
x = np.linspace(0, 1, num=N+1) # there are N+1 grid points since the domain is split into N intervals
#[End]----------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
# Initial and boundary conditions
#-------------------------
v_0 = np.zeros(N+1) # zero velocity at t = 0
# Discontinuous initial condition on the heights
# h_0 = np.ones(N+1)
# h_0[N//2:] = np.ones(len(h_0[N//2:])) * 20
# Continuous initial condition
h_0 = np.linspace(1,20,N+1)
U_0 = np.array([ h_0, h_0*v_0 ])
#[End]----------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
# Array definitions
#-------------------------
U_tab = np.array([U_0]) # array of state vectors U (holding the data of the whole mesh) at every time step.
time = np.zeros(1) # array of time instants
#[End]----------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
# Implementation
#-------------------------
def lambda1(v,h):
return v - np.sqrt(g*h)
def lambda2(v,h):
return v + np.sqrt(g*h)
# start of the time evolution
n = 0 # --> n is the time index
while n < 1000:
    h_n = U_tab[n][0] # heights h_n
    q_n = U_tab[n][1] # discharges q_n
    v_n = q_n/h_n # velocities v_n
    m = 2 * np.max([np.abs(lambda1(v_n,h_n)), np.abs(lambda2(v_n,h_n))])
    dt = dx/m
    time = np.append(time, time[n]+dt) # append to the time array
    # U_nplus1 = U_tab[n] # --> careful here: U_nplus1 would share the same memory as U_tab[n]!
    U_nplus1 = np.array([np.ones(N+1), np.ones(N+1)]) # array holding the state vectors U at time n+1 over the whole mesh
    for i in range(1,N): # i -> position on the mesh
        # Approach: zero velocity at the boundary (to produce a reflection)
        u_in = np.array([h_n[i], q_n[i]]) # u_in: state vector at position i holding h_n[i], q_n[i] at time n
        u_ig = np.array([h_n[(i-1)], q_n[(i-1)]]) # left neighbour state at time n
        u_id = np.array([h_n[(i+1)], q_n[(i+1)]])
        u_i_nplus1 = u_in - (dt/dx)*(f(u_in, u_id)-f(u_ig, u_in)) # state vector U at time n+1, position i
        # store the values in the new state array at time n+1, position i
        U_nplus1[0][i] = u_i_nplus1[0]
        U_nplus1[1][i] = u_i_nplus1[1]
    #i=0
    U_nplus1[0][0] = h_n[1]
    U_nplus1[1][0] = -0.7*q_n[1] # velocity with a restitution coefficient
    #i=N
    U_nplus1[0][N] = h_n[N-1]
    U_nplus1[1][N] = -0.7*q_n[N-1] # velocity with a restitution coefficient
    U_tab = np.concatenate((U_tab, [U_nplus1])) # append the new data for time n+1
    n = n+1 # advance the time index
#[End]----------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
# Plot animation
#-------------------------
# plt.ion() # interactive mode
# plt.figure(1) # new figure
# plt.plot(x, U_tab[0][0])
# plt.show()
# plt.pause(0.01) # needed (matplotlib quirk)
# plt.hold(False) # do not overlay the figures, so that the motion is animated
# for j in range(1,n):
# plt.plot(x, U_tab[j][0]) # redraw
# plt.ylim([0, 21]) # limits on the y axis
# plt.pause(0.00001) # wait a little
# plt.ioff() # end of interactive mode
# plt.show() # needed
import matplotlib.animation as animation #Tutorial http://jakevdp.github.io/blog/2012/08/18/matplotlib-animation-tutorial/
fig = plt.figure()
plt.ylim([-80, 80]) # limits on the y axis
line1, = plt.plot(x, U_tab[0][0], lw=2)
line2, = plt.plot(x, U_tab[0][1], lw=2)
def animate(j):
y1 = U_tab[j][0]
x1 = x
line1.set_data(x1, y1)
y1 = U_tab[j][1]
line2.set_data(x1, y1)
return line1,line2
def init():
line1.set_data(x, U_tab[0][0])
line2.set_data(x, U_tab[0][1])
return line1,line2
ani = animation.FuncAnimation(fig, animate, init_func = init, frames = n, interval = 5, blit = False)
ani.save('wave_motion.mp4', fps=30, extra_args=['-vcodec', 'libx264'])
plt.show()
#[End]---------------------------------------------------------------------------------------------------- | 27.581281 | 137 | 0.481872 | 843 | 5,599 | 3.083037 | 0.259786 | 0.013082 | 0.020777 | 0.019238 | 0.138515 | 0.106195 | 0.08157 | 0.070796 | 0.027703 | 0.011543 | 0 | 0.036102 | 0.144133 | 5,599 | 203 | 138 | 27.581281 | 0.50626 | 0.619039 | 0 | 0.031746 | 0 | 0 | 0.014293 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.047619 | 0.031746 | 0.238095 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d5005563466172052f7117f43881598fd9091c5 | 3,261 | py | Python | Examples/ApiExamples/ex_xaml_fixed_save_options.py | alex-dudin/Aspose.Words-for-Python-via-.NET | 02b257df8da9892fcce671c473c2ef27b68b5087 | [
"MIT"
] | 3 | 2021-12-04T22:17:28.000Z | 2022-02-22T03:30:01.000Z | Examples/ApiExamples/ex_xaml_fixed_save_options.py | alex-dudin/Aspose.Words-for-Python-via-.NET | 02b257df8da9892fcce671c473c2ef27b68b5087 | [
"MIT"
] | 4 | 2021-11-26T10:01:06.000Z | 2021-12-14T15:01:11.000Z | Examples/ApiExamples/ex_xaml_fixed_save_options.py | alex-dudin/Aspose.Words-for-Python-via-.NET | 02b257df8da9892fcce671c473c2ef27b68b5087 | [
"MIT"
] | 2 | 2021-10-20T18:06:22.000Z | 2021-10-29T20:59:18.000Z | # Copyright (c) 2001-2022 Aspose Pty Ltd. All Rights Reserved.
#
# This file is part of Aspose.Words. The source code in this file
# is only intended as a supplement to the documentation, and is provided
# "as is", without warranty of any kind, either expressed or implied.
import os
import aspose.words as aw
from api_example_base import ApiExampleBase, MY_DIR, ARTIFACTS_DIR
class ExXamlFixedSaveOptions(ApiExampleBase):
#ExStart
#ExFor:XamlFixedSaveOptions
#ExFor:XamlFixedSaveOptions.resource_saving_callback
#ExFor:XamlFixedSaveOptions.resources_folder
#ExFor:XamlFixedSaveOptions.resources_folder_alias
#ExFor:XamlFixedSaveOptions.save_format
#ExSummary:Shows how to print the URIs of linked resources created while converting a document to fixed-form .xaml.
def test_resource_folder(self):
doc = aw.Document(MY_DIR + "Rendering.docx")
callback = ExXamlFixedSaveOptions.ResourceUriPrinter()
# Create a "XamlFixedSaveOptions" object, which we can pass to the document's "save" method
# to modify how we save the document to the XAML save format.
options = aw.saving.XamlFixedSaveOptions()
self.assertEqual(aw.SaveFormat.XAML_FIXED, options.save_format)
# Use the "resources_folder" property to assign a folder in the local file system into which
# Aspose.Words will save all the document's linked resources, such as images and fonts.
options.resources_folder = ARTIFACTS_DIR + "XamlFixedResourceFolder"
# Use the "resources_folder_alias" property to use this folder
# when constructing image URIs instead of the resources folder's name.
options.resources_folder_alias = ARTIFACTS_DIR + "XamlFixedFolderAlias"
options.resource_saving_callback = callback
# A folder specified by "resources_folder_alias" will need to contain the resources instead of "resources_folder".
# We must ensure the folder exists before the callback's streams can put their resources into it.
os.makedirs(options.resources_folder_alias)
doc.save(ARTIFACTS_DIR + "XamlFixedSaveOptions.resource_folder.xaml", options)
for resource in callback.resources:
print(resource)
self._test_resource_folder(callback) #ExSkip
class ResourceUriPrinter(aw.saving.IResourceSavingCallback):
"""Counts and prints URIs of resources created during conversion to fixed .xaml."""
def __init__(self):
self.resources = [] # type: List[str]
def resource_saving(self, args: aw.saving.ResourceSavingArgs):
            self.resources.append(f"Resource \"{args.resource_file_name}\"\n\t{args.resource_file_uri}")
# If we specified a resource folder alias, we would also need
# to redirect each stream to put its resource in the alias folder.
args.resource_stream = open(args.resource_file_uri, 'wb')
args.keep_resource_stream_open = False
#ExEnd
def _test_resource_folder(self, callback: ExXamlFixedSaveOptions.ResourceUriPrinter):
self.assertEqual(15, len(callback.resources))
for resource in callback.resources:
self.assertTrue(os.path.exists(resource.split('\t')[1]))
| 42.350649 | 122 | 0.726158 | 407 | 3,261 | 5.687961 | 0.390663 | 0.064795 | 0.043197 | 0.034557 | 0.047516 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004234 | 0.203312 | 3,261 | 76 | 123 | 42.907895 | 0.886836 | 0.456302 | 0 | 0.071429 | 0 | 0 | 0.080321 | 0.052783 | 0 | 0 | 0 | 0 | 0.107143 | 1 | 0.142857 | false | 0 | 0.107143 | 0 | 0.321429 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7d51526e79b9f6c16042c6a364e90e861c222b06 | 1,453 | py | Python | core/models/resnet.py | tridivb/attention_based_tbn | 8fc32216664833c48579c9bd8b63fdf5aa5a7711 | [
"MIT"
] | 7 | 2020-07-20T08:29:45.000Z | 2020-08-04T14:00:15.000Z | core/models/resnet.py | tridivb/attention_based_tbn | 8fc32216664833c48579c9bd8b63fdf5aa5a7711 | [
"MIT"
] | null | null | null | core/models/resnet.py | tridivb/attention_based_tbn | 8fc32216664833c48579c9bd8b63fdf5aa5a7711 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from torchvision import models
import numpy as np
class Resnet(nn.Module):
"""
Resnet module
"""
def __init__(self, model_depth, modality, in_channels):
super(Resnet, self).__init__()
if model_depth == 18:
self.model = models.resnet18(pretrained=True)
elif model_depth == 34:
self.model = models.resnet34(pretrained=True)
elif model_depth == 50:
self.model = models.resnet50(pretrained=True)
elif model_depth == 101:
self.model = models.resnet101(pretrained=True)
elif model_depth == 152:
self.model = models.resnet152(pretrained=True)
if modality != "RGB":
weight = self.model.conv1.weight.mean(dim=1).unsqueeze(dim=1)
self.model.conv1 = nn.Conv2d(
in_channels,
self.model.conv1.out_channels,
kernel_size=self.model.conv1.kernel_size,
stride=self.model.conv1.stride,
padding=self.model.conv1.padding,
bias=self.model.conv1.bias,
)
self.model.conv1.weight = torch.nn.Parameter(weight)
self.feature_size = self.model.fc.in_features
self.model = nn.Sequential(*list(self.model.children())[:-1])
def forward(self, input):
feat = self.model(input)
feat = feat.view(feat.size(0), -1)
return feat
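# Example usage (a sketch added for illustration; the "Spec" modality name and the input
# sizes are made-up values, not taken from this repository):
#   backbone = Resnet(model_depth=50, modality="Spec", in_channels=1)
#   feats = backbone(torch.randn(4, 1, 224, 224))
#   # feats has shape (4, backbone.feature_size), i.e. (4, 2048) for a depth-50 backbone.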
| 32.288889 | 73 | 0.594632 | 173 | 1,453 | 4.872832 | 0.358382 | 0.192171 | 0.132859 | 0.109134 | 0.132859 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037037 | 0.293875 | 1,453 | 44 | 74 | 33.022727 | 0.7846 | 0.008947 | 0 | 0 | 0 | 0 | 0.002107 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.117647 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ada3d9a346535ac7e3623c52af23249af90a3bb8 | 1,398 | py | Python | examples/RP2/mono_fonts/mono_fonts.py | russhughes/gc9a01_mpy | c34db179ae242c4fd118f0a46ce9b47e0c574c8b | [
"MIT"
] | 12 | 2021-04-26T00:15:20.000Z | 2022-02-14T05:09:06.000Z | examples/RP2/mono_fonts/mono_fonts.py | russhughes/gc9a01_mpy | c34db179ae242c4fd118f0a46ce9b47e0c574c8b | [
"MIT"
] | 2 | 2021-09-17T14:44:08.000Z | 2022-01-01T18:38:24.000Z | examples/RP2/mono_fonts/mono_fonts.py | russhughes/gc9a01_mpy | c34db179ae242c4fd118f0a46ce9b47e0c574c8b | [
"MIT"
] | 4 | 2021-05-21T13:05:41.000Z | 2022-03-16T16:24:21.000Z | """
mono_fonts.py
Demo for monofont2bitmap converter and bitmap method
on a GC9A01 display connected to a Raspberry Pi Pico.
Pico Pin Display
========= =======
14 (GP10) BL
15 (GP11) RST
16 (GP12) DC
17 (GP13) CS
18 (GND) GND
19 (GP14) CLK
20 (GP15) DIN
"""
import time
from machine import Pin, SPI
import gc9a01
import inconsolata_16 as font_16
import inconsolata_32 as font_32
import inconsolata_64 as font_64
def main():
fast = False
def display_font(font):
tft.fill(gc9a01.BLUE)
column = 0
row = 0
for char in font.MAP:
tft.bitmap(font, column, row, font.MAP.index(char))
column += font.WIDTH
if column >= tft.width() - font.WIDTH:
row += font.HEIGHT
column = 0
if row > tft.height() - font.HEIGHT:
row = 0
if not fast:
time.sleep(0.05)
spi = SPI(1, baudrate=60000000, sck=Pin(14), mosi=Pin(15))
tft = gc9a01.GC9A01(
spi,
240,
240,
reset=Pin(11, Pin.OUT),
cs=Pin(13, Pin.OUT),
dc=Pin(12, Pin.OUT),
backlight=Pin(10, Pin.OUT),
rotation=0)
tft.init()
while True:
for font in [font_16, font_32, font_64]:
display_font(font)
fast = not fast
main()
| 20.558824 | 63 | 0.532189 | 189 | 1,398 | 3.873016 | 0.460317 | 0.032787 | 0.040984 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.106145 | 0.3598 | 1,398 | 67 | 64 | 20.865672 | 0.711732 | 0.218884 | 0 | 0.157895 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.157895 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ada5513894ff210423e20db95655fc3037f8279b | 4,285 | py | Python | models/svgp.py | pmorenoz/RecyclableGP | 8b14a8576c3f006582b971890a0ddd1c4ccbb4c4 | [
"MIT"
] | 12 | 2020-10-09T16:22:11.000Z | 2021-03-21T09:24:15.000Z | models/svgp.py | pmorenoz/RecyclableGP | 8b14a8576c3f006582b971890a0ddd1c4ccbb4c4 | [
"MIT"
] | 1 | 2021-08-23T20:51:42.000Z | 2021-08-23T20:51:42.000Z | models/svgp.py | pmorenoz/RecyclableGP | 8b14a8576c3f006582b971890a0ddd1c4ccbb4c4 | [
"MIT"
] | 1 | 2020-10-09T12:39:02.000Z | 2020-10-09T12:39:02.000Z | # Recyclable Gaussian Processes
# Pablo Moreno-Munoz (pmoreno@tsc.uc3m.es)
# Universidad Carlos III de Madrid
# January 2020
import torch
from torch.distributions import MultivariateNormal as Normal
from torch.distributions import kl_divergence
import numpy as np
from GPy.inference.latent_function_inference import LatentFunctionInference
from GPy.inference.latent_function_inference.posterior import Posterior
class SVGP(torch.nn.Module):
"""
-- Sparse Variational Gaussian Process --
"""
def __init__(self, kernel, likelihood, M, input_dim=None):
super(SVGP, self).__init__()
if input_dim is None:
input_dim = 1
# Dimensions --
self.M = M #num. inducing
self.input_dim = int(input_dim) #dimension of x
# GP Elements --
self.likelihood = likelihood #type of likelihood
self.kernel = kernel #type of kernel
if self.input_dim > 1:
self.z = torch.nn.Parameter(2*torch.rand(self.M, self.input_dim) - 1.0, requires_grad=False)
else:
self.z = torch.nn.Parameter(torch.linspace(-0.9, 0.9, self.M)[:,None], requires_grad=False)
# Variational distribution --
self.q_m = torch.nn.Parameter(torch.randn(M,1), requires_grad=True) # variational: mean parameter
#self.q_L = torch.nn.Parameter(torch.rand(M,M), requires_grad=True) # or randn
self.q_L = torch.nn.Parameter(torch.eye(M), requires_grad=True) # variational: covariance
def forward(self, x, y):
# Variational parameters --
q_m = self.q_m
q_L = torch.tril(self.q_L)
q_S = torch.mm(q_L, q_L.t())
# Prior parameters (uses kernel) --
Kuu = self.kernel.K(self.z)
# Distributions -- q(u), p(u)
q_u = Normal(q_m.flatten(), q_S)
p_u = Normal(torch.zeros(self.M), Kuu)
# Calculus of q(f) --
Kff = self.kernel.K(x,x)
Kfu = self.kernel.K(x, self.z)
Kuf = torch.transpose(Kfu,0,1)
iKuu,_ = torch.solve(torch.eye(self.M), Kuu) # is pseudo-inverse?
A = Kfu.mm(iKuu)
AT = iKuu.mm(Kuf)
m_f = A.mm(q_m)
v_f = torch.diag(Kff + A.mm(q_S - Kuu).mm(AT))
# Expectation term --
expectation = self.likelihood.variational_expectation(y, m_f, v_f)
# KL divergence --
kl = kl_divergence(q_u, p_u)
# Lower bound (ELBO) --
elbo = expectation.sum() - kl
return -elbo
def predictive(self, x_new, lik_noise=False):
# Matrices
q_m = self.q_m.detach().numpy()
q_L = torch.tril(self.q_L)
q_S = torch.mm(q_L, q_L.t()).detach().numpy()
Kuu = self.kernel.K(self.z, self.z).detach().numpy()
posterior = Posterior(mean=q_m, cov=q_S, K=Kuu, prior_mean=np.zeros(q_m.shape))
Kx = self.kernel.K(self.z, x_new).detach().numpy()
Kxx = self.kernel.K(x_new, x_new).detach().numpy()
# GP Predictive Posterior - mean + variance
gp_mu = np.dot(Kx.T, posterior.woodbury_vector)
Kxx = np.diag(Kxx)
gp_var = (Kxx - np.sum(np.dot(np.atleast_3d(posterior.woodbury_inv).T, Kx) * Kx[None, :, :], 1)).T
gp = gp_mu
if lik_noise:
gp_upper = gp_mu + 2 * np.sqrt(gp_var) + 2 * self.likelihood.sigma.detach().numpy()
gp_lower = gp_mu - 2 * np.sqrt(gp_var) - 2 * self.likelihood.sigma.detach().numpy()
else:
gp_upper = gp_mu + 2*np.sqrt(gp_var)
gp_lower = gp_mu - 2*np.sqrt(gp_var)
return gp, gp_upper, gp_lower
def rmse(self, x_new, f_new):
f_gp,_,_ = self.predictive(x_new)
rmse = torch.sqrt(torch.mean((f_new - f_gp)**2.0)).detach().numpy()
return rmse
def mae(self, x_new, f_new):
f_gp,_,_ = self.predictive(x_new)
mae = torch.mean(torch.abs(f_new - f_gp)).detach().numpy()
return mae
def nlpd(self, x_new, y_new):
f_gp, u_gp, _ = self.predictive(x_new)
f_gp = torch.from_numpy(f_gp)
u_gp = torch.from_numpy(u_gp)
v_gp = torch.pow(0.5*(u_gp - f_gp), 2.0)
nlpd = - torch.mean(self.likelihood.log_predictive(y_new, f_gp, v_gp)).detach().numpy()
return nlpd
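# Minimal training sketch (added for illustration; the RBF kernel and Gaussian likelihood
# names below are placeholders for whatever kernel/likelihood objects this codebase
# provides, and x_train / y_train are assumed to be torch tensors):
#   model = SVGP(kernel=RBF(), likelihood=Gaussian(), M=20, input_dim=1)
#   optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)
#   for _ in range(1000):
#       optimizer.zero_grad()
#       loss = model(x_train, y_train)   # forward() returns the negative ELBO
#       loss.backward()
#       optimizer.step()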
| 34.556452 | 106 | 0.5972 | 640 | 4,285 | 3.807813 | 0.232813 | 0.008207 | 0.017234 | 0.034469 | 0.224046 | 0.185474 | 0.137874 | 0.115716 | 0.115716 | 0.091096 | 0 | 0.009882 | 0.267911 | 4,285 | 123 | 107 | 34.837398 | 0.766975 | 0.149592 | 0 | 0.081081 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0 | 0.081081 | 0 | 0.243243 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ada60b6dcaf641a2ff6267b7b9d4e59947beef7c | 11,077 | py | Python | twitter_pos.py | hendrycks/GELUs | 2726505c4767eed99dfd89ce0f836fbca44d6a6d | [
"MIT"
] | 83 | 2016-10-06T16:49:06.000Z | 2022-03-28T07:29:30.000Z | twitter_pos.py | tuozhanjun/GELUs | 55e8964c6ca1bd36c004a619df396547c6c2a6a9 | [
"MIT"
] | 2 | 2019-04-09T05:58:14.000Z | 2020-06-23T21:16:33.000Z | twitter_pos.py | tuozhanjun/GELUs | 55e8964c6ca1bd36c004a619df396547c6c2a6a9 | [
"MIT"
] | 17 | 2016-09-26T05:39:14.000Z | 2022-02-14T00:22:06.000Z | import numpy as np
import tensorflow as tf
import pickle
import sys
import io
import os
try:
nonlinearity_name = sys.argv[1] # 'relu', 'elu', 'gelu', or 'silu'
except:
print('Defaulted to gelu since no nonlinearity specified through command line')
nonlinearity_name = 'gelu'
try:
learning_rate = float(sys.argv[2]) # 0.001, 0.0001, 0.00001
except:
print('Defaulted to a learning rate of 0.001')
learning_rate = 1e-3
p = 0.8
#
# Begin Twitter Helper Functions
#
def embeddings_to_dict(filename):
'''
:param filename: the file name of the word embeddings | file is assumed
to follow this format: "word[tab]dimension 1[space]dimension 2[space]...[space]dimension 50"
:return: a dictionary with keys that are words and values that are the embedding of a word
'''
with io.open(filename, 'r', encoding='utf-8') as f:
word_vecs = {}
for line in f:
line = line.strip('\n').split()
word_vecs[line[0]] = np.array([float(s) for s in line[1:]])
return word_vecs
def data_to_mat(filename, vocab, tag_to_number, window_size=1, start_symbol=u'UUUNKKK',
one_hot=False, return_labels=True):
'''
:param filename: the filename of a training, development, devtest, or test set
:param vocab: a list of strings, one for each embedding (the keys of a dictionary)
:param tag_to_number: a dictionary of tags to predict and a numerical encoding of those tags;
with this, we will predict numbers instead of strings
:param window_size: the context window size for the left and right; thus we have 2*window_size + 1
words considered at a time
:param start_symbol: since the <s> symbol has no embedding given, chose a symbol in the vocab
to replace <s>. Common choices are u'UUUNKKK' or u'</s>'
:return: a n x (window_size*2 + 1) matrix containing context windows and the center word
represented as strings; n is the number of examples. ALSO return a n x |tag_to_number|
matrix of labels for the n examples with a one-hot (1-of-k) encoding
'''
with io.open(filename, 'r', encoding='utf-8') as f:
x, tweet_words, y = [], [], []
start = True
for line in f:
line = line.strip('\n')
if len(line) == 0: # if end of tweet
tweet_words.extend([u'</s>'] * window_size)
# ensure tweet words are in vocab; if not, map to "UUUNKKK"
tweet_words = [w if w in vocab else u'UUUNKKK' for w in tweet_words]
# from this tweet, add the training tasks to dataset
# the tags were already added to y
for i in range(window_size, len(tweet_words) - window_size):
x.append(tweet_words[i-window_size:i+window_size+1])
tweet_words = []
start = True
continue
# if before end
word, label = line.split('\t')
if start:
tweet_words.extend([start_symbol] * window_size)
start = False
tweet_words.append(word)
if return_labels is True:
if one_hot is True:
label_one_hot = len(tag_to_number) * [0]
label_one_hot[tag_to_number[label]] += 1
y.append(label_one_hot)
else:
y.append(tag_to_number[label])
return np.array(x), np.array(y)
def word_list_to_embedding(words, embeddings, embedding_dimension=50):
'''
:param words: an n x (2*window_size + 1) matrix from data_to_mat
:param embeddings: an embedding dictionary where keys are strings and values
are embeddings; the output from embeddings_to_dict
:param embedding_dimension: the dimension of the values in embeddings; in this
assignment, embedding_dimension=50
:return: an n x ((2*window_size + 1)*embedding_dimension) matrix where each entry of the
words matrix is replaced with its embedding
'''
m, n = words.shape
words = words.reshape((-1))
return np.array([embeddings[w] for w in words], dtype=np.float32).reshape(m, n*embedding_dimension)
#
# End Twitter Helper Functions
#
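# Added note (illustrative arithmetic, not from the original authors): with window_size=1
# every training example is a 3-word window, so word_list_to_embedding maps a batch of
# shape (batch_size, 3) to (batch_size, 3 * 50) = (batch_size, 150), which is exactly
# example_size below.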
window_size = 1
# note that we encode the tags with numbers for later convenience
tag_to_number = {
u'N': 0, u'O': 1, u'S': 2, u'^': 3, u'Z': 4, u'L': 5, u'M': 6,
u'V': 7, u'A': 8, u'R': 9, u'!': 10, u'D': 11, u'P': 12, u'&': 13, u'T': 14,
u'X': 15, u'Y': 16, u'#': 17, u'@': 18, u'~': 19, u'U': 20, u'E': 21, u'$': 22,
u',': 23, u'G': 24
}
embeddings = embeddings_to_dict('./data/Tweets/embeddings-twitter.txt')
vocab = embeddings.keys()
# we replace <s> with </s> since it has no embedding, and </s> is a better embedding than UNK
xt, yt = data_to_mat('./data/Tweets/tweets-train.txt', vocab, tag_to_number, window_size=window_size,
start_symbol=u'</s>')
xdev, ydev = data_to_mat('./data/Tweets/tweets-dev.txt', vocab, tag_to_number, window_size=window_size,
start_symbol=u'</s>')
xdtest, ydtest = data_to_mat('./data/Tweets/tweets-devtest.txt', vocab, tag_to_number, window_size=window_size,
start_symbol=u'</s>')
data = {
'x_train': xt, 'y_train': yt,
'x_dev': xdev, 'y_dev': ydev,
'x_test': xdtest, 'y_test': ydtest
}
num_epochs = 30
num_tags = 25
hidden_size = 256
batch_size = 16
embedding_dimension = 50
example_size = (2*window_size + 1)*embedding_dimension
num_examples = data['y_train'].shape[0]
num_batches = num_examples//batch_size
graph = tf.Graph()
with graph.as_default():
x = tf.placeholder(tf.float32, [None, example_size])
y = tf.placeholder(tf.int64, [None])
is_training = tf.placeholder(tf.bool)
w1 = tf.Variable(tf.nn.l2_normalize(tf.random_normal([example_size, hidden_size]), 0))
b1 = tf.Variable(tf.zeros([hidden_size]))
w2 = tf.Variable(tf.nn.l2_normalize(tf.random_normal([hidden_size, hidden_size]), 0))
b2 = tf.Variable(tf.zeros([hidden_size]))
w_out = tf.Variable(tf.nn.l2_normalize(tf.random_normal([hidden_size, num_tags]), 0))
b_out = tf.Variable(tf.zeros([num_tags]))
if nonlinearity_name == 'relu':
f = tf.nn.relu
elif nonlinearity_name == 'elu':
f = tf.nn.elu
elif nonlinearity_name == 'gelu':
# def gelu(x):
# return tf.mul(x, tf.erfc(-x / tf.sqrt(2.)) / 2.)
# f = gelu
def gelu_fast(_x):
return 0.5 * _x * (1 + tf.tanh(tf.sqrt(2 / np.pi) * (_x + 0.044715 * tf.pow(_x, 3))))
f = gelu_fast
elif nonlinearity_name == 'silu':
def silu(_x):
return _x * tf.sigmoid(_x)
f = silu
# elif nonlinearity_name == 'soi':
# def soi_map(x):
# u = tf.random_uniform(tf.shape(x))
# mask = tf.to_float(tf.less(u, (1 + tf.erf(x / tf.sqrt(2.))) / 2.))
# return tf.cond(is_training, lambda: tf.mul(mask, x),
# lambda: tf.mul(x, tf.erfc(-x / tf.sqrt(2.)) / 2.))
# f = soi_map
else:
raise NameError("Need 'relu', 'elu', 'gelu', or 'silu' for nonlinearity_name")
def model(data_feed):
h1 = f(tf.matmul(data_feed, w1) + b1)
h1 = tf.cond(is_training, lambda: tf.nn.dropout(h1, p), lambda: h1)
h2 = f(tf.matmul(h1, w2) + b2)
h2 = tf.cond(is_training, lambda: tf.nn.dropout(h2, p), lambda: h2)
return tf.matmul(h2, w_out) + b_out
logits = model(x)
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits, y))
# pick optimizer
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)
compute_error = tf.reduce_mean(tf.to_float(tf.not_equal(tf.argmax(logits, 1), y)))
# store future results with previous results
if not os.path.exists("./data/"):
os.makedirs("./data/")
if os.path.exists("./data/twitter_pos_" + nonlinearity_name + ".p"):
history = pickle.load(open("./data/twitter_pos_" + nonlinearity_name + ".p", "rb"))
key_str = str(len(history)//7 + 1)
history["lr" + key_str] = learning_rate
history["train_loss" + key_str] = []
history["val_loss" + key_str] = []
history["test_loss" + key_str] = []
history["train_err" + key_str] = []
history["val_err" + key_str] = []
history["test_err" + key_str] = []
else:
history = {
"lr1": learning_rate,
'train_loss1': [], 'val_loss1': [], 'test_loss1': [],
'train_err1': [], 'val_err1': [], 'test_err1': []
}
key_str = '1'
with tf.Session(graph=graph) as sess:
print('Beginning training')
sess.run(tf.initialize_all_variables())
save_every = num_batches//5 # save training information 5 times per epoch
# train
for epoch in range(num_epochs):
# shuffle data every epoch
indices = np.arange(num_examples)
np.random.shuffle(indices)
data['x_train'] = data['x_train'][indices]
data['y_train'] = data['y_train'][indices]
for i in range(num_batches):
offset = i * batch_size
bx = word_list_to_embedding(data['x_train'][offset:offset + batch_size, :],
embeddings, embedding_dimension)
by = data['y_train'][offset:offset + batch_size]
if p < 1-1e-5: # we want to know how the full network is being optimized instead of the reduced version
l, err = sess.run([loss, compute_error], feed_dict={x: bx, y: by, is_training: False})
_, l_drop, err_drop = sess.run([optimizer, loss, compute_error], feed_dict={x: bx, y: by,
is_training: True})
if p < 1-1e-5: # we want to know how the full network is being optimized instead of the reduced version
history["train_loss" + key_str].append(l)
history["train_err" + key_str].append(err)
else:
history["train_loss" + key_str].append(l_drop)
history["train_err" + key_str].append(err_drop)
if i % save_every == 0:
l, err = sess.run([loss, compute_error],
feed_dict={x: word_list_to_embedding(data['x_dev'], embeddings, embedding_dimension),
y: data['y_dev'], is_training: False})
history["val_loss" + key_str].append(l)
history["val_err" + key_str].append(err)
l, err = sess.run([loss, compute_error],
feed_dict={x: word_list_to_embedding(data['x_test'], embeddings, embedding_dimension),
y: data['y_test'], is_training: False})
history["test_loss" + key_str].append(l)
history["test_err" + key_str].append(err)
# print('Epoch', epoch + 1, 'Complete')
# save history
pickle.dump(history, open("./data/twitter_pos_" + nonlinearity_name + ".p", "wb"))
| 39.419929 | 120 | 0.597544 | 1,599 | 11,077 | 3.971232 | 0.216385 | 0.033071 | 0.017323 | 0.010079 | 0.28126 | 0.23685 | 0.174173 | 0.139528 | 0.121575 | 0.115433 | 0 | 0.021952 | 0.272095 | 11,077 | 280 | 121 | 39.560714 | 0.765596 | 0.246186 | 0 | 0.12069 | 0 | 0 | 0.094402 | 0.015368 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.034483 | 0.011494 | 0.103448 | 0.017241 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ada6d3daf6592a8758e643fb7020d1ac60e5c2da | 3,375 | py | Python | Scores4EachParameterPerturbation.py | TakuyaHiraoka/Learning-Robust-Options-by-Conditional-Value-at-Risk-Optimization | 78d3f8e36cdc954897ddc6af9029991bff19fb58 | [
"MIT"
] | 9 | 2019-12-11T20:34:20.000Z | 2021-05-23T04:35:29.000Z | Scores4EachParameterPerturbation.py | TakuyaHiraoka/Learning-Robust-Options-by-Conditional-Value-at-Risk-Optimization | 78d3f8e36cdc954897ddc6af9029991bff19fb58 | [
"MIT"
] | null | null | null | Scores4EachParameterPerturbation.py | TakuyaHiraoka/Learning-Robust-Options-by-Conditional-Value-at-Risk-Optimization | 78d3f8e36cdc954897ddc6af9029991bff19fb58 | [
"MIT"
] | null | null | null | import os
import re
import statistics
def find_all_key_files_path(directory, keyfile_name):
fn = re.compile(".*"+keyfile_name+".*txt")
path=[]
for root, dirs, files in os.walk(directory):
for file in files:
if fn.match(file) is not None:
#print(file)
path.append(os.path.join(root, file))
return path
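# Added note: each matched line in a "-test" result file is expected to look roughly like
# (an illustrative line reconstructed from the regular expression in main, not real output):
#   return: 3021.45, ... 'torso_mass': 6.0, ... 'ground_friction': 2.0, ... 'joint_damping': 1.0, ... 'armature': 1.0 ...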
if __name__ == '__main__':
root_path = "./4test"
root_path = "./"
# find test files by recursively opening directory
result_files = find_all_key_files_path(root_path, "-test")
print(result_files)
# open test files
results = [] # list of dictionary representing the score {param 1 : its val., param 2 : its val, ..., score:val}
result_format = re.compile("^return: +(.[0-9]+.[0-9]+),.*"
+ "'torso_mass': +([0-9]+.[0-9]+),.*"
+ "'ground_friction': +([0-9]+.[0-9]+),.*"
+ "'joint_damping': +([0-9]+.[0-9]+),.*"
+ "'armature': +([0-9]+.[0-9]+).*"
)
for result_file in result_files:
f = open(result_file, "r")
dic_result = {}
# parse file into array
for line in f.readlines():
m =result_format.search(line)
if m is not None:
#print(m.group(0))
dic_result = {"score": float(m.group(1)),
"torso_mass": float(m.group(2)),
"ground_friction": float(m.group(3)),
"joint_damping": float(m.group(4)),
"armature": float(m.group(5)),
}
#print(dic_result)
results.append(dic_result)
f.close()
# calculate average (and std dev)
print("")
for target_param in ["torso_mass", "ground_friction", "joint_damping", "armature"]:
# enumerate
value_dic = {}
for result in results:
if result[target_param] not in value_dic.keys():
value_dic[result[target_param]]=[]
#print(target_param + str(value_dic))
for result in results:
value_dic[result[target_param]].append(result["score"])
#print(value_dic)
# print our result
for k in value_dic.keys():
num_samples = len(result_files) * len(value_dic[k])
break
print(target_param + "(" + str(num_samples) + "/" + str(len(result_files)) + "), ", end="")
for key in value_dic.keys():
print(str(key)+", ", end="")
print("")
print("mean, ", end="")
for key in value_dic.keys():
print(str(statistics.mean(value_dic[key]))+", ", end="")
print("")
print("stdev, ", end="")
for key in value_dic.keys():
print(str(statistics.stdev(value_dic[key]))+", ", end="")
print("")
# for certain case analysis
print("")
scores = []
for result in results:
if result["torso_mass"] == 9.0 and result["ground_friction"] == 2.5:
#print(result)
scores.append(result["score"])
mean = statistics.mean(scores)
std = statistics.stdev(scores)
print("mean, stdev, num")
print(str(mean) + ", " + str(std) + ", " + str(len(scores)))
| 37.087912 | 116 | 0.505481 | 391 | 3,375 | 4.194373 | 0.255754 | 0.063415 | 0.009146 | 0.012195 | 0.198171 | 0.121341 | 0.068902 | 0.068902 | 0.068902 | 0.05 | 0 | 0.014752 | 0.337185 | 3,375 | 90 | 117 | 37.5 | 0.718373 | 0.112593 | 0 | 0.15942 | 0 | 0 | 0.12479 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014493 | false | 0 | 0.043478 | 0 | 0.072464 | 0.202899 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
adaa5c3959c013f96317f4ed3ce3592bd0d0310d | 8,172 | py | Python | testing/MLDB-991-svm.py | kstepanmpmg/mldb | f78791cd34d01796705c0f173a14359ec1b2e021 | [
"Apache-2.0"
] | 665 | 2015-12-09T17:00:14.000Z | 2022-03-25T07:46:46.000Z | testing/MLDB-991-svm.py | tomzhang/mldb | a09cf2d9ca454d1966b9e49ae69f2fe6bf571494 | [
"Apache-2.0"
] | 797 | 2015-12-09T19:48:19.000Z | 2022-03-07T02:19:47.000Z | testing/MLDB-991-svm.py | matebestek/mldb | f78791cd34d01796705c0f173a14359ec1b2e021 | [
"Apache-2.0"
] | 103 | 2015-12-25T04:39:29.000Z | 2022-02-03T02:55:22.000Z | #
# MLDB-991-svm.py
# mldb.ai inc, 2015
# This file is part of MLDB. Copyright 2015 mldb.ai inc. All rights reserved.
#
import unittest
from mldb import mldb
class SvmTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
ds1 = mldb.create_dataset({
'type': 'sparse.mutable',
'id': 'dataset1'})
# create the dataset
ds1.record_row('1', [['label', 39, 0], ['x', 0.2, 0], ['y', -0.3, 0]])
ds1.record_row('2', [['label', 39, 0], ['x', 0.6, 0], ['y', -0.7, 0]])
ds1.record_row('3', [['label', 39, 0], ['x', 0.9, 0], ['y', -0.4, 0]])
ds1.record_row('4', [['label', 72, 0], ['x', -0.2, 0], ['y', 0.9, 0]])
ds1.record_row('5', [['label', 72, 0], ['x', -0.45, 0], ['y', 0.5, 0]])
ds1.record_row('6', [['label', 72, 0], ['x', -0.56, 0], ['y', 0.2, 0]])
ds1.commit()
def test_regression(self):
# Trivial regression test
result = mldb.put("/v1/procedures/svm_classif", {
"type": "svm.train",
"params": {
"trainingData": {"from" : {"id": "dataset1"}},
"modelFileUrl": "file://tmp/MLDB-991.svm"
}
})
mldb.log(result.json())
# run the procedure
result = mldb.post('/v1/procedures/svm_classif/runs')
mldb.log(result.json())
# create the function
result = mldb.put('/v1/functions/svm_function', {
'type': 'svm',
'params': {"modelFileUrl": "file://tmp/MLDB-991.svm"}})
mldb.log(result)
# test the function
result = mldb.get('/v1/functions/svm_function/application',
input={'embedding' : {'x': 1, 'y': -1}})
mldb.log(result)
self.assertEqual(result.json()['output']['output'], 39)
result = mldb.get('/v1/functions/svm_function/application',
input={'embedding' : {'x': -1, 'y': 1}})
mldb.log(result)
self.assertEqual(result.json()['output']['output'], 72)
def test_different_kernel(self):
# trivial test with a different kernel
test_procedure_config = {
"type": "svm.train",
"params": {
"trainingData": {"from" : {"id": "dataset1"}},
"configuration": {"kernel": 1},
"modelFileUrl": "file://tmp/MLDB-991-2.svm"
}
}
result = mldb.put("/v1/procedures/svm_classif2", test_procedure_config)
mldb.log(result.json())
result = mldb.post('/v1/procedures/svm_classif2/runs')
mldb.log(result.json())
result = mldb.put('/v1/functions/svm_function2', {
'type': 'svm',
'params': {"modelFileUrl": "file://tmp/MLDB-991-2.svm"}
})
mldb.log(result)
result = mldb.get('/v1/functions/svm_function2/application',
input={'embedding' : {'x': 1, 'y': -1}})
mldb.log(result)
self.assertEqual(result.json()['output']['output'], 39)
result = mldb.get('/v1/functions/svm_function2/application',
input={'embedding' : {'x': -1, 'y':1}})
mldb.log(result)
self.assertEqual(result.json()['output']['output'], 72)
def test_iris_dataset_classicfication(self):
# Iris dataset classification test
irisdataset = mldb.create_dataset({
"type": "sparse.mutable",
"id": "iris_dataset"
})
for i, line in enumerate(open("./mldb/testing/dataset/iris.data")):
cols = []
line_split = line.split(',')
if len(line_split) != 5:
continue
cols.append(["sepal length", float(line_split[0]), 0])
cols.append(["sepal width", float(line_split[1]), 0])
cols.append(["petal length", float(line_split[2]), 0])
cols.append(["petal width", float(line_split[3]), 0])
cols.append(["label", hash(line_split[4]) % 1000, 0])
irisdataset.record_row(str(i+1), cols)
irisdataset.commit()
result = mldb.get(
"/v1/query",
q="SELECT * FROM iris_dataset", format="table", rowNames="true", headers="true")
mldb.log(result)
result = mldb.put("/v1/procedures/svm_iris", {
"type": "svm.train",
"params": {
"trainingData": {"from" : {"id": "iris_dataset"}},
"modelFileUrl": "file://tmp/MLDB-991-iris.svm"
}
})
mldb.log(result.json())
result = mldb.post('/v1/procedures/svm_iris/runs')
mldb.log(result.json())
result = mldb.put(
'/v1/functions/svm_iris_function', {
'type': 'svm',
'params': {"modelFileUrl": "file://tmp/MLDB-991-iris.svm"}
})
mldb.log(result)
result = mldb.get(
"/v1/query",
q="SELECT label, svm_iris_function({{* excluding (label)} as embedding}) as result from iris_dataset",
format="table", rowNames="true", headers="true")
mldb.log(result)
result = mldb.get(
"/v1/query",
q="SELECT count(*) as result from iris_dataset where svm_iris_function({{* excluding (label)} as embedding})[output] != label",
format="table",
rowNames="false", headers="false")
mldb.log(result)
        # cross-regression gave two classification errors over 150
self.assertEqual(result.json()[0][0], 2)
def test_svm_regression(self):
# SVM-regression testing
def mypolynomial( x, y):
return 0.3 * pow(x, 2) + 2.4 * y - 1.7
ds3 = mldb.create_dataset({
'type': 'sparse.mutable',
'id': 'dataset3'})
ds3.record_row('1', [['label', mypolynomial(0, 0), 0], ['x', 0, 0], ['y', 0, 0]])
ds3.record_row('2', [['label', mypolynomial(1, 0), 0], ['x', 1, 0], ['y', 0, 0]])
ds3.record_row('3', [['label', mypolynomial(2, 0), 0], ['x', 2, 0], ['y', 0, 0]])
ds3.record_row('4', [['label', mypolynomial(0, 1), 0], ['x', 0, 0], ['y', 1, 0]])
ds3.record_row('5', [['label', mypolynomial(1, 1), 0], ['x', 1, 0], ['y', 1, 0]])
ds3.record_row('6', [['label', mypolynomial(2, 1), 0], ['x', 2, 0], ['y', 1, 0]])
ds3.record_row('7', [['label', mypolynomial(0, 2), 0], ['x', 0, 0], ['y', 2, 0]])
ds3.record_row('8', [['label', mypolynomial(1, 2), 0], ['x', 1, 0], ['y', 2, 0]])
ds3.record_row('9', [['label', mypolynomial(2, 2), 0], ['x', 2, 0], ['y', 2, 0]])
ds3.commit()
result = mldb.get(
"/v1/query",
q="SELECT * from dataset3", format="table", rowNames="true", headers="true")
mldb.log(result)
result = mldb.put("/v1/procedures/svm_regression", {
"type": "svm.train",
"params": {
"trainingData": { "from" : {"id": "dataset3"}},
"modelFileUrl": "file://tmp/MLDB-991-regression.svm",
"svmType": "regression"
}
})
mldb.log(result.json())
result = mldb.post('/v1/procedures/svm_regression/runs')
mldb.log(result.json())
result = mldb.put('/v1/functions/svm_regression_function', {
'type': 'svm',
'params': {
"modelFileUrl": "file://tmp/MLDB-991-regression.svm"
}
})
mldb.log(result)
result = mldb.get(
"/v1/query",
q="SELECT label, svm_regression_function({{* excluding (label)} as embedding}) as result from dataset3",
format="table", rowNames="true", headers="true")
mldb.log(result)
result = mldb.get(
"/v1/query",
q="SELECT sum(abs(svm_regression_function({{* excluding (label)} as embedding})[output] - label)) as totalError from dataset3",
format="table", rowNames="false", headers="false")
mldb.log(result)
# less than 5.0 total error
self.assertLess(result.json()[0][0], 5)
if __name__ == '__main__':
mldb.run_tests()
| 36.159292 | 139 | 0.511992 | 963 | 8,172 | 4.263759 | 0.158879 | 0.05358 | 0.069654 | 0.036532 | 0.639308 | 0.603264 | 0.575986 | 0.448368 | 0.39357 | 0.311495 | 0 | 0.047892 | 0.294787 | 8,172 | 225 | 140 | 36.32 | 0.664584 | 0.047112 | 0 | 0.432927 | 0 | 0.012195 | 0.271394 | 0.110797 | 0 | 0 | 0 | 0 | 0.036585 | 1 | 0.036585 | false | 0 | 0.012195 | 0.006098 | 0.060976 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
adb04f96c406981fc773185d7b2625fc04f813cd | 1,047 | py | Python | run_scripts/fakes/loop_full_fast.py | LSSTDESC/sn_pipe | d42c7490ba5ff8c52f62e70a20c922172a6baff1 | [
"BSD-3-Clause"
] | null | null | null | run_scripts/fakes/loop_full_fast.py | LSSTDESC/sn_pipe | d42c7490ba5ff8c52f62e70a20c922172a6baff1 | [
"BSD-3-Clause"
] | 3 | 2020-07-16T09:43:49.000Z | 2020-10-06T11:46:05.000Z | run_scripts/fakes/loop_full_fast.py | LSSTDESC/sn_pipe | d42c7490ba5ff8c52f62e70a20c922172a6baff1 | [
"BSD-3-Clause"
] | 2 | 2019-10-22T13:39:33.000Z | 2020-07-23T20:42:07.000Z | import numpy as np
import os
#simus = ['sn_fast', 'sn_cosmo']
simus = ['sn_cosmo']
ebvs = [0.0]
redcutoff = 800.0
blues = [360.0, 370.0, 380.0]
blues = [360.]
fake_config = 'input/Fake_cadence/Fake_cadence.yaml'
x1 = -2.0
color = 0.2
error_model = 0
for bluecutoff in blues:
bluecutoff = np.round(bluecutoff, 1)
for ebv in ebvs:
cmd_comm = 'python run_scripts/fakes/full_simulation_fit.py --ebvofMW {} --outDir_simu Output_Simu_{}_{}_ebvofMW_{} --outDir_fit Output_Fit_{}_{}_ebvofMW_{} --bluecutoff {} --redcutoff {} --fake_config {} --x1 {} --color {} --error_model {}'.format(
ebv, bluecutoff, redcutoff, ebv, bluecutoff, redcutoff, ebv, bluecutoff, redcutoff, fake_config, x1, color,error_model)
# fast simulator
# for simu in
for simu in simus:
cmd = cmd_comm
cmd += ' --simulator {}'.format(simu)
os.system(cmd)
"""
# sncosmo simulator
cmd = cmd_comm
cmd += ' --simulator sn_cosmo'
os.system(cmd)
"""
| 30.794118 | 258 | 0.60745 | 135 | 1,047 | 4.488889 | 0.377778 | 0.125413 | 0.108911 | 0.09571 | 0.30198 | 0.229373 | 0.151815 | 0.151815 | 0 | 0 | 0 | 0.038119 | 0.248329 | 1,047 | 33 | 259 | 31.727273 | 0.731893 | 0.055396 | 0 | 0 | 0 | 0.05 | 0.335664 | 0.152681 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
adb1f0df40c6af3e235b4b422243a99d00b48df0 | 10,276 | py | Python | titration_class.py | jkelowitt/titration-generator | 91da0a4745e543762498bd03654bda44c4456d39 | [
"MIT"
] | 1 | 2020-08-24T15:18:12.000Z | 2020-08-24T15:18:12.000Z | titration_class.py | jkelowitt/titration-generator | 91da0a4745e543762498bd03654bda44c4456d39 | [
"MIT"
] | 23 | 2020-12-29T08:57:05.000Z | 2021-05-10T02:26:33.000Z | titration_class.py | jkelowitt/t-builder | 91da0a4745e543762498bd03654bda44c4456d39 | [
"MIT"
] | null | null | null | """
@title: titration_class.py
@author: Jackson Elowitt
This file can be used to simulate titration curves.
First, use the Compound class to create a titrant and an analyte.
Second, pass in the analyte and titrant to the Titration class,
along with the concentrations and volumes of the analyte and titrants.
From the titration class, call Titration.ph_t to obtain the trimmed pH values, and call
Titration.volume_titrant_t to obtain the volume of titrant required to reach those pH values.
"""
from dataclasses import dataclass, field
from typing import List, Tuple, Generator, Any
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as IUS
def pk_to_k(pk) -> np.array:
"""Convert pK values to K values"""
return np.array(10.0 ** (-np.array(pk)))
def closest_value(num: float, arr: np.array) -> float:
"""Returns the closest value to the number in the array."""
return min(arr, key=lambda x: np.abs(x - num))
@dataclass
class Compound:
"""
Main class used to contain information about a specific compound
Parameters
----------
name : A string which holds the name of the compound
acidic : A boolean which represents whether or not the compound is acidic. True --> Acidic, False -> Basic
pKas: A list of floats which represents the pKa values of the compound.
"""
name: str
acidic: bool
pKas: list[float]
def __post_init__(self):
# The k values can become zero if the pKa value is too large ~> 330.
self.ks: np.array = np.array(10.0 ** (-np.array(self.pKas)))
@dataclass
class Titration:
"""
Main Titration Class. Performs the titration of an analyte with a titrant
given their concentrations and volumes.
Parameters
----------
analyte : A Compound class which represents the analyte of the titration
titrant : A Compound class which represents the titrant of the titration
concentration_analyte : A float which represents the concentration of the analyte
concentration_titrant : A float which represents the concentration of the titrant
volume_analyte : A float which represents the volume of analyte being titrated
Optional Parameters
-------------------
pKw : A custom value for the pKw of water. Default is None.
temp : A custom temperature for the temperature for the titration to take place.
Default is 25C. If pKw is None, this value is used to calculate the pKw at 25C.
decimal_places : The number of decimal places the titration should be simulated to. Default is 2 (2 -> 0.01).
"""
analyte: Compound
titrant: Compound
concentration_analyte: float
concentration_titrant: float
volume_analyte: float
pKw: float = field(default=None)
temp: float = field(default=25)
decimal_places: int = field(default=2)
def __post_init__(self):
"""Important values to calculate after the initialization"""
# Calculate the pKw
if self.pKw is not None: # If given a pKw
self.kw = 10 ** (-self.pKw)
else: # If given a temperature
self.kw = 10 ** (-self.temp_kw(self.temp))
# The increment level for the value ranges
self.precision: float = 10 ** -self.decimal_places
# Value ranges
self.ph, self.hydronium, self.hydroxide = self.starting_phs()
# Calculate the alpha values for the compounds at each pH
self.alpha_analyte = self.alpha_values(k=self.analyte.ks, acid=self.analyte.acidic)
self.alpha_titrant = self.alpha_values(k=self.titrant.ks, acid=self.titrant.acidic)
# Calculate and trim the volumes.
self.volume_titrant, self.phi = self.calculate_volume(self.titrant.acidic)
self.ph_t, self.volume_titrant_t = self.trim_values(self.ph, self.volume_titrant)
def starting_phs(self, min_ph: float = None, max_ph: float = None) -> Tuple[np.array, np.array, np.array]:
"""Returns a range of pH, hydronium concentration, and hydroxide concentrations"""
if min_ph is None:
min_ph = (14 * (not self.analyte.acidic)) - np.log10(self.concentration_analyte)
if max_ph is None:
max_ph = (14 * (not self.titrant.acidic)) - np.log10(self.concentration_analyte)
if self.analyte.acidic:
ph = np.arange(min_ph, max_ph, self.precision)
else: # Swap max and min pH so that the proper volume order is preserved.
ph = np.arange(max_ph, min_ph, self.precision)
h = 10 ** (-ph)
oh = self.kw / h
return ph, h, oh
@staticmethod
def temp_kw(temp: float) -> float:
"""Returns the pKw of water given a certain temperature in celsius."""
# Quadratic approximation of the data for liquid water found here:
# https://www.engineeringtoolbox.com/ionization-dissociation-autoprotolysis-constant-pKw-water-heavy-deuterium-oxide-d_2004.html
# 0 <= T <= 95 C
# R^2 = 0.9992
a = 0.000128275
b = -0.0406144
c = 14.9368
pKw = (a * temp ** 2) + (b * temp) + c
return pKw
@staticmethod
def _scale_data(data: np.array, a: float) -> np.array:
"""abs normalization"""
return a * (data / (1 + np.abs(data)))
@staticmethod
def scale_alphas(arr: np.array) -> np.array:
"""Scale the alpha values by its index in the sub-array"""
new_arr = []
for num, a in enumerate(np.transpose(arr)):
a *= num
new_arr.append(a)
return np.transpose(np.array(new_arr))
def alpha_values(self, k: np.array, acid: bool = True) -> np.array:
"""Finds the fraction of solution which each species of compound takes up at each pH."""
# If the k values are for K_b, convert to K_a. --> K_1 = K_w / K_n , K_2 = K_w / K_(n-1)
if not acid:
k = self.kw / np.flip(k) # TODO results in a Div by Zero error if pKa is too large (>330)
# The functionality of an acid or base can be determined by the number of dissociation constants it has.
n = len(k)
# Get the values for the [H+]^n power
h_vals = np.array([self.hydronium ** i for i in range(n, -1, -1)])
# Get the products of the k values.
k_vals = [np.prod(k[0:x]) for x in range(n + 1)]
# Prod and Sum the h and k values
denoms_arr = np.transpose(h_vals) * k_vals # Product of the sub-elements of the denominator
denoms = np.sum(denoms_arr, axis=1) # Sum of the sub-elements of the denominator
# Do the outermost alpha value calculation
alphas = np.transpose(np.divide(np.transpose(denoms_arr), denoms)) # Divide and re-transpose
if acid:
return np.array(alphas)
return np.flip(alphas, axis=0)
def trim_values(self, *args: Any) -> Generator:
"""Returns the data ranges where the volume is non-trivial and non-absurd."""
# Go until you are 1 past the last sub-reaction.
limiter = len(self.analyte.pKas) + 1
good_val_index = np.where((self.phi >= [0]) & (self.phi <= [limiter]))
# Trim the values for every chosen data set
rets = (arg[good_val_index] for arg in args) # Add the trimmed dataset to the return variable
return rets
def calculate_volume(self, acid_titrant: bool) -> Tuple[List, List]:
"""Calculate the volume of titrant required to reach each pH value."""
# Alpha values scaled by their index
scaled_alphas_analyte = self.scale_alphas(self.alpha_analyte)
scaled_alphas_titrant = self.scale_alphas(self.alpha_titrant)
# Sum the scaled alpha values. Axis=1 forces the summation to occur for each individual [H+] value.
summed_scaled_alphas_analyte = np.sum(scaled_alphas_analyte, axis=1)
summed_scaled_alphas_titrant = np.sum(scaled_alphas_titrant, axis=1)
# I found this written as delta somewhere, and thus it will be named.
delta = self.hydronium - self.hydroxide
# Conditional addition or subtraction based on the titrant.
if acid_titrant:
numerator = summed_scaled_alphas_analyte + (delta / self.concentration_analyte)
denominator = summed_scaled_alphas_titrant - (delta / self.concentration_titrant)
else:
numerator = summed_scaled_alphas_analyte - (delta / self.concentration_analyte)
denominator = summed_scaled_alphas_titrant + (delta / self.concentration_titrant)
# Solve for the volume
phi = numerator / denominator
volume = phi * self.volume_analyte * self.concentration_analyte / self.concentration_titrant
return volume, phi
def find_buffer_points(self) -> Tuple[List[int], np.array]:
"""Find the volumes of the buffer points based on the pKa values."""
pH, volume = self.trim_values(self.ph, self.volume_titrant)
pKas = np.array(self.analyte.pKas)
# All the volumes where the pH equals pKa
volume_indices = []
for pKa in pKas:
if pKa > 14: # Should never be larger than 14
continue
places = np.where(pH == closest_value(pKa, pH))[0][0]
volume_indices.append(places)
return volume[volume_indices], pKas
def find_equiv_points(self) -> Tuple[List, List]:
"""Find the equivalence points based on the progression of the reaction."""
pH, volume, phi = self.trim_values(self.ph, self.volume_titrant, self.phi)
points = []
for i in range(1, len(self.analyte.pKas) + 1):
closest = closest_value(i, phi)
points.append(np.where(phi == closest)[0][0])
return list(volume[points]), list(pH[points])
def deriv(self, degree: int) -> Tuple[np.array, np.array]:
"""Find the n-th derivative"""
pH, volume = self.trim_values(self.ph, self.volume_titrant)
# An object which makes splines
spline_maker = IUS(volume, pH)
# An object which calculates the derivative of those splines
deriv_function = spline_maker.derivative(n=degree)
# Calculate the derivative at all of the splines
d = deriv_function(volume)
return volume, d
| 39.072243 | 136 | 0.653854 | 1,457 | 10,276 | 4.515443 | 0.221002 | 0.024472 | 0.016416 | 0.01064 | 0.158535 | 0.128895 | 0.108223 | 0.076607 | 0.052592 | 0.052592 | 0 | 0.013556 | 0.253406 | 10,276 | 262 | 137 | 39.221374 | 0.843978 | 0.40327 | 0 | 0.1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003817 | 0 | 1 | 0.116667 | false | 0 | 0.033333 | 0 | 0.366667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
adb205adeaf6131b36e3e7497cadc1259b0bbc86 | 3,270 | py | Python | game_machine/sprites/film_strip.py | thismachinekillszombies/game_machine | 63e578ebb3e0d220e02f05f5e3c579c7d1cb1ae2 | [
"MIT"
] | null | null | null | game_machine/sprites/film_strip.py | thismachinekillszombies/game_machine | 63e578ebb3e0d220e02f05f5e3c579c7d1cb1ae2 | [
"MIT"
] | null | null | null | game_machine/sprites/film_strip.py | thismachinekillszombies/game_machine | 63e578ebb3e0d220e02f05f5e3c579c7d1cb1ae2 | [
"MIT"
] | null | null | null | from .sprite import Sprite
import pygame
import math
class FilmStrip(Sprite) :
def __init__(self,
image_file,
aperture,
no_of_frames = None,
frame = 0,
coords = (0, 0),
visible = True,
interactive = True,
transparency = True,
stop_frames = []) :
super(FilmStrip, self).__init__(coords, visible, \
interactive, transparency)
self.size = aperture
self._image = pygame.image.load(image_file)
if transparency :
self._image = self._image.convert_alpha()
self.__image_size = self._image.get_width(), self._image.get_height()
self.__image_centre = self.__image_size[0] / 2, self.__image_size[1] / 2
self._fw = int(self._image.get_width() / self.size[0])
self._fh = int(self._image.get_height() / self.size[1])
if no_of_frames is None :
self.no_of_frames = int(self._fw * self._fh)
else :
self.no_of_frames = no_of_frames
self._frame = frame
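# Precompute the blit offset of every frame: frames are laid out row-major on the
# strip image, so negative offsets make frame f land inside the aperture when the
# whole strip is blitted onto the sprite surface.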
self._frames_pos = [(-(f % self._fw) * self.size[0],
-int(f / self._fw) * self.size[1])
for f in range(self.no_of_frames)]
self._fill_needed = True
self.stop_frames = stop_frames
def fill_surface(self) :
super(FilmStrip, self).fill_surface()
self._surface.blit(self._image, self._frames_pos[self._frame])
self._fill_needed = False
def _draw(self) :
if self._surface is None or self._fill_needed :
self.fill_surface()
self.place_surface()
## def _draw(self, screen) :
## frame_image = pygame.Surface(self.aperture)
## offset = ((self.frame % self._fw) * self.aperture[0],
## (self.frame % self._fh) * self.aperture[1])
## frame_image.blit(self.image, (0, 0), pygame.Rect(offset, self.aperture))
## if self.rotation is not None:
## frame_image = pygame.transform.rotate(frame_image, self.rotation)
## screen_co_ords = screen.cartesian(self.co_ords, self.aperture)
## screen_co_ords = (screen_co_ords[0] - ((frame_image.get_width() -
## self.aperture[0]) / 2),
## screen_co_ords[1] - ((frame_image.get_height() -
## self.aperture[1]) / 2))
##
## screen.pygame_screen.blit(frame_image, screen_co_ords)
def update(self) :
super(FilmStrip, self).update()
if not self.frame in self.stop_frames :
self.advance()
def advance(self, by = 1) :
self.frame += by
self.frame = self.frame % self.no_of_frames
@property
def frame(self) :
return self._frame
@frame.setter
def frame(self, frame) :
old_frame = self._frame
self._frame = int(frame)
self._fill_needed = True
if frame != old_frame :
self.trigger('frame_change',
{'old_frame': old_frame, 'new_frame': frame})
| 36.741573 | 82 | 0.534251 | 373 | 3,270 | 4.380697 | 0.201072 | 0.077111 | 0.04284 | 0.034272 | 0.068543 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010372 | 0.351376 | 3,270 | 88 | 83 | 37.159091 | 0.760019 | 0.25107 | 0 | 0.033333 | 0 | 0 | 0.012417 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.116667 | false | 0 | 0.05 | 0.016667 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
adb22f6d3725c87feb06b09b3e9a07fbbf039413 | 8,078 | py | Python | assistant_dialog_skill_analysis/term_analysis/chi2_analyzer.py | GunnarHorve-ibm/assistant-dialog-skill-analysis | f48a5ea8dad7779f1a3ba0d785af4dc27fbfef95 | [
"Apache-2.0"
] | 1 | 2019-09-18T22:55:02.000Z | 2019-09-18T22:55:02.000Z | assistant_dialog_skill_analysis/term_analysis/chi2_analyzer.py | GunnarHorve-ibm/assistant-dialog-skill-analysis | f48a5ea8dad7779f1a3ba0d785af4dc27fbfef95 | [
"Apache-2.0"
] | null | null | null | assistant_dialog_skill_analysis/term_analysis/chi2_analyzer.py | GunnarHorve-ibm/assistant-dialog-skill-analysis | f48a5ea8dad7779f1a3ba0d785af4dc27fbfef95 | [
"Apache-2.0"
] | null | null | null | import re
from collections import Counter
import pandas as pd
import numpy as np
from IPython.display import display, Markdown, HTML
from sklearn.feature_selection import chi2
from sklearn.feature_extraction.text import CountVectorizer
from nltk import word_tokenize
from ..utils import skills_util
def strip_punctuations(utterance: str):
"""
function to strip punctuations from the utterance
:param utterance:
:return:
"""
normalization_pattern = "'s"
utterance = re.sub(normalization_pattern, " is", utterance)
punctuation_pattern = "|".join(skills_util.PUNCTUATION)
utterance = re.sub(punctuation_pattern, " ", utterance)
return utterance
def _preprocess_chi2(workspace_pd):
"""
Preprocess dataframe for chi2 analysis
:param workspace_pd: Preprocess dataframe for chi2
:return labels: intents processed
:return count_vectorizer: vectorizer instance
:return features: features from transform
"""
stopword_list = skills_util.STOP_WORDS
workspace_pd["utterance_punc_stripped"] = workspace_pd["utterance"].apply(
strip_punctuations
)
count_vectorizer = CountVectorizer(
min_df=1,
encoding="utf-8",
ngram_range=(1, 2),
stop_words=stopword_list,
tokenizer=word_tokenize,
token_pattern=r"(?u)\b\w+\b",
)
features = count_vectorizer.fit_transform(
workspace_pd["utterance_punc_stripped"]
).toarray()
labels = workspace_pd["intent"]
return labels, count_vectorizer, features
def _compute_chi2_top_feature(
features, labels, vectorizer, cls, significance_level=0.05
):
"""
Perform chi2 analysis, punctuation filtering and deduplication
:param features: count vectorizer features
:param labels: intents processed
:param vectorizer: count vectorizer instances
:param cls: classes for chi square
:param significance_level: specify an alpha
:return deduplicated_unigram:
:return deduplicated_bigram:
"""
features_chi2, pval = chi2(features, labels == cls)
feature_names = np.array(vectorizer.get_feature_names())
features_chi2 = features_chi2[pval < significance_level]
feature_names = feature_names[pval < significance_level]
indices = np.argsort(features_chi2)
feature_names = feature_names[indices]
unigrams = [v.strip() for v in feature_names if len(v.strip().split()) == 1]
deduplicated_unigram = list()
for unigram in unigrams:
if unigram not in deduplicated_unigram:
deduplicated_unigram.append(unigram)
bigrams = [v.strip() for v in feature_names if len(v.strip().split()) == 2]
deduplicated_bigram = list()
for bigram in bigrams:
if bigram not in deduplicated_bigram:
deduplicated_bigram.append(bigram)
return deduplicated_unigram, deduplicated_bigram
def get_chi2_analysis(workspace_pd, significance_level=0.05):
"""
find correlated unigram and bigram of each intent with Chi2 analysis
:param workspace_pd: dataframe, workspace data
:param significance_level: float, significance value to reject the null hypothesis
:return unigram_intent_dict:
:return bigram_intent_dict:
"""
labels, vectorizer, features = _preprocess_chi2(workspace_pd)
label_frequency_dict = dict(Counter(workspace_pd["intent"]).most_common())
N = 5
# keys are the set of unigrams/bigrams and value will be the intent
# maps one-to-many relationship between unigram and intent,
unigram_intent_dict = dict()
# maps one-to-many relationship between bigram and intent
bigram_intent_dict = dict()
classes = list()
chi_unigrams = list()
chi_bigrams = list()
for cls in label_frequency_dict.keys():
unigrams, bigrams = _compute_chi2_top_feature(
features, labels, vectorizer, cls, significance_level
)
classes.append(cls)
if unigrams:
chi_unigrams.append(", ".join(unigrams[-N:]))
else:
chi_unigrams.append("None")
if bigrams:
chi_bigrams.append(", ".join(bigrams[-N:]))
else:
chi_bigrams.append("None")
if unigrams:
if frozenset(unigrams[-N:]) in unigram_intent_dict:
unigram_intent_dict[frozenset(unigrams[-N:])].append(cls)
else:
unigram_intent_dict[frozenset(unigrams[-N:])] = list()
unigram_intent_dict[frozenset(unigrams[-N:])].append(cls)
if bigrams:
if frozenset(bigrams[-N:]) in bigram_intent_dict:
bigram_intent_dict[frozenset(bigrams[-N:])].append(cls)
else:
bigram_intent_dict[frozenset(bigrams[-N:])] = list()
bigram_intent_dict[frozenset(bigrams[-N:])].append(cls)
chi_df = pd.DataFrame(data={"Intent": classes})
chi_df["Correlated Unigrams"] = chi_unigrams
chi_df["Correlated Bigrams"] = chi_bigrams
display(Markdown(("## Chi-squared Analysis")))
with pd.option_context(
"display.max_rows",
None,
"display.max_columns",
None,
"display.max_colwidth",
100,
):
chi_df.index = np.arange(1, len(chi_df) + 1)
display(chi_df)
return unigram_intent_dict, bigram_intent_dict
def get_confusing_key_terms(keyterm_intent_map):
"""
Greedy search for overlapping intents
:param keyterm_intent_map: correlated terms
:return df: ambiguous terms data frame
"""
ambiguous_intents = list()
ambiguous_keywords = list()
intents_seen = list()
for i in range(len(keyterm_intent_map)):
correlated_unigrams = list(keyterm_intent_map.keys())[i]
current_label = keyterm_intent_map[correlated_unigrams]
intents_seen.append(current_label)
if len(keyterm_intent_map[correlated_unigrams]) > 1:
print(keyterm_intent_map[correlated_unigrams])
print(correlated_unigrams)
for other_correlated_unigrams in keyterm_intent_map.keys():
if keyterm_intent_map[other_correlated_unigrams] in intents_seen:
continue
overlap = correlated_unigrams.intersection(other_correlated_unigrams)
if overlap:
for keyword in overlap:
ambiguous_intents.append(
"<"
+ current_label[0]
+ ", "
+ keyterm_intent_map[other_correlated_unigrams][0]
+ ">"
)
ambiguous_keywords.append(keyword)
df = pd.DataFrame(
data={"Intent Pairs": ambiguous_intents, "Terms": ambiguous_keywords}
)
if not ambiguous_intents:
display(
Markdown("There is no ambiguity based on top 5 key terms in chi2 analysis")
)
else:
display_size = 10
if not df.empty:
if len(df) < display_size:
display_size = len(df)
display(HTML(df.sample(n=display_size).to_html(index=False)))
return df
def chi2_overlap_check(ambiguous_unigram_df, ambiguous_bigram_df, intent1, intent2):
"""
looks for intent overlap for specific intent or intent pairs
:param ambiguous_unigram_df:
:param ambiguous_bigram_df:
:param intent1:
:param intent2:
"""
intent = intent1 + ", " + intent2 + "|" + intent2 + ", " + intent1
part1 = None
part2 = None
if not ambiguous_unigram_df.empty:
part1 = ambiguous_unigram_df[
ambiguous_unigram_df["Intent Pairs"].str.contains(intent)
]
if not ambiguous_bigram_df.empty:
part2 = ambiguous_bigram_df[
ambiguous_bigram_df["Intent Pairs"].str.contains(intent)
]
if part1 is not None and part2 is not None:
display(HTML(pd.concat([part1, part2]).to_html(index=False)))
elif part1 is not None:
display(HTML(part1.to_html(index=False)))
elif part2 is not None:
display(HTML(part2.to_html(index=False)))
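# Minimal usage sketch (assumes a workspace dataframe with "utterance" and "intent"
# columns, which is what the functions above expect; the example rows are made up):
#
#   import pandas as pd
#   workspace_pd = pd.DataFrame({
#       "utterance": ["book a flight", "cancel my flight", "play some music"],
#       "intent": ["book_flight", "cancel_flight", "play_music"],
#   })
#   unigram_map, bigram_map = get_chi2_analysis(workspace_pd, significance_level=0.05)
#   ambiguous_unigrams = get_confusing_key_terms(unigram_map)
#   ambiguous_bigrams = get_confusing_key_terms(bigram_map)
#   chi2_overlap_check(ambiguous_unigrams, ambiguous_bigrams, "book_flight", "cancel_flight")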
| 33.106557 | 87 | 0.658331 | 935 | 8,078 | 5.458824 | 0.217112 | 0.027429 | 0.031348 | 0.02547 | 0.23413 | 0.16732 | 0.087382 | 0.074843 | 0.041144 | 0.041144 | 0 | 0.010079 | 0.250805 | 8,078 | 243 | 88 | 33.242798 | 0.833278 | 0.163159 | 0 | 0.106918 | 0 | 0 | 0.051103 | 0.006996 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037736 | false | 0 | 0.056604 | 0 | 0.125786 | 0.012579 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
adb3276743da54523ef3608b3b71f4fe0fc954cf | 2,101 | py | Python | module/embedding.py | liurenfeng007/DSRE | 7b3b257c68b1991b8b12c817a245af022a5fbeaa | [
"MIT"
] | null | null | null | module/embedding.py | liurenfeng007/DSRE | 7b3b257c68b1991b8b12c817a245af022a5fbeaa | [
"MIT"
] | null | null | null | module/embedding.py | liurenfeng007/DSRE | 7b3b257c68b1991b8b12c817a245af022a5fbeaa | [
"MIT"
] | null | null | null | import torch.nn as nn
import torch
import torch.nn.functional as F
class Embedding(nn.Module):
def __init__(self, opt, data_word_vec):
super(Embedding, self).__init__()
self.opt = opt
self.word_embedding = nn.Embedding(self.opt.vocab_size, self.opt.word_dim)
self.pos1_embedding = nn.Embedding(self.opt.pos_size, self.opt.pos_dim)
self.pos2_embedding = nn.Embedding(self.opt.pos_size, self.opt.pos_dim)
self.init_word_weights(data_word_vec)
self.init_pos_weights()
self.word = None
self.pos1 = None
self.pos2 = None
def init_pos_weights(self):
nn.init.xavier_uniform_(self.pos1_embedding.weight.data)
if self.pos1_embedding.padding_idx is not None:
self.pos1_embedding.weight.data[self.pos1_embedding.padding_idx].fill_(0)
nn.init.xavier_uniform_(self.pos2_embedding.weight.data)
if self.pos2_embedding.padding_idx is not None:
self.pos2_embedding.weight.data[self.pos2_embedding.padding_idx].fill_(0)
def init_word_weights(self, data_word_vec):
self.word_embedding.weight.data.copy_(torch.from_numpy(data_word_vec))
# nn.init.xavier_uniform_(self.word_embedding.weight.data)
def forward(self):
word = self.word_embedding(self.word) # [sent_num, sent_max_length, word_dim]
pos1 = self.pos1_embedding(self.pos1) # [sent_num, sent_max_length, pos_dim]
pos2 = self.pos2_embedding(self.pos2) # [sent_num, sent_max_length, pos_dim]
embedding = torch.cat((word, pos1, pos2), dim=2) # [sent_num, sent_max_length, word_dim+2*pos_dim]
return embedding
if __name__ == '__main__':
from config import get_args
import numpy as np
opt = get_args()
data_word_vec = np.load('F:\LCS\RE\DSRE\data/vec.npy')
model = Embedding(opt, data_word_vec)
model.word = torch.LongTensor([[1, 2, 4, 5], [4, 3, 2, 9]])
model.pos1 = torch.LongTensor([[2, 2, 4, 5], [4, 3, 2, 9]])
model.pos2 = torch.LongTensor([[3, 2, 4, 5], [4, 3, 2, 9]])
out = model()
print(out.shape)
| 38.907407 | 107 | 0.675869 | 320 | 2,101 | 4.153125 | 0.209375 | 0.042137 | 0.049661 | 0.042137 | 0.470278 | 0.227991 | 0.227991 | 0.094808 | 0.076749 | 0.076749 | 0 | 0.029744 | 0.199905 | 2,101 | 53 | 108 | 39.641509 | 0.760857 | 0.102808 | 0 | 0 | 0 | 0 | 0.018637 | 0.014377 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097561 | false | 0 | 0.121951 | 0 | 0.268293 | 0.02439 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
adb3927a1293df7444df7bc6ece622aed9648c83 | 15,579 | py | Python | dbsa/redshift.py | dataleio/dbsa | dcd148a01aa6c025c249373e3a49120ca1b01b59 | [
"MIT"
] | 3 | 2020-07-06T13:58:02.000Z | 2022-02-04T15:35:56.000Z | dbsa/redshift.py | bfaludi/dbsa | dcd148a01aa6c025c249373e3a49120ca1b01b59 | [
"MIT"
] | null | null | null | dbsa/redshift.py | bfaludi/dbsa | dcd148a01aa6c025c249373e3a49120ca1b01b59 | [
"MIT"
] | null | null | null | from . import (
Boolean,
Tinyint,
Smallint,
Integer,
Bigint,
Real,
Double,
Decimal,
Varchar,
Char,
Date,
Timestamp,
Sortkey,
DistributionKey,
DistributionStyle,
cleanup_fn,
Dialect as BaseDialect,
)
from jinja2 import Template
import json
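# Column compression encodings supported by Redshift CREATE TABLE column definitions.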
COLUMN_ENCODE = ['BYTEDICT', 'DELTA', 'DELTA32K', 'LZO', 'MOSTLY8', 'MOSTLY16', 'MOSTLY32', 'RAW', 'RUNLENGTH', 'TEXT255', 'TEXT32K', 'ZSTD']
class Table(BaseDialect):
_column_types = {
Boolean: 'BOOLEAN',
Tinyint: 'TINYINT',
Smallint: 'SMALLINT',
Integer: 'INTEGER',
Bigint: 'BIGINT',
Real: 'REAL',
Double: 'FLOAT',
Decimal: 'NUMERIC({{ precision }},{{ scale }})',
Varchar: 'VARCHAR({{ length }})',
Char: 'CHAR({{ length }})',
Date: 'DATE',
Timestamp: 'TIMESTAMP',
}
_req_properties = {
Tinyint: {'encode'},
Smallint: {'encode'},
Integer: {'encode'},
Bigint: {'encode'},
Real: {'encode'},
Double: {'encode'},
Decimal: {'precision', 'scale', 'encode'},
Char: {'length', 'encode'},
Varchar: {'length', 'encode'},
Date: {'encode'},
Timestamp: {'encode'},
Sortkey: {'keys'},
DistributionKey: {'key'},
DistributionStyle: {'style'},
}
_property_types = {
Sortkey: 'SORTKEY({% for c in keys %}"{{ c }}"{% if not loop.last %}, {% endif%}{% endfor %})',
DistributionKey: 'DISTKEY("{{ key }}")',
DistributionStyle: 'DISTSTYLE {{ style }}',
}
_how_to_quote_table = '"{}"'
_how_to_quote_column = '"{}"'
_column_setter = '{} AS {}'
_sample_value_function = 'MAX({c})'
ENCODE=dict(zip(COLUMN_ENCODE, COLUMN_ENCODE))
@property
def jsonpath(self):
return json.dumps({
'jsonpaths': [
c.attrs['jsonpath']
for c in self.table.columns()
if 'jsonpath' in c.attrs
]
})
def get_create_table(self, filter_fn=None, suffix=''):
return Template("""
CREATE TABLE IF NOT EXISTS {{ t.full_table_name(quoted=True, with_prefix=True, suffix=suffix) }} (
{%- for column in t.columns(filter_fn=filter_fn) %}
{{ column.quoted_name }} {{ column.column_type }}{% if column.default_value %} DEFAULT {{ column.default_value }}{% endif %}{% if column.encode %} ENCODE {{ column.encode|upper }}{% endif %}{% if not loop.last %},{% endif %}
{%- endfor %}
)
{%- for property in t.properties %}
{{ property }}
{%- endfor %};
""").render(t=self.table, filter_fn=filter_fn, suffix=suffix)
def get_create_table_as(self, select, embed_select=True, filter_fn=None, suffix=''):
return Template("""
CREATE TABLE IF NOT EXISTS {{ t.full_table_name(quoted=True, with_prefix=True, suffix=suffix) }}
{%- for property in t.properties %}
{{ property }}
{%- endfor %} AS
SELECT
{%- for column_value in t.column_values(filter_fn=filter_fn) %}
{{ column_value }}{% if not loop.last %},{% endif %}
{%- endfor %}
FROM {{ select if not embed_select else '({}) AS vw'.format(select.strip().strip(';')) }};
""").render(t=self.table, select=select, embed_select=embed_select, filter_fn=filter_fn, suffix=suffix)
def get_create_external_table(self, hdfs_path, fileformat, tblformat, tblproperties=None, filter_fn=None, suffix=''):
return Template("""
CREATE EXTERNAL TABLE {{ t.full_table_name(quoted=True, with_prefix=True, suffix=suffix) }} (
{%- for column in t.columns(filter_fn=filter_fn, include_partitions=False) %}
{{ column.quoted_name }} {{ column.column_type }}{% if not loop.last %},{% endif %}
{%- endfor %}
)
{%- if t.partitions %}
PARTITIONED BY (
{%- for partition in t.partitions %}
{{ partition.quoted_name }} {{ partition.column_type }}{% if not loop.last %},{% endif %}
{%- endfor %}
)
{%- endif %}
{{ tblformat }}
STORED AS {{ fileformat }}
LOCATION '{{ hdfs_path }}'
{%- if tblproperties %}
TABLE PROPERTIES ({{ ','.join(tblproperties) }})
{%- endif %}
""").render(t=self.table, filter_fn=filter_fn, suffix=suffix, tblformat=tblformat, fileformat=fileformat, tblproperties=tblproperties, hdfs_path=hdfs_path)
def get_create_staging_table(self, cleanup_fn=cleanup_fn, filter_fn=None, include_partitions=False, suffix=''):
return Template("""
CREATE TABLE IF NOT EXISTS {{ t.full_staging_table_name(cleanup_fn=cleanup_fn, quoted=True, with_prefix=True, suffix=suffix) }} (
{%- for column in t.columns(filter_fn=filter_fn, include_partitions=include_partitions) %}
{{ column.quoted_name }} {{ column.column_type}}{% if column.default_value %} DEFAULT {{ column.default_value }}{% endif %}{% if column.encode %} ENCODE {{ column.encode|upper }}{% endif %}{% if not loop.last %},{% endif %}
{%- endfor %}
);
""").render(t=self.table, cleanup_fn=cleanup_fn, filter_fn=filter_fn, include_partitions=include_partitions, suffix=suffix)
def get_add_external_current_partition(self, hdfs_path=None, condition='', params=None, ignored_partitions=None, suffix=''):
return Template("""
ALTER TABLE {{ t.full_table_name(quoted=True, with_prefix=True, suffix=suffix) }} ADD IF NOT EXISTS PARTITION(
{{ condition }}
) LOCATION '{{ hdfs_path }}'
""").render(
t=self.table,
suffix=suffix,
hdfs_path=hdfs_path,
condition=self.table.get_current_partition_condition(condition, ignored_partitions, sep=', ') \
.format(**self.table.get_current_partition_params(params))
)
def get_delete_external_current_partition(self, condition='', params=None, ignored_partitions=None, suffix=''):
return Template("""
ALTER TABLE {{ t.full_table_name(quoted=True, with_prefix=True, suffix='') }} DROP IF EXISTS PARTITION(
{{ condition }}
)
""").render(
t=self.table,
suffix=suffix,
condition=self.table.get_current_partition_condition(condition, ignored_partitions, sep=', ') \
.format(**self.table.get_current_partition_params(params))
)
def get_drop_table(self, suffix=''):
return Template("""
DROP TABLE IF EXISTS {{ t.full_table_name(quoted=True, with_prefix=True, suffix=suffix) }};
""").render(t=self.table, suffix=suffix)
def get_drop_staging_table(self, suffix=''):
return Template("""
DROP TABLE IF EXISTS {{ t.full_staging_table_name(quoted=True, with_prefix=True, suffix=suffix) }};
""").render(t=self.table, suffix=suffix)
def get_truncate_table(self, suffix=''):
return Template("""
TRUNCATE TABLE {{ t.full_table_name(quoted=True, with_prefix=True, suffix=suffix) }};
""").render(t=self.table, suffix=suffix)
def get_update_current_partition_for_manually_set_columns(self, suffix='', condition='', ignored_partitions=None, params=None):
filter_fn = lambda x: x.manually_set
if not len(self.table.columns(filter_fn=filter_fn, include_partitions=False)):
return ''
return Template("""
UPDATE {{ t.full_table_name(quoted=True, with_prefix=True) }}
SET
{%- for column in t.columns(filter_fn=filter_fn, include_partitions=False) %}
{{ column.quoted_name }} = {{ column.value }}{% if not loop.last %},{% endif %}
{%- endfor %}
{%- if condition %}
WHERE {{ condition }}
{%- endif %}
""").render(t=self.table, suffix=suffix, filter_fn=filter_fn,
condition=self.table.get_current_partition_condition(condition, ignored_partitions) \
.format(**self.table.get_current_partition_params(params)))
def get_copy_to_staging(self, cleanup_fn=cleanup_fn, filter_fn=None, include_partitions=False, suffix=''):
return Template("""
COPY {{ t.full_staging_table_name(cleanup_fn=cleanup_fn, quoted=True, with_prefix=True, suffix=suffix) }} (
{%- for column in t.columns(filter_fn=filter_fn, include_partitions=include_partitions) %}
{{ column.quoted_name }}{% if not loop.last %},{% endif %}
{%- endfor %}
)
{% raw %}
FROM '{{ '{{ path_prefix }}://{{ path }}' }}'
{{ '{% if access_key and secret_key %}' }}
WITH CREDENTIALS '{{ 'aws_access_key_id={{ access_key }};aws_secret_access_key={{ secret_key }}' }}'
{{ '{% else %}' }}
IAM_ROLE '{{ '{{ iam_role }}' }}'
{{ '{% endif %}' }}
{{ '{{ copy_options }}' }}
{% endraw %};
""").render(t=self.table, cleanup_fn=cleanup_fn, filter_fn=filter_fn, include_partitions=include_partitions, suffix=suffix)
def get_select(self, filter_fn=None, suffix='', condition='', order_by_sortkey=False, use_star=False, transforms=None, limit=None):
sortkey = self.table.get_property_by_type(Sortkey) \
if order_by_sortkey \
else None
return Template("""
SELECT
{%- if use_star %}
*
{%- else %}
{%- for column in t.columns(filter_fn=filter_fn) %}
{% if tf[column.name] %}{{ tf[column.name].format(c=column.quoted_name) }} AS {{ column.quoted_name }}{% else %}{{ column.quoted_name }}{% endif %}{% if not loop.last %},{% endif %}
{%- endfor %}
{%- endif %}
FROM {{ t.full_table_name(quoted=True, with_prefix=True, suffix=suffix) }}
{%- if condition %}
WHERE {{ condition }}
{%- endif %}
{%- if sortkey %}
ORDER BY {% for c in sortkey.attrs['keys'] %}"{{ c }}"{% if not loop.last %}, {% endif %}{% endfor %}
{%- endif %}
{%- if limit %}
LIMIT {{ limit }}
{%- endif %}
""").render(t=self.table, limit=limit, filter_fn=filter_fn, suffix=suffix, condition=condition, sortkey=sortkey, use_star=use_star, tf=transforms or {})
def get_unload_table(self, filter_fn=None):
return self.get_unload_via_select(select=self.get_select(filter_fn))
@classmethod
def get_unload_via_select(cls, select):
return Template(Template("""
UNLOAD ('
{{ select }}
')
TO '{{ 's3://{{ s3_bucket }}/{{ s3_key }}' }}'
{{ '{% if access_key and secret_key %}' }}
WITH CREDENTIALS '{{ 'aws_access_key_id={{ access_key }};aws_secret_access_key={{ secret_key }}' }}'
{{ '{% else %}' }}
IAM_ROLE '{{ '{{ iam_role }}' }}'
{{ '{% endif %}' }}
{{ '{{ unload_options }}' }};
""").render(select=select.strip().strip(';').translate(str.maketrans({"'": r"\'"}))))
def get_delete_from(self, condition=None, params=None, using=None, suffix=''):
r = Template("""
DELETE FROM {{ t.full_table_name(quoted=True, with_prefix=True, suffix=suffix) }}
{%- if using %}
USING {{ using }} AS u
{%- endif %}
{%- if condition %}
WHERE {{ condition }}
{%- endif %};
""").render(t=self.table, condition=condition, using=using, suffix=suffix)
if params:
return r.format(**(params or {}))
return r
def get_delete_upsert(self, pk_columns, cleanup_fn=cleanup_fn, using=None, params=None, suffix=''):
table = self.table.full_table_name(quoted=True, with_prefix=True, suffix=suffix)
if not using:
using = self.table.full_staging_table_name(cleanup_fn=cleanup_fn, quoted=True, with_prefix=True, suffix=suffix)
condition = ' AND '.join(
'u."{c}" = {table}."{c}"'.format(c=c, table=table)
for c in pk_columns
)
return self.get_delete_from(condition, using=using, params=params, suffix=suffix)
def get_insert_into_from_table(self, source_table_name, filter_fn=None, suffix=''):
return self.get_insert_into_via_select(select=source_table_name, filter_fn=filter_fn, embed_select=False, suffix=suffix)
def get_insert_into_via_select(self, select, filter_fn=None, embed_select=True, suffix=''):
return Template("""
INSERT INTO {{ t.full_table_name(quoted=True, with_prefix=True, suffix=suffix) }} (
{%- for column in t.columns(filter_fn=filter_fn) %}
{{ column.quoted_name }}{% if not loop.last %},{% endif %}
{%- endfor %}
)
SELECT
{%- for column_value in t.column_values(filter_fn=filter_fn) %}
{{ column_value }}{% if not loop.last %},{% endif %}
{%- endfor %}
FROM {{ select if not embed_select else '({}) AS vw'.format(select.strip().strip(';')) }};
""").render(t=self.table, select=select, embed_select=embed_select, filter_fn=filter_fn, suffix=suffix)
def get_drop_current_partition_view(self, suffix='_latest'):
return Template("""
DROP VIEW IF EXISTS {{ t.full_table_name(quoted=True, with_prefix=True, suffix=suffix) }};
""").render(t=self.table, suffix=suffix)
def get_create_current_partition_view(self, suffix='_latest', condition='', ignored_partitions=None, params=None, transforms=None):
return Template("""
CREATE OR REPLACE VIEW {{ t.full_table_name(quoted=True, with_prefix=True, suffix=suffix) }} AS
{{ select }};
""").render(
t=self.table,
select=self.get_select_current_partition(condition=condition, ignored_partitions=ignored_partitions, params=params, transforms=transforms),
suffix=suffix,
)
def get_create_materialized_view_via_select(self, select, filter_fn=None, embed_select=True, suffix=''):
return Template("""
CREATE MATERIALIZED VIEW {{ t.full_table_name(quoted=True, with_prefix=True, suffix=suffix) }}
{%- for property in t.properties %}
{{ property }}
{%- endfor %} AS
SELECT
{%- for column_value in t.column_values(filter_fn=filter_fn) %}
{{ column_value }}{% if not loop.last %},{% endif %}
{%- endfor %}
FROM {{ select if not embed_select else '({}) AS vw'.format(select.strip().strip(';')) }};
""").render(t=self.table, select=select, embed_select=embed_select, filter_fn=filter_fn, suffix=suffix)
def get_drop_materialized_view(self, suffix=''):
return Template("""
DROP MATERIALIZED VIEW {{ t.full_table_name(quoted=True, with_prefix=True, suffix=suffix) }};
""").render(t=self.table, suffix=suffix)
def get_refresh_materialized_view(self, suffix=''):
return Template("""
REFRESH MATERIALIZED VIEW {{ t.full_table_name(quoted=True, with_prefix=True, suffix=suffix) }};
""").render(t=self.table, suffix=suffix)
| 46.783784 | 238 | 0.578086 | 1,713 | 15,579 | 5.033859 | 0.104495 | 0.050099 | 0.028992 | 0.048707 | 0.651861 | 0.619506 | 0.568248 | 0.5541 | 0.530906 | 0.504117 | 0 | 0.001406 | 0.269465 | 15,579 | 332 | 239 | 46.924699 | 0.75626 | 0 | 0 | 0.36755 | 0 | 0.062914 | 0.518133 | 0.090956 | 0 | 0 | 0 | 0 | 0 | 1 | 0.07947 | false | 0 | 0.009934 | 0.066225 | 0.205298 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
adb658dad45a81d9ded998d77bf22e4ee81c82a0 | 24,320 | py | Python | src/word_senser.py | glicerico/wordcat_transformer | f1b2f105c0878aeac5755003eafebd23edf49cad | [
"Apache-2.0"
] | null | null | null | src/word_senser.py | glicerico/wordcat_transformer | f1b2f105c0878aeac5755003eafebd23edf49cad | [
"Apache-2.0"
] | null | null | null | src/word_senser.py | glicerico/wordcat_transformer | f1b2f105c0878aeac5755003eafebd23edf49cad | [
"Apache-2.0"
] | null | null | null | # Based on the code by Wiedemann et al. (2019, github.com/uhh-lt/bert-sense), and
# modified for unsupervised word-sense disambiguation purposes
# Tries to disambiguate words from sentences in plain text file.
# Similar code that works with sentences from an XML file is explored in word_senser_XML.py
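# Example invocation (a sketch; file paths and parameter values are placeholders):
#   python word_senser.py --corpus corpus.txt --norm_pickle norm.pickle --norm_file norm_sents.txt \
#       --clustering KMeans --start_k 5 --end_k 10 --save_to output_dir --pickle_emb emb.pickle --use_cuda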
import os
import pickle
import argparse
import numpy as np
import random as rand
from sklearn.cluster import KMeans, DBSCAN, OPTICS
from sklearn.preprocessing import normalize
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from spherecluster import SphericalKMeans, VonMisesFisherMixture
import matplotlib.pyplot as plt
from tqdm import tqdm
import warnings
from BertModel import BertLM, BertTok
warnings.filterwarnings('ignore')
MASK = '[MASK]'
class WordSenseModel:
def __init__(self, pretrained_model, device_number='cuda:1', use_cuda=True, freq_threshold=5):
self.sentences = [] # List of corpus textual sentences
self.vocab_map = dict() # Dictionary with counts and coordinates of every occurrence of each word
self.function_words = dict() # Dictionary with function words (most frequent words, skipped during disambiguation)
self.cluster_centroids = dict() # Dictionary with cluster centroid embeddings for word senses
self.matrix = [] # sentence-word matrix, containing instance vectors to cluster
self.pretrained_model = pretrained_model
self.device_number = device_number
self.use_cuda = use_cuda
self.lang_mod = None
self.estimator = None # Clustering object
self.save_dir = None # Directory to save disambiguated senses
self.freq_threshold = freq_threshold
def apply_bert_tokenizer(self, word):
return self.lang_mod.tokenizer.tokenize(word)
def load_matrix(self, pickle_filename, corpus_file, verbose=False, norm_pickle=None, norm_file=''):
"""
First pass on the corpus sentences. If pickle file is present, load data; else, calculate it.
This method:
a) Stores sentences as an array.
b) Creates dictionary where each vocabulary word is mapped to its occurrences in corpus.
c) Calculates instance-word matrix, for instances and vocab words in corpus.
:param norm_file:
:param norm_pickle:
:param verbose:
:param pickle_filename
:param corpus_file
"""
try:
with open(pickle_filename, 'rb') as h:
_data = pickle.load(h)
self.sentences = _data[0]
self.vocab_map = _data[1]
self.matrix = _data[2]
print("MATRIX FOUND!")
# Load tokenizer, needed by export_clusters method
self.lang_mod = BertTok(self.pretrained_model)
except:
print("MATRIX File Not Found!! \n")
print("Loading Bert MLM...")
self.lang_mod = BertLM(self.pretrained_model, self.device_number, self.use_cuda)
# Calculate normalization scores
self.lang_mod.load_norm_scores(norm_pickle, norm_file)
print("Loading vocabulary")
self.get_vocabulary(corpus_file, verbose=verbose)
print("Calculate matrix...")
self.calculate_matrix(verbose=verbose)
with open(pickle_filename, 'wb') as h:
_data = (self.sentences, self.vocab_map, self.matrix)
pickle.dump(_data, h)
print("Data stored in " + pickle_filename)
def get_words(self, tokenized_sent):
"""
Returns the complete words in a BERT-tokenized sentence (merges sub-words)
:param tokenized_sent:
:return:
"""
sentence = self.lang_mod.tokenizer.convert_tokens_to_string(tokenized_sent[1:-1]) # Ignore boundary tokens
return sentence.split()
def find_function_words(self, functional_threshold):
"""
Find top words from vocabulary, assuming that most common words are functional words,
which we don't want to disambiguate
:param functional_threshold: Fraction of words to remove
"""
sorted_vocab = sorted(self.vocab_map.items(), key=lambda kv: len(kv[1])) # Sort words by frequency
nbr_functionwords = int(len(sorted_vocab) * functional_threshold) # Nbr of function words
if nbr_functionwords > 0: # Prevent choosing all words if nbr_functionwords is zero
self.function_words = dict(sorted_vocab[-nbr_functionwords:]) # List most common words
def get_vocabulary(self, corpus_file, verbose=False):
"""
Reads all word instances in file, stores their location
:param verbose:
:param corpus_file: file to get vocabulary
"""
with open(corpus_file, 'r') as fi:
instance_nbr = 0
# Process each sentence in corpus
for sent_nbr, sent in tqdm(enumerate(fi)):
bert_tokens = self.lang_mod.tokenize_sent(sent)
words = self.get_words(bert_tokens)
self.sentences.append(words)
# Store word instances in vocab_map
for word_pos, word in enumerate(words):
if word not in self.vocab_map:
self.vocab_map[word] = []
# TODO: Can avoid storing coordinates if not CAPS target word in export_clusters
self.vocab_map[word].append((sent_nbr, word_pos, instance_nbr)) # Register instance location
instance_nbr += 1
if verbose:
print("Vocabulary:")
print(self.vocab_map)
print(f"Vocabulary size: {len(self.vocab_map)}")
def calculate_matrix(self, verbose=False):
"""
Calculates embeddings for all word instances in corpus_file
"""
instances = {} # Stores matrix indexes for each instance embedding
embeddings_count = 0 # Counts embeddings created (matrix row nbr)
# Process each sentence in corpus
for words in tqdm(self.sentences):
print(f"Processing sentence: {words}")
bert_tokens = self.lang_mod.tokenize_sent(" ".join(words))
word_starts = [index for index, token in enumerate(bert_tokens) if not token.startswith("##")]
# Replace all words in sentence to get their instance-embeddings
for word_pos, word in tqdm(enumerate(words)):
print(f"Processing {word} (position {word_pos}) with all vocabulary.")
if word not in instances:
instances[word] = []
instances[word].append(embeddings_count)
embeddings_count += 1
embedding = [] # Store one word instance (sentence with blank) embedding
# Calculate common part of sentence probability steps for all words to fill
# Will only be used when replacement word is composed of one token, otherwise, we need to do the
# whole calculation
left_sent = bert_tokens[:word_starts[word_pos + 1]]
right_sent = bert_tokens[word_starts[word_pos + 2]:]
common_probs = self.get_common_probs(left_sent, right_sent, verbose=verbose)
# Calculate sentence's probabilities with different filling words: embedding
for repl_word in self.vocab_map.keys():
word_tokens = self.lang_mod.tokenizer.tokenize(repl_word)
if len(word_tokens) > 1: # Ignore common probs; do whole calculation
replaced_sent = left_sent + word_tokens + right_sent
score = self.lang_mod.get_sentence_prob_directional(replaced_sent, verbose=verbose)
sent_len = len(replaced_sent)
else:
score = self.complete_probs(common_probs, left_sent, right_sent, repl_word)
sent_len = len(left_sent) + len(right_sent) + 1
# curr_prob = self.lang_mod.normalize_score(sent_len, score)
# embedding.append(curr_prob)
embedding.append(score)
# Store this sentence embeddings in the general list
self.matrix.append(normalize([embedding])[0]) # Store embedding normalized to unit vector
def complete_probs(self, common_probs, left_sent, right_sent, word_token, verbose=False):
"""
Given the common probability calculations for a sentence, complete calculations filling blank with word_tokens
"""
preds_blank_left, preds_blank_right, log_sent_prob_forw, log_sent_prob_back = common_probs
temp_left = left_sent[:]
temp_right = right_sent[:]
# Get probabilities for word filling the blank: b) and g)
log_sent_prob_forw += self.get_log_prob(preds_blank_left, word_token, len(left_sent), verbose=verbose)
log_sent_prob_back += self.get_log_prob(preds_blank_right, word_token, len(left_sent), verbose=verbose)
# Get remaining probs with blank filled: c), d), and h)
for i in range(1, len(right_sent)): # d), c)
temp_right[-1 - i] = MASK
repl_sent = left_sent + [word_token] + temp_right
predictions = self.lang_mod.get_predictions(repl_sent)
log_sent_prob_forw += self.get_log_prob(predictions, right_sent[-1 - i], -1 - i, verbose=verbose)
for j in range(len(left_sent) - 1): # h)
temp_left[1 + j] = MASK
repl_sent = temp_left + [word_token] + right_sent
predictions = self.lang_mod.get_predictions(repl_sent)
log_sent_prob_back += self.get_log_prob(predictions, left_sent[1 + j], 1 + j, verbose=verbose)
# Obtain geometric average of forward and backward probs
log_geom_mean_sent_prob = 0.5 * (log_sent_prob_forw + log_sent_prob_back)
if verbose:
print(f"Raw forward sentence probability: {log_sent_prob_forw}")
print(f"Raw backward sentence probability: {log_sent_prob_back}\n")
print(f"Average normalized sentence prob: {log_geom_mean_sent_prob}\n")
return np.power(10, log_geom_mean_sent_prob)
def get_log_prob(self, predictions, token, position, verbose=False):
"""
Given BERT's predictions, return probability for required token, in required position
"""
probs_first = self.lang_mod.sm(predictions[0, position]) # Softmax to get probabilities for first (sub)word
if verbose:
self.lang_mod.print_top_predictions(probs_first)
log_prob_first = probs_first[self.lang_mod.tokenizer.convert_tokens_to_ids(token)]
return np.log10(log_prob_first.detach().cpu().numpy())
def get_common_probs(self, left_sent, right_sent, verbose=False):
"""
Calculate partial forward and backwards probabilities of sentence probability estimation, for
the sections that are common to all iterations of a fill-in-the-blank process.
Example sentence: "Not ___ real sentence". We need probabilities:
FORWARD:
a) P(M1 = Not |M1 M2 M3 M4)
b) P(M2 = ___ |Not M2 M3 M4)
c) P(M3 = real |Not ___ M3 M4)
d) P(M4 = sentence |Not ___ real M4)
BACKWARD:
e) P(M4 = sentence |M1 M2 M3 M4)
f) P(M3 = real |M1 M2 M3 sentence)
g) P(M2 = ___ |M1 M2 real sentence)
h) P(M1 = Not |M1 ___ real sentence)
:param left_sent: Tokens before the blank
:param right_sent: Tokens after the blank
:param verbose:
:return: log10(a)) as log_common_prob_forw,
log10(e) * f)) as log_common_prob_back,
The whole vocabulary prediction array for both b) and g), to be used later by all
words filling the blank.
"""
masks_left = ['[CLS]'] + [MASK] * (len(left_sent) - 1)
masks_right = [MASK] * (len(right_sent) - 1) + ['[SEP]']
temp_left = left_sent[:]
temp_right = right_sent[:]
log_common_prob_forw = 0
log_common_prob_back = 0
# Estimate a) and e) if they are not the position of the blank
repl_sent = masks_left + [MASK] + masks_right # Fully masked sentence
predictions = self.lang_mod.get_predictions(repl_sent)
if len(left_sent) > 1:
log_common_prob_forw += self.get_log_prob(predictions, left_sent[1], 1, verbose=verbose)
if len(right_sent) > 1:
log_common_prob_back += self.get_log_prob(predictions, right_sent[-2], len(repl_sent) - 2, verbose=verbose)
# Get all predictions for b)
repl_sent = left_sent + [MASK] + masks_right
preds_blank_left = self.lang_mod.get_predictions(repl_sent)
# Get all predictions for g)
repl_sent = masks_left + [MASK] + right_sent
preds_blank_right = self.lang_mod.get_predictions(repl_sent)
# Estimate common probs for forward sentence probability
for i in range(1, len(left_sent) - 1): # Skip [CLS] token
temp_left[-i] = MASK
repl_sent = temp_left + [MASK] + masks_right
predictions = self.lang_mod.get_predictions(repl_sent)
log_common_prob_forw += self.get_log_prob(predictions, left_sent[-i], len(left_sent) - i, verbose=verbose)
# Estimate common probs for backwards sentence probability (f in the example)
for j in range(len(right_sent) - 2):
temp_right[j] = MASK
repl_sent = masks_left + [MASK] + temp_right
predictions = self.lang_mod.get_predictions(repl_sent)
log_common_prob_back += self.get_log_prob(predictions, right_sent[j], len(left_sent) + 1 + j,
verbose=verbose)
return preds_blank_left, preds_blank_right, log_common_prob_forw, log_common_prob_back
@staticmethod
def plot_instances(embeddings, labels, word):
"""
Plot word-instance embeddings
:param embeddings:
:param labels:
:param word:
:return:
"""
# PCA processing
comps_pca = min(3, len(embeddings))
pca = PCA(n_components=comps_pca)
pca_result = pca.fit_transform(embeddings)
print('Explained variation per principal component: {}'.format(pca.explained_variance_ratio_))
# t-SNE processing
tsne = TSNE(n_components=2, verbose=1, perplexity=40, n_iter=300)
tsne_results = tsne.fit_transform(embeddings)
# PLOTTING
plt.figure()
plt.subplot(211)
plt.scatter(pca_result[:, 0], pca_result[:, 1], c=labels)
plt.title(word)
plt.subplot(212)
plt.scatter(tsne_results[:, 0], tsne_results[:, 1], c=labels)
plt.show()
print("PLOTTED")
def init_estimator(self, save_to, clust_method='OPTICS', **kwargs):
if clust_method == 'OPTICS':
min_samples = kwargs.get('min_samples', 1)
# Init clustering object
self.estimator = OPTICS(min_samples=min_samples, metric='cosine', n_jobs=4)
self.save_dir = save_to + "_OPTICS_minsamp" + str(min_samples)
elif clust_method == 'KMeans':
k = kwargs.get('k', 5) # 5 is default value, if no kwargs were passed
self.freq_threshold = max(self.freq_threshold, k)
self.estimator = KMeans(init="k-means++", n_clusters=k, n_jobs=4)
self.save_dir = save_to + "_KMeans_k" + str(k)
elif clust_method == 'DBSCAN':
min_samples = kwargs.get('min_samples', 2)
eps = kwargs.get('eps', 0.3)
self.estimator = DBSCAN(metric='cosine', n_jobs=4, min_samples=min_samples, eps=eps)
self.save_dir = save_to + "_DBSCAN_minsamp" + str(min_samples) + '_eps' + str(eps)
elif clust_method == 'SphericalKMeans':
k = kwargs.get('k', 5) # 5 is default value, if no kwargs were passed
self.freq_threshold = max(self.freq_threshold, k)
self.estimator = SphericalKMeans(n_clusters=k, n_jobs=4)
self.save_dir = save_to + "_SphericalKMeans_k" + str(k)
elif clust_method == 'movMF-soft':
k = kwargs.get('k', 5) # 5 is default value, if no kwargs were passed
self.freq_threshold = max(self.freq_threshold, k)
self.estimator = VonMisesFisherMixture(n_clusters=k, posterior_type="soft")
self.save_dir = save_to + "_movMF-soft_k" + str(k)
elif clust_method == 'movMF-hard':
k = kwargs.get('k', 5) # 5 is default value, if no kwargs were passed
self.freq_threshold = max(self.freq_threshold, k)
self.estimator = VonMisesFisherMixture(n_clusters=k, posterior_type="hard")
self.save_dir = save_to + "_movMF-hard_k" + str(k)
else:
print("Clustering methods implemented are: OPTICS, DBSCAN, KMeans, SphericalKMeans, movMF-soft, movMF-hard")
exit(1)
def disambiguate(self, pickle_cent='test_cent.pickle', plot=False):
"""
Disambiguate word senses through clustering their transformer embeddings.
Clustering is done using the sklearn algorithm selected in init_estimator()
:param pickle_cent:
:param plot: Flag to plot 2D projection of word instance embeddings
"""
if not os.path.exists(self.save_dir):
os.makedirs(self.save_dir)
fl = open(self.save_dir + "/clustering.log", 'w') # Logging file
fl.write(f"# WORD\t\tCLUSTERS\n")
# Loop for each word in vocabulary
for word, instances in self.vocab_map.items():
self.cluster_centroids[word] = [0] # Placeholder for non-ambiguous words
if word in self.function_words.keys(): # Don't disambiguate if function word
print(f"Won't disambiguate word \"{word}\": too frequent (function word)")
continue
# Build embeddings list for this word
curr_embeddings = [self.matrix[row] for _, _, row in instances]
# curr_embeddings = normalize(curr_embeddings) # Make unit vectors
if len(curr_embeddings) < self.freq_threshold: # Don't disambiguate if word is infrequent
print(f"Won't disambiguate word \"{word}\": frequency is lower than threshold")
continue
print(f'Disambiguating word \"{word}\"...')
self.estimator.fit(curr_embeddings) # Disambiguate
if plot:
self.plot_instances(curr_embeddings, self.estimator.labels_, word)
curr_centroids = self.export_clusters(fl, word, self.estimator.labels_)
self.cluster_centroids[word] = curr_centroids
with open(pickle_cent, 'wb') as h:
pickle.dump(self.cluster_centroids, h)
print("Cluster centroids stored in " + pickle_cent)
fl.write("\n")
fl.close()
def export_clusters(self, fl, word, labels):
"""
Write clustering results to files
:param fl: handle for logging file
:param word: Current word to disambiguate
:param labels: Cluster labels for each word instance
"""
sense_centroids = [] # List with word sense centroids
num_clusters = max(labels) + 1
print(f"Num clusters: {num_clusters}")
fl.write(f"{word}\t\t{num_clusters}\n")
# Write senses to file, with some sentence examples
with open(self.save_dir + '/' + word + ".disamb", "w") as fo:
for i in range(-1, num_clusters): # Also write unclustered words
sense_members = [self.vocab_map[word][j] for j, k in enumerate(labels) if k == i]
fo.write(f"Cluster #{i}")
if len(sense_members) > 0: # Handle empty clusters
fo.write(": \n[")
np.savetxt(fo, sense_members, fmt="(%s, %s, %s)", newline=", ")
fo.write(']\n')
# Write at most 3 sentence examples for the word sense
sent_samples = rand.sample(sense_members, min(len(sense_members), 3))
fo.write('Samples:\n')
# Write sample sentences to file, with focus word in CAPS for easier reading
for sample, focus_word, _ in sent_samples:
bold_sent = self.sentences[sample]
bold_sent[focus_word] = bold_sent[focus_word].upper()
fo.write(" ".join(bold_sent) + '\n')
# Calculate cluster centroid and save
if i >= 0: # Don't calculate centroid for unclustered (noise) instances
sense_embeddings = [self.matrix[row] for _, _, row in sense_members]
# Average and normalize centroid
sense_centroids.append(normalize([np.mean(sense_embeddings, 0)])[0])
else:
fo.write(" is empty\n\n")
return sense_centroids
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='WSD using BERT')
parser.add_argument('--use_cuda', action='store_true', help='Use GPU?')
parser.add_argument('--device', type=str, default='cuda:2', help='GPU Device to Use?')
parser.add_argument('--corpus', type=str, required=True, help='Training Corpus')
parser.add_argument('--threshold', type=int, default=2, help='Min freq of word to be disambiguated')
parser.add_argument('--func_frac', type=float, default=0.05, help='Top fraction of words considered functional')
parser.add_argument('--start_k', type=int, default=10, help='First number of clusters to use in KMeans')
parser.add_argument('--end_k', type=int, default=10, help='Final number of clusters to use in KMeans')
parser.add_argument('--step_k', type=int, default=1, help='Increase in number of clusters to use')
parser.add_argument('--save_to', type=str, default='test', help='Directory to save disambiguated words')
parser.add_argument('--pretrained', type=str, default='bert-large-uncased', help='Pretrained model to use')
parser.add_argument('--clustering', type=str, default='SphericalKmeans', help='Clustering method to use')
parser.add_argument('--pickle_cent', type=str, default='test_cent.pickle', help='Pickle file for cluster centroids')
parser.add_argument('--verbose', action='store_true', help='Print processing details')
parser.add_argument('--plot', action='store_true', help='Plot word embeddings?')
parser.add_argument('--pickle_emb', type=str, default='test.pickle', help='Pickle file for Embeddings/Save '
'Embeddings to file')
parser.add_argument('--norm_file', type=str, default='', help='Sentences file to use for normalization')
parser.add_argument('--norm_pickle', type=str, default='test.pickle', help='Pickle file to use for normalization')
args = parser.parse_args()
print("Corpus is: " + args.corpus)
if args.use_cuda:
print("Processing with CUDA!")
else:
print("Processing without CUDA!")
WSD = WordSenseModel(pretrained_model=args.pretrained, device_number=args.device, use_cuda=args.use_cuda,
freq_threshold=args.threshold)
print("Obtaining word embeddings...")
WSD.load_matrix(args.pickle_emb, args.corpus, verbose=args.verbose, norm_pickle=args.norm_pickle,
norm_file=args.norm_file)
    # Find the most frequent (function) words so they are not disambiguated
print(f"Finding the top {args.func_frac} fraction of words")
WSD.find_function_words(args.func_frac)
print("Start disambiguation...")
for nn in range(args.start_k, args.end_k + 1, args.step_k):
WSD.init_estimator(args.save_to, clust_method=args.clustering, k=nn)
WSD.disambiguate(pickle_cent=args.pickle_cent, plot=args.plot)
print("\n\n*******************************************************")
print(f"WSD finished. Output files written in {args.save_to}")
| 49.531568 | 120 | 0.627015 | 3,075 | 24,320 | 4.765528 | 0.162276 | 0.012556 | 0.015764 | 0.007643 | 0.21646 | 0.163095 | 0.138665 | 0.098744 | 0.072403 | 0.072403 | 0 | 0.007891 | 0.27574 | 24,320 | 490 | 121 | 49.632653 | 0.82406 | 0.236801 | 0 | 0.086379 | 0 | 0 | 0.126566 | 0.008669 | 0 | 0 | 0 | 0.002041 | 0 | 1 | 0.046512 | false | 0 | 0.046512 | 0.003322 | 0.116279 | 0.10299 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
adb724ad1d9b3b184949b2b361524a1aea37ecd1 | 2,419 | py | Python | datasets/cifar10.py | cnc-ood/cnc_ood | a149fa22ea32e14e977c893f2ce524ad8e770cf4 | [
"MIT"
] | null | null | null | datasets/cifar10.py | cnc-ood/cnc_ood | a149fa22ea32e14e977c893f2ce524ad8e770cf4 | [
"MIT"
] | null | null | null | datasets/cifar10.py | cnc-ood/cnc_ood | a149fa22ea32e14e977c893f2ce524ad8e770cf4 | [
"MIT"
] | null | null | null | import torch
from torchvision import datasets
from torchvision import transforms
from torch.utils import data
import logging
from augmentations import aug_dict
transform_pre = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.RandomRotation(15),
transforms.ToTensor()
])
transform_post = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
# Intended split: train = 45,000 / val = 5,000 / test = 10,000
# (note: the loaders below load the full 50,000-image CIFAR-10 training set;
#  no separate validation loader is constructed in this file)
def get_train_valid_test_loader(args):
train_set = datasets.CIFAR10(root='/home/jatin/scratch/Data/', train=True, download=True, transform=transform_train)
# Define augmentations
if args.aug == "cutmix":
train_set_corr = datasets.CIFAR10(root='/home/jatin/scratch/Data/', train=True, download=True, transform=transform_train)
else:
train_set_corr = datasets.CIFAR10(root='/home/jatin/scratch/Data/', train=True, download=True, transform=transform_pre)
logging.info("workers being used : {}".format(args.workers))
train_loader = data.DataLoader(train_set, batch_size=args.train_batch_size, num_workers=args.workers, shuffle=True)
corr_loader = data.DataLoader(train_set_corr, batch_size=args.train_batch_size, num_workers=args.workers, shuffle=True, collate_fn=aug_dict[args.aug])
test_set = datasets.CIFAR10(root='/home/jatin/scratch/Data/', train=False, download=True, transform=transform_test)
test_loader = data.DataLoader(test_set, batch_size=args.test_batch_size, num_workers=args.workers, drop_last=False)
return train_loader, corr_loader, test_loader
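# Illustrative usage sketch (added for clarity, not part of the original file).
# The values below are hypothetical, and it is assumed that aug_dict in the
# augmentations module defines a "cutmix" collate function; only the attribute
# names come from the code above.
#
#   from argparse import Namespace
#   args = Namespace(aug="cutmix", workers=4, train_batch_size=128,
#                    test_batch_size=256)
#   train_loader, corr_loader, test_loader = get_train_valid_test_loader(args)
#   # corr_loader applies aug_dict[args.aug] as its collate_fn, so every batch
#   # it yields already has the chosen augmentation applied.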
def get_datasets(args):
trainset = datasets.CIFAR10(root='/home/jatin/scratch/Data/', train=True, download=True, transform=None)
testset = datasets.CIFAR10(root='/home/jatin/scratch/Data/', train=False, download=True, transform=None)
return trainset, testset
def get_transforms():
return transform_train, transform_test | 37.796875 | 154 | 0.745349 | 316 | 2,419 | 5.553797 | 0.240506 | 0.02735 | 0.064957 | 0.078632 | 0.62906 | 0.597151 | 0.580057 | 0.580057 | 0.580057 | 0.576638 | 0 | 0.058601 | 0.125258 | 2,419 | 64 | 155 | 37.796875 | 0.770794 | 0.028938 | 0 | 0.311111 | 0 | 0 | 0.076333 | 0.063966 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.133333 | 0.022222 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
adb992ce66bb549841b24b0492b73ccbf41ab32d | 1,344 | py | Python | wait.py | Spico197/DocEE | d6b585e29e5908b891e765066b96ff7642587e5a | [
"MIT"
] | 90 | 2021-12-14T02:13:51.000Z | 2022-03-28T09:37:50.000Z | wait.py | Spico197/DocEE | d6b585e29e5908b891e765066b96ff7642587e5a | [
"MIT"
] | 24 | 2021-12-15T07:22:15.000Z | 2022-03-30T09:38:27.000Z | wait.py | Spico197/DocEE | d6b585e29e5908b891e765066b96ff7642587e5a | [
"MIT"
] | 22 | 2021-12-14T03:05:25.000Z | 2022-03-30T00:47:25.000Z | import argparse
import random
import string
import sys
from watchmen import WatchClient
def parse_args(in_args=None):
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("--task_name", type=str, required=True, help="Take Name")
arg_parser.add_argument("--cuda", type=str, required=True, help="cuda to be waited")
arg_parser.add_argument(
"--req_gpu_num",
type=int,
required=False,
default=1,
help="request number of gpus",
)
arg_parser.add_argument(
"--wait",
choices=["schedule", "queue", "none"],
default="none",
help="scheduling/queue wait",
)
arg_info = arg_parser.parse_args(args=in_args)
return arg_info
if __name__ == "__main__":
in_argv = parse_args()
if in_argv.wait == "none":
sys.exit(0)
random_id = "-" + "".join(random.sample(string.ascii_letters + string.digits, 8))
exp_id = in_argv.task_name + random_id
watch_client = WatchClient(
id=exp_id,
gpus=eval(f"[{in_argv.cuda}]"),
server_host="localhost",
server_port=62333,
req_gpu_num=in_argv.req_gpu_num,
mode=in_argv.wait,
timeout=60,
)
available_gpus = watch_client.wait()
available_gpus = [str(x) for x in available_gpus]
print(",".join(available_gpus))
| 28 | 88 | 0.637649 | 178 | 1,344 | 4.522472 | 0.432584 | 0.067081 | 0.059627 | 0.099379 | 0.057143 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009699 | 0.232887 | 1,344 | 47 | 89 | 28.595745 | 0.771096 | 0 | 0 | 0.047619 | 0 | 0 | 0.122768 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02381 | false | 0 | 0.119048 | 0 | 0.166667 | 0.02381 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
adbc775ffef9124c8d2dd228661309d6d57c1369 | 3,617 | py | Python | kolmogorov_methods.py | googleinterns/invobs-data-assimilation | 36e0ff6319a596d99d6f4197bff0f00a38d299c4 | [
"Apache-2.0"
] | 16 | 2021-07-05T08:09:43.000Z | 2022-03-21T19:12:06.000Z | kolmogorov_methods.py | googleinterns/invobs-data-assimilation | 36e0ff6319a596d99d6f4197bff0f00a38d299c4 | [
"Apache-2.0"
] | null | null | null | kolmogorov_methods.py | googleinterns/invobs-data-assimilation | 36e0ff6319a596d99d6f4197bff0f00a38d299c4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import typing
from typing import Union, Tuple, Callable, NewType
import numpy as np
import jax
import jax.numpy as jnp
import flax.nn as nn
from dynamical_system import KolmogorovFlow
from util import aa_tuple_to_jnp, jnp_to_aa_tuple
Array = Union[np.ndarray, jnp.ndarray]
PrngKey = NewType('PrngKey', jnp.ndarray)
def generate_data_kolmogorov(
prng_key: PrngKey,
dyn_sys: KolmogorovFlow,
num_samples: int,
num_time_steps: int,
num_warmup_steps: int,
) -> Tuple[Array, Array, Array, Array]:
"""
Generates data for the Kolmogorov Flow model.
Args:
prng_key: key for random number generation.
dyn_sys: KolmogorovFlow dynamical system.
num_samples: number of independent samples to generate.
        num_time_steps: number of snapshots to generate; the number of inner
integration steps is specified with the dynamical system instance.
num_warmup_steps: number of warmup steps.
Returns:
X0: initial state after warmup.
X: trajectory of physical states.
Y: trajectory of observed states.
offsets: offsets for AlignedArray data structure.
"""
X0_keys = jax.random.split(prng_key, num_samples)
X0 = dyn_sys.generate_filtered_velocity_fields(X0_keys)
total_warm_up_steps = num_warmup_steps * dyn_sys.num_inner_steps
X0 = dyn_sys.batch_warmup(X0, total_warm_up_steps)
X = dyn_sys.batch_integrate(X0, num_time_steps)
Y = dyn_sys.batch_observe(X)
return X0, X, Y, dyn_sys.offsets
def interpolate_periodic_kolmogorov(
u: Array,
factor: int,
method: str = 'bicubic',
) -> Array:
"""
Upsamples velocity field(s) `u` by `factor` under
the assumption that `u` is periodic in both upsampling dimensions.
Args:
u: jax.numpy.DeviceArray of shape (..., grid_x, grid_y, 2).
factor: scalar factor by which to resize grid_x and grid_y.
Returns:
Resized version of the velocity field(s).
"""
paddings = [(0,0)] * u.ndim
paddings[-2] = (1,1)
paddings[-3] = (1,1)
u_pad = jnp.pad(u, paddings, 'wrap')
out_shape = list(u_pad.shape)
out_shape[-2] = int(factor * out_shape[-2])
out_shape[-3] = int(factor * out_shape[-3])
out = jax.image.resize(u_pad, shape=out_shape, method=method)
fi = int(factor)
return out[..., fi:-fi, fi:-fi, :]
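# Quick shape check (added for illustration, not part of the original module):
# with a 64x64 two-component field and factor 2, the periodic padding added
# before jax.image.resize is cropped off again, leaving a 128x128 result.
#
#   u = jax.random.normal(jax.random.PRNGKey(0), (64, 64, 2))
#   u_fine = interpolate_periodic_kolmogorov(u, 2)
#   assert u_fine.shape == (128, 128, 2)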
def interpolation_da_init_kolmogorov(
dyn_sys: KolmogorovFlow,
X0: Array,
) -> Array:
"""
Generates initial conditions for data assimilation by copying the observed
grid points and inferring the unobserved grid points as an average over
the dataset samples.
Args:
dyn_sys: DynamicalSystem.
X0: ground truth initial conditions of
shape (num_samples, grid_x, grid_y, 2).
Returns:
Initial conditions with observed grid points and otherwise sample
averaged grid points.
"""
Y0 = dyn_sys.batch_observe(X0)
factor = X0.shape[-2] / Y0.shape[-2]
X0_init = interpolate_periodic_kolmogorov(Y0, factor, method='bicubic')
return X0_init | 32.881818 | 80 | 0.705834 | 524 | 3,617 | 4.719466 | 0.358779 | 0.026688 | 0.017792 | 0.01294 | 0.022645 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013927 | 0.186066 | 3,617 | 110 | 81 | 32.881818 | 0.826087 | 0.520874 | 0 | 0.085106 | 0 | 0 | 0.015423 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06383 | false | 0 | 0.170213 | 0 | 0.297872 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
adbd078ed1dee04b7b17e5a4625fa3532f341ae7 | 18,700 | py | Python | pypowervm/tasks/monitor/lpar.py | stephenfin/pypowervm | 68f2b586b4f17489f379534ab52fc56a524b6da5 | [
"Apache-2.0"
] | 24 | 2015-12-02T19:49:45.000Z | 2021-11-17T11:43:51.000Z | pypowervm/tasks/monitor/lpar.py | stephenfin/pypowervm | 68f2b586b4f17489f379534ab52fc56a524b6da5 | [
"Apache-2.0"
] | 18 | 2017-03-01T05:54:25.000Z | 2022-03-14T17:32:47.000Z | pypowervm/tasks/monitor/lpar.py | stephenfin/pypowervm | 68f2b586b4f17489f379534ab52fc56a524b6da5 | [
"Apache-2.0"
] | 17 | 2016-02-10T22:53:04.000Z | 2021-11-10T09:47:10.000Z | # Copyright 2015, 2017 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Objects that contain the per LPAR monitor data."""
import abc
import six
class LparMetric(object):
"""Represents a set of metrics for a given LPAR.
This is a reduction and consolidation of the raw PCM statistics.
"""
def __init__(self, uuid):
"""Creates a LPAR Metric. Data will be set by invoker.
- uuid - The LPAR's UUID.
- memory - The LPAR's memory statistics.
- processor - The LPAR's processor statistics.
- network - The LparNetwork aggregation of network statistics.
- storage - the LparStorage aggregation of storage statistics.
If certain attributes are None, that means the statistics could not
be pulled.
:param uuid: The LPAR UUID
"""
self.uuid = uuid
self.memory = None
self.processor = None
self.network = None
self.storage = None
@six.add_metaclass(abc.ABCMeta)
class PropertyWrapper(object):
"""Provides a thin wrapper around the raw metrics class.
    Subclasses should define the _supported_metrics element.
"""
def __init__(self, elem):
self.elem = elem
def __getattr__(self, attr):
if attr not in self._supported_metrics:
raise AttributeError()
return getattr(self.elem, attr)
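# Illustrative sketch (added for clarity, not part of the original module):
# a PropertyWrapper subclass exposes only the attributes whitelisted in its
# _supported_metrics tuple and delegates the lookup to the wrapped raw metric
# object.  The _RawProc class below is hypothetical.
#
#   class _RawProc(object):
#       mode = 'uncap'
#       weight = 128
#
#   class _ProcView(PropertyWrapper):
#       _supported_metrics = ('mode', 'weight')
#
#   view = _ProcView(_RawProc())
#   view.mode     # -> 'uncap', fetched from the wrapped element
#   view.pool_id  # -> AttributeError, not in _supported_metrics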
class LparMemory(object):
"""Represents the memory for a given LPAR.
Requires the following as inputs:
- PhypLparMemory raw metric
- LparInfo.LparUtil raw metric. These metrics are got from IBM.Host
Resource Manager through RMC.
The supported metrics are as follows:
- logical_mem: The amount of memory on the LPAR.
- backed_physical_mem: The amount of backing physical memory used by
the LPAR.
- pct_real_mem_avbl: Percentage of available memory on VMs. It is
only available for newer RSCT packages.
This statistic does not count cached memory as
in use.
- total_pg_count: Page count of swap space for this VM. Page
size is 4k.
- free_pg_count: Page count of free swap space for this VM.
Page size is 4k.
- active_pg_count: Page count of total active memory for this VM.
Page size is 4k.
- real_mem_size_bytes: Total amount of memory assigned to this VM in
bytes.
- pct_real_mem_free: Percentage of real page frames that are currently
available on the VMM (Virtual Memory Manager)
free list. VMM manages the allocation of real
memory page frames, resolves references to virtual
memory pages that are not currently in real memory
and manages the reading and writing of pages to
disk storage.
- vm_pg_in_rate: Represents the rate (in pages per second) that the VMM
is reading both persistent and working pages from
disk storage. A -1 value indicates that system
could not determine this metric.
- vm_pg_out_rate: Represents the rate (in pages per second) that the VMM
is writing both persistent and working pages to
disk storage. A -1 value indicates that system
could not determine this metric.
- vm_pg_swap_in_rate: Represents the rate (in pages per second) that the
VMM is reading working pages from paging-space
disk storage. A -1 value indicates that system
could not determine this metric.
- vm_pg_swap_out_rate: Represents the rate (in pages per second) that the
VMM is writing working pages to paging-space
disk storage. A -1 value indicates that system
could not determine this metric.
"""
def __init__(self, lpar_mem_phyp, lpar_mem_pcm):
self.logical_mem = lpar_mem_phyp.logical_mem
self.backed_physical_mem = lpar_mem_phyp.backed_physical_mem
        # It's possible that the memory metric was not collected for this
        # lpar_sample. If the metric is not available, then assume 0,
        # i.e. all memory is being utilized.
if lpar_mem_pcm:
self.pct_real_mem_avbl = lpar_mem_pcm.memory.pct_real_mem_avbl
self.total_pg_count = lpar_mem_pcm.memory.total_pg_count
self.free_pg_count = lpar_mem_pcm.memory.free_pg_count
self.active_pg_count = lpar_mem_pcm.memory.active_pg_count
self.real_mem_size_bytes = lpar_mem_pcm.memory.real_mem_size_bytes
self.pct_real_mem_free = lpar_mem_pcm.memory.pct_real_mem_free
self.vm_pg_in_rate = lpar_mem_pcm.memory.vm_pg_in_rate
self.vm_pg_out_rate = lpar_mem_pcm.memory.vm_pg_out_rate
self.vm_pg_swap_in_rate = lpar_mem_pcm.memory.vm_pg_swap_in_rate
self.vm_pg_swap_out_rate = lpar_mem_pcm.memory.vm_pg_swap_out_rate
else:
self.pct_real_mem_free = 0
self.vm_pg_in_rate = -1
self.vm_pg_out_rate = -1
self.vm_pg_swap_in_rate = -1
self.vm_pg_swap_out_rate = -1
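# Note on LparMemory (added for clarity): when no PCM sample is available,
# only pct_real_mem_free and the paging-rate attributes are populated in the
# else branch above; the page-count and real-memory-size attributes are left
# unset in that case.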
class LparProc(PropertyWrapper):
"""Represents the CPU statistics for a given LPAR.
Requires the PhypLparProc raw metric as input.
The supported metrics are as follows:
- pool_id: The CPU pool for this LPAR.
- mode: The CPU mode. Typically cap or uncap.
- virt_procs: The number of virtual processors assigned to the LPAR.
- proc_units: The number of proc units assigned to the LPAR.
Ex. if virt_procs is 4 and proc_units is .4, then each virtual
processor has .1 CPUs.
- weight: The CPU weight for uncapped processors.
This defines how aggressive this CPU should be when using unused
cycles from other LPARs (as compared to other VMs that may also
request those unused cycles).
- entitled_proc_cycles: The entitled number of processor cycles.
- util_cap_proc_cycles: The number of used processor cycles from its
capped capacity.
- util_uncap_proc_cycles: The number of utilized processor cycles pulled
from uncap spare.
- idle_proc_cycles: The CPU cycles spent idling.
- donated_proc_cycles: The number of CPU cycles donated to other VMs due
to no need.
- time_wait_dispatch: Time spent waiting for CPU dispatch.
- total_instructions: The total instructions executed.
- total_inst_exec_time: The time for the instructions to execute.
"""
_supported_metrics = ('pool_id', 'mode', 'virt_procs', 'proc_units',
'weight', 'entitled_proc_cycles',
'util_cap_proc_cycles', 'util_uncap_proc_cycles',
'idle_proc_cycles', 'donated_proc_cycles',
'time_wait_dispatch', 'total_instructions',
'total_inst_exec_time')
class LparStorage(object):
"""Represents the Storage statistics for a given LPAR.
Requires the PhypLparStorage and list of ViosInfo raw metrics as input.
Contains the various LPAR storage statistic elements.
- virt_adapters - List of LparVirtStorageAdpt on the LPAR
- vfc_adpts - List of LparVFCAdpt on the LPAR
"""
def __init__(self, lpar_phyp_storage, vios_metrics):
"""Fills the VM storage metrics from the raw PHYP/VIOS metrics.
:param lpar_phyp_storage: The raw Phyp Storage object.
:param vios_metrics: The list of Virtual I/O Server raw metrics that
are paired to the sample from the lpar_phyp
metrics.
"""
# Add the various adapters.
self.virt_adpts = []
for vadpt in lpar_phyp_storage.v_stor_adpts:
vio_adpt = self._find_vio_vstor_adpt(vadpt, vios_metrics)
if vio_adpt is not None:
self.virt_adpts.append(LparVirtStorageAdpt(vio_adpt))
self.vfc_adpts = []
for phyp_vfc_adpt in lpar_phyp_storage.v_fc_adpts:
vfc_adpt = self._find_vio_vfc_adpt(phyp_vfc_adpt, vios_metrics)
if vfc_adpt is not None:
self.vfc_adpts.append(LparVFCAdpt(vfc_adpt))
@staticmethod
def _find_vio_vstor_adpt(phyp_vadpt, vios_metrics):
"""Finds the appropriate VIOS virtual storage adapter.
For a given PHYP virtual adapter, PHYP only has a little bit of
information about it. Which VIOS is hosting it, and the slot.
The VIOS metrics actually contain the information for that
device.
This method will look through all the VIOS samples to find the
matching ViosStorageVAdpt for the given PhypStorageVAdpt. If one
can not be found, None is returned.
:param phyp_vadpt: The PhypStorageVAdpt raw metric.
:param vios_metrics: The list of ViosInfos.
:return: The corresponding ViosStorageVAdpt from the ViosInfos
if one can be found. None otherwise.
"""
for vios_ltm in vios_metrics:
# We need to find the VIOS sample that matches this storage
# element. Loop until we find one (if one doesn't exist, then
# this will just return None).
if vios_ltm.sample.id != phyp_vadpt.vios_id:
continue
# If we reach here, we found the VIOS. From that sample, see
# if we have the appropriate storage.
raw_stor = vios_ltm.sample.storage
if raw_stor is None or raw_stor.virt_adpts is None:
break
# See if this virtual adapters has the right data.
slot_str = "-C%d" % phyp_vadpt.vios_slot
for vadpt in raw_stor.virt_adpts:
# We have to match on the location code. We can only match
# on the tail end of the slot (we've already validated that
# we have the right VIOS, so slot is sufficient).
if vadpt.physical_location.endswith(slot_str):
return vadpt
# If we reached this point, we found the right VIOS, but couldn't
# find proper data. Therefore we can just exit the loop.
break
return None
@staticmethod
def _find_vio_vfc_adpt(phyp_vfc_adpt, vios_metrics):
"""Finds the appropriate VIOS virtual FC adapter.
For a given PHYP virtual FC adapter, PHYP only has a little bit of
information about it. Which VIOS is hosting it, and the WWPNs.
The VIOS metrics actually contain the information for that
device.
This method will look through all the VIOS samples to find the
matching ViosFCVirtAdpt for the given PhypVirtualFCAdpt. If one
can not be found, None is returned.
        :param phyp_vfc_adpt: The PhypVirtualFCAdpt raw metric.
:param vios_metrics: The list of ViosInfos.
:return: The corresponding ViosFCVirtAdpt from the ViosInfos
if one can be found. None otherwise.
"""
for vios_ltm in vios_metrics:
# We need to find the VIOS sample that matches this VFC
# element. Loop until we find one (if one doesn't exist, then
# this will just return None).
if vios_ltm.sample.id != phyp_vfc_adpt.vios_id:
continue
# If we reach here, we found the VIOS. From that sample, see
# if we have the appropriate storage.
raw_stor = vios_ltm.sample.storage
if raw_stor is None or raw_stor.fc_adpts is None:
return None
# Check the WWPNs.
for pfc_adpt in raw_stor.fc_adpts:
vfc_adpt = LparStorage._find_vfc(phyp_vfc_adpt, pfc_adpt)
if vfc_adpt is not None:
return vfc_adpt
return None
@staticmethod
def _find_vfc(phyp_vfc_adpt, vio_pfc_adpt):
"""Finds the matching VIOS vfc adpt for a given PHYP adapter
:param phyp_vfc_adpt: The raw PhypVirtualFCAdpt object.
:param vio_pfc_adpt: The raw ViosFCPhysAdpt.
:return: The matching ViosFCVirtAdpt contained within the physical VIOS
adapter. If one can't be found, None will be returned.
"""
if vio_pfc_adpt.ports is None:
return None
for vfc_adpt in vio_pfc_adpt.ports:
for wwpn in phyp_vfc_adpt.wwpn_pair:
if wwpn == vfc_adpt.wwpn:
return vfc_adpt
return None
@six.add_metaclass(abc.ABCMeta)
class LparStorageAdpt(PropertyWrapper):
"""Base class for storage adapters on a given LPAR.
Requires the vios storage adapter raw metric as input. Specific classes
are defined by the subclasses.
The supported metrics are as follows:
- name: The identifier of the adapter. Ex: vhost2.
- physical_location: The physical location code of the adapter.
- num_reads: The number of read operations done against the adapter.
- num_writes: The number of write operations done against the adapter.
- read_bytes: The number of bytes read from the adapter.
- write_bytes: The number of bytes written to the adapter.
- type: The type of the adapter.
"""
_supported_metrics = ('name', 'physical_location', 'num_reads', 'type',
'num_writes', 'read_bytes', 'write_bytes')
class LparVFCAdpt(LparStorageAdpt):
"""A Virtual Fibre Channel Adapter attached to the LPAR.
Requires the ViosFCVirtAdpt raw metric as input.
The supported metrics are as follows:
- name: The identifier of the adapter. Ex: vhost2.
- physical_location: The physical location code of the adapter.
- num_reads: The number of read operations done against the adapter.
- num_writes: The number of write operations done against the adapter.
- read_bytes: The number of bytes read from the adapter.
- write_bytes: The number of bytes written to the adapter.
- type: The type of the adapter. Will be set to VFC.
"""
@property
def type(self):
"""Overrides the type property as the raw metric.
The VFC Adapter does not natively have a type in the raw metric. This
property overrides and circumvents the standard property lookup
mechanism.
"""
return "VFC"
class LparPhysAdpt(LparStorageAdpt):
"""A physical adapter (ex SAS drive) on the LPAR.
Requires the ViosStoragePAdpt raw metric as input.
The supported metrics are as follows:
- name: The identifier of the adapter. Ex: vhost2.
- physical_location: The physical location code of the adapter.
- num_reads: The number of read operations done against the adapter.
- num_writes: The number of write operations done against the adapter.
- read_bytes: The number of bytes read from the adapter.
- write_bytes: The number of bytes written to the adapter.
- type: The type of the adapter.
"""
pass
class LparVirtStorageAdpt(LparStorageAdpt):
"""A Virutal Storage Adapter (ex. vscsi) attached to the LPAR.
Requires the ViosStorageVAdpt raw metric as input.
The supported metrics are as follows:
- name: The identifier of the adapter. Ex: vhost2.
- physical_location: The physical location code of the adapter.
- num_reads: The number of read operations done against the adapter.
- num_writes: The number of write operations done against the adapter.
- read_bytes: The number of bytes read from the adapter.
- write_bytes: The number of bytes written to the adapter.
- type: The type of the adapter.
"""
pass
class LparNetwork(object):
"""Represents the Network statistics for a given LPAR.
Requires the PhypNetwork raw metric as input.
Aggregates the various types of network statistics for a given LPAR.
- cnas - List of the Client Network Adapter stats.
"""
def __init__(self, lpar_sample_net):
"""Creates the Network Statistics aggregation element.
Puts the network information into the lpar_metric.network variable.
:param lpar_sample_net: The PHYP raw data sample.
"""
# Fill in the Client Network Adapter data sources
self.cnas = ([] if lpar_sample_net.veas is None else
[LparCNA(x) for x in lpar_sample_net.veas])
# TODO(thorst) Additional network metrics. Ex. SR-IOV ports
class LparCNA(PropertyWrapper):
"""Represents a Client Network Adapter on a LPAR.
Requires the PhypVEA raw metric as input.
The supported metrics are as follows:
- vlan_id: The PVID of the Client Network Adapter.
- vswitch_id: The virtual switch ID (not UUID).
- physical_location: The physical location for the Client Network
Adapter.
- received_packets: The count of packets received to the Client Network
Adapter.
- sent_packets: The count of packets sent by the Client Network Adapter.
- dropped_packets: The count of the packets dropped by the Client Network
Adapter.
- sent_bytes: The count of the bytes sent by the Client Network Adapter.
- received_bytes: The count of the bytes received by the Client Network
Adapter.
"""
_supported_metrics = ('vlan_id', 'vswitch_id', 'physical_location',
'received_packets', 'sent_packets',
'dropped_packets', 'sent_bytes', 'received_bytes')
| 42.21219 | 79 | 0.64123 | 2,496 | 18,700 | 4.647436 | 0.175481 | 0.024138 | 0.019914 | 0.013793 | 0.443793 | 0.379483 | 0.334828 | 0.307759 | 0.297931 | 0.286552 | 0 | 0.00247 | 0.307273 | 18,700 | 442 | 80 | 42.307692 | 0.893006 | 0.650695 | 0 | 0.212389 | 0 | 0 | 0.066814 | 0.004049 | 0 | 0 | 0 | 0.002262 | 0 | 1 | 0.088496 | false | 0.017699 | 0.017699 | 0 | 0.318584 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
adbf323b959ffe50eebc8abbeb3035572a07263d | 2,012 | py | Python | src/apd/aggregation/typing.py | MatthewWilkes/apd.aggregation | 427fa908f45332d623295f92e1ccfdaf545d6997 | [
"BSD-3-Clause"
] | null | null | null | src/apd/aggregation/typing.py | MatthewWilkes/apd.aggregation | 427fa908f45332d623295f92e1ccfdaf545d6997 | [
"BSD-3-Clause"
] | 11 | 2020-11-23T21:36:48.000Z | 2022-03-12T00:48:58.000Z | src/apd/aggregation/typing.py | MatthewWilkes/apd.aggregation | 427fa908f45332d623295f92e1ccfdaf545d6997 | [
"BSD-3-Clause"
] | 1 | 2020-08-09T01:47:59.000Z | 2020-08-09T01:47:59.000Z | import datetime
import sys
import typing as t
if sys.version_info >= (3, 8):
from typing import TypedDict, Protocol
else:
from typing_extensions import TypedDict, Protocol
from apd.aggregation.database import DataPoint
# Aliases for common types
# These type variables allow for generic functions. T_key represents the place
# in a chart that an item will be placed, and T_value the kind of data that is plotted.
T_key = t.TypeVar("T_key")
T_value = t.TypeVar("T_value")
# Cleaned is a placeholder type, representing an async iterator of key and value functions
# Cleaned[float, float] is equivalent to typing.AsyncIterator[Tuple[builtins.float, builtins.float]]
Cleaned = t.AsyncIterator[t.Tuple[T_key, T_value]]
# T_cleaned represents a placeholder for the result of a cleaner function, and is *covariant*
# because it can accept compatible types (such as ints where floats were declared).
# It is bound to Cleaned, so only items that match the specification for Cleaned (for any values
# of T_key or T_value) are valid
T_cleaned = t.TypeVar("T_cleaned", covariant=True, bound=Cleaned)
# CleanerFunc is a generic protocol, it matches any Callable that converts an async iterator
# of datapoints to its type. So, CleanerFunc[float] would be equivalent to t.Callable[[t.AsyncIterator[DataPoint]], float]
class CleanerFunc(Protocol[T_cleaned]):
def __call__(self, datapoints: t.AsyncIterator[DataPoint]) -> T_cleaned:
...
# CLEANED_DT_FLOAT is a Cleaned represents datetime/float pairs, for simple charts
CLEANED_DT_FLOAT = Cleaned[datetime.datetime, float]
# and CLEANED_COORD_FLOAT represents (lat/lon), float pairs
CLEANED_COORD_FLOAT = Cleaned[t.Tuple[float, float], float]
# The _CLEANER variants are functions that return their matching iterators from above
DT_FLOAT_CLEANER = CleanerFunc[CLEANED_DT_FLOAT]
COORD_FLOAT_CLEANER = CleanerFunc[CLEANED_COORD_FLOAT]
class IntermediateMapData(TypedDict):
coord: t.Optional[t.Tuple[float, float]]
value: t.Optional[float]
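# Illustrative sketch (added for clarity, not part of the original module):
# a cleaner conforming to DT_FLOAT_CLEANER consumes an async iterator of
# DataPoint objects and yields (datetime, float) pairs.  The DataPoint
# attribute names used below (collected_at, data) are assumptions made for
# illustration only.
#
#   async def clean_passthrough(
#       datapoints: t.AsyncIterator[DataPoint],
#   ) -> CLEANED_DT_FLOAT:
#       async for point in datapoints:
#           yield point.collected_at, float(point.data)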
| 40.24 | 122 | 0.781312 | 302 | 2,012 | 5.07947 | 0.380795 | 0.013038 | 0.009778 | 0.013038 | 0.014342 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00116 | 0.143141 | 2,012 | 49 | 123 | 41.061224 | 0.888631 | 0.55169 | 0 | 0 | 0 | 0 | 0.023675 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.272727 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
adbfa04ae35f9bf4a7dee4bdc7e138f0827ff7bf | 6,881 | py | Python | arms/app/resources/pirate.py | uyamazak/oceanus | 6158cdc313a381f8228562605d33713ad4e776f1 | [
"MIT"
] | 8 | 2017-02-10T07:24:43.000Z | 2019-06-03T07:45:29.000Z | arms/app/resources/pirate.py | uyamazak/oceanus | 6158cdc313a381f8228562605d33713ad4e776f1 | [
"MIT"
] | 3 | 2017-02-20T10:24:20.000Z | 2017-08-15T04:54:36.000Z | arms/app/resources/pirate.py | uyamazak/oceanus | 6158cdc313a381f8228562605d33713ad4e776f1 | [
"MIT"
] | null | null | null | import falcon
import json
from cerberus import Validator
from datetime import datetime
from common.gopub_utils import publish2gopub
from common.errors import RedisWritingError
from common.utils import oceanus_logging
from resources.execution import ExecutionResource
logger = oceanus_logging()
class PirateResource(ExecutionResource):
method_label = "pirate"
"""
Pirate receives POST data.
From HTML form, AJAX etc.
    Responds with an empty string.
"""
def adjust_user_data(self, user_data):
"""
        In order to prevent unnecessary validation errors,
        normalize case (lower/upper) and truncate over-length values.
"""
if user_data['enc']:
user_data['enc'] = user_data['enc'].upper()
if user_data['sid']:
user_data['sid'] = user_data['sid'].lower()
ua_max = 512
if not user_data['ua']:
user_data['ua'] = ''
if len(user_data['ua']) > ua_max:
user_data['ua'] = user_data['ua'][0:ua_max]
logger.info('cut ua {}:{}'.format(ua_max, user_data['ua']))
return user_data
def on_post(self, req, resp, site_name):
if not site_name:
site_name = self.get_default_site_name(self.method_label)
logger.debug("use defaule site_name "
"{} {}".format(site_name, self.method_label))
if not self.site_exists(site_name, self.method_label):
resp.status = falcon.HTTP_404
message = 'site name not found:{0}'.format(site_name)
resp.body = message
logger.error(message)
return
logger.debug("{}".format(req.query_string))
resp.set_header('Access-Control-Allow-Origin', '*')
# resp.set_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS')
# resp.set_header('Access-Control-Allow-Headers', '*')
# resp.set_header('X-Content-Type-Options', 'nosniff')
resp.content_type = 'text/plain; charset=UTF-8'
"""
item_dict = { key: (
user_data,
validate_schema
),
}
key is BigQuery's column name
"""
client_rad = self.get_client_rad(req.access_route)
item_dict = {
'dt': (str(datetime.utcnow()),
{'type': 'string'}
),
'sid': (req.get_param('sid', required=True),
{'type': 'string',
'regex': '^[0-9a-f]{1,32}$'}
),
'uid': (req.get_param('uid', required=False, default=""),
{'type': 'string',
'nullable': True,
'empty': True,
'maxlength': 64}
),
'oid': (req.get_param('oid', required=False, default=""),
{'type': 'string',
'maxlength': 16}
),
'rad': (client_rad,
{'validator': self.validate_ip}
),
'ua': (req.user_agent,
{'type': 'string',
'nullable': True,
'empty': True,
'maxlength': 512}
),
            'dev': (self.get_client_device(req.user_agent),
{'type': 'string',
'nullable': True,
'empty': True,
'maxlength': 16}
),
'url': (req.get_param('url', required=False),
{'type': 'string',
'nullable': True,
'empty': True,
'maxlength': 1024},
),
# user name
'name': (req.get_param('name', required=False),
{'type': 'string',
'nullable': True,
'empty': True,
'maxlength': 1024},
),
# company name
'cname': (req.get_param('cname', required=False),
{'type': 'string',
'nullable': True,
'empty': True,
'maxlength': 1024},
),
'email': (req.get_param('email', required=False),
{'type': 'string',
'nullable': True,
'empty': True,
'maxlength': 1024},
),
'tel': (req.get_param('tel', required=False),
{'type': 'string',
'nullable': True,
'empty': True,
'maxlength': 1024},
),
'jsn': (req.get_param('jsn', required=False),
{'validator': self.validate_json,
'nullable': True,
'empty': True,
'maxlength': 10000},
),
'enc': (req.get_param('enc', required=False, default=""),
{'type': 'string',
'empty': True,
'nullable': True,
'regex': '^[0-9a-zA-Z\-(\)_\s]*$',
'maxlength': 16}
)
}
user_data = self.adjust_user_data({k: v[0]
for k, v in item_dict.items()})
v = Validator({k: v[1] for k, v in item_dict.items()})
validate_result = v.validate(user_data)
if validate_result:
resp.status = falcon.HTTP_200
else:
if req.get_param('debug', required=False):
resp.body = "{}".format(v.errors)
logger.error(resp.body)
resp.status = falcon.HTTP_400
return
user_data['jsn'] = self.clean_json(user_data['jsn'])
redis_data = json.dumps(user_data, sort_keys=True, separators=(',', ':'))
redis_result = None
try:
redis_result = self.write_to_redis(site_name, redis_data)
except RedisWritingError:
resp.status = falcon.HTTP_500
self.publish_to_redis(site_name, redis_data)
try:
publish2gopub(site_name, redis_data)
except Exception as e:
logger.error("gopub failed:{}".format(e))
if req.get_param('debug', required=False):
logger.info("site_name:{}\n"
"user_data:{}\n"
"validate errors:{}\n"
"redis result: \n"
"".format(site_name,
user_data,
v.errors,
redis_result))
else:
resp.body = ""
| 37.396739 | 81 | 0.444993 | 634 | 6,881 | 4.659306 | 0.26183 | 0.064997 | 0.044685 | 0.063981 | 0.340555 | 0.263372 | 0.179756 | 0.131686 | 0.131686 | 0.131686 | 0 | 0.016481 | 0.426828 | 6,881 | 183 | 82 | 37.601093 | 0.732505 | 0.039965 | 0 | 0.4 | 0 | 0 | 0.119444 | 0.007835 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013333 | false | 0 | 0.053333 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |