hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3ec03f18a5a848881af36a1d5ceb7207605662f7 | 3,428 | py | Python | huaweicloud-sdk-iam/huaweicloudsdkiam/v3/model/agency_token_user_domain.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 64 | 2020-06-12T07:05:07.000Z | 2022-03-30T03:32:50.000Z | huaweicloud-sdk-iam/huaweicloudsdkiam/v3/model/agency_token_user_domain.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 11 | 2020-07-06T07:56:54.000Z | 2022-01-11T11:14:40.000Z | huaweicloud-sdk-iam/huaweicloudsdkiam/v3/model/agency_token_user_domain.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 24 | 2020-06-08T11:42:13.000Z | 2022-03-04T06:44:08.000Z | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class AgencyTokenUserDomain:
    """Model of the delegating account (party A) carried inside an agency token.

    Attributes:
        openapi_types (dict): maps attribute name to attribute type.
        attribute_map (dict): maps attribute name to its JSON key in the
            API definition.
    """

    sensitive_list = []

    openapi_types = {
        'id': 'str',
        'name': 'str'
    }

    attribute_map = {
        'id': 'id',
        'name': 'name'
    }

    def __init__(self, id=None, name=None):
        """AgencyTokenUserDomain - a model defined in huaweicloud sdk"""
        self._id = None
        self._name = None
        self.discriminator = None
        # Go through the property setters so future validation hooks apply.
        self.id = id
        self.name = name

    @property
    def id(self):
        """Gets the id of this AgencyTokenUserDomain.

        Account ID of the delegating party A.

        :return: The id of this AgencyTokenUserDomain.
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this AgencyTokenUserDomain.

        Account ID of the delegating party A.

        :param id: The id of this AgencyTokenUserDomain.
        :type: str
        """
        self._id = id

    @property
    def name(self):
        """Gets the name of this AgencyTokenUserDomain.

        Account name of the delegating party A.

        :return: The name of this AgencyTokenUserDomain.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this AgencyTokenUserDomain.

        Account name of the delegating party A.

        :param name: The name of this AgencyTokenUserDomain.
        :type: str
        """
        self._name = name

    def to_dict(self):
        """Return the model properties as a dict, masking sensitive fields."""
        result = {}
        for attr_name, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr_name)
            if isinstance(value, list):
                result[attr_name] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr_name] = value.to_dict()
            elif isinstance(value, dict):
                result[attr_name] = {
                    key: (val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()
                }
            elif attr_name in self.sensitive_list:
                result[attr_name] = "****"
            else:
                result[attr_name] = value
        return result

    def to_str(self):
        """Return the JSON string representation of the model."""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Two instances are equal when all their attributes match."""
        if not isinstance(other, AgencyTokenUserDomain):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| 24.485714 | 79 | 0.531505 |
3d942ac896791be5c6cdacb07b8396e3a1a8391e | 20,017 | py | Python | LibVQ/learnable_index/learnable_index.py | staoxiao/LibVQ | f844c60055ace872279daa272b0bad1005c02e2b | [
"MIT"
] | 4 | 2022-03-30T01:39:59.000Z | 2022-03-30T06:20:41.000Z | LibVQ/learnable_index/learnable_index.py | staoxiao/LibVQ | f844c60055ace872279daa272b0bad1005c02e2b | [
"MIT"
] | null | null | null | LibVQ/learnable_index/learnable_index.py | staoxiao/LibVQ | f844c60055ace872279daa272b0bad1005c02e2b | [
"MIT"
] | null | null | null | import logging
import os
import pickle
import shutil
import time
from typing import List, Dict, Type, Union
import faiss
import numpy
import numpy as np
import torch
import torch.multiprocessing as mp
from torch.optim import Optimizer
from transformers import AdamW
from LibVQ.base_index import FaissIndex, IndexConfig
from LibVQ.dataset import write_rel
from LibVQ.models import LearnableVQ
from LibVQ.train import train_model
class LearnableIndex(FaissIndex):
    # A FaissIndex whose IVF centers and PQ codebooks can be finetuned with a
    # LearnableVQ model and then written back into the underlying faiss index.
    def __init__(self,
                 index_method: str,
                 init_index_file: str = None,
                 init_index_type: str = 'faiss',
                 ivf_centers_num: int = 10000,
                 subvector_num: int = 32,
                 subvector_bits: int = 8,
                 dist_mode: str = 'ip',
                 doc_embeddings: np.ndarray = None,
                 emb_size: int = 768,
                 config: IndexConfig = None
                 ):
        """
        finetune the index

        :param index_method: The type of index, e.g., ivf_pq, ivf_opq, pq, opq
        :param init_index_file: Create the learnable idex from the faiss index file; if is None, it will create a faiss index and save it
        :param init_index_type: How to interpret init_index_file: 'faiss' or 'SPANN'
        :param ivf_centers_num: The number of post lists
        :param subvector_num: The number of codebooks
        :param subvector_bits: The number of codewords for each codebook
        :param dist_mode: Metric to calculate the distance between query and doc
        :param doc_embeddings: Embeddings of docs, needed when there is no a trained index in init_index_file
        :param emb_size: Dim of embeddings
        :param config: Config of index. Default is None.
        """
        # NOTE(review): `super(LearnableIndex).__init__()` builds an *unbound*
        # super object, so FaissIndex.__init__ never actually runs here.
        # Likely intended `super(LearnableIndex, self).__init__()` — confirm
        # FaissIndex.__init__ accepts a no-argument call before changing.
        super(LearnableIndex).__init__()

        if init_index_file is None or not os.path.exists(init_index_file):
            # No usable index on disk: train a fresh faiss index from the doc
            # embeddings and persist it so later runs can reuse it.
            logging.info(f"generating the init index by faiss")
            faiss_index = FaissIndex(doc_embeddings=doc_embeddings,
                                     emb_size=emb_size,
                                     ivf_centers_num=ivf_centers_num,
                                     subvector_num=subvector_num,
                                     subvector_bits=subvector_bits,
                                     index_method=index_method,
                                     dist_mode=dist_mode)

            if init_index_file is None:
                init_index_file = f'./temp/{index_method}_ivf{ivf_centers_num}_pq{subvector_num}x{subvector_bits}.index'
                os.makedirs('./temp', exist_ok=True)
            logging.info(f"save the init index to {init_index_file}")
            faiss_index.save_index(init_index_file)
            self.index = faiss_index.index
            # NOTE(review): this branch never assigns self.learnable_vq, yet
            # check_index_parameters() below reads it unconditionally — verify
            # whether a LearnableVQ should be constructed here as well.
        else:
            if init_index_type == 'SPANN':
                logging.info(f"loading the init SPANN index from {init_index_file}")
                # SPANN indexes are handled entirely by LearnableVQ; there is
                # no in-memory faiss index to keep.
                self.index = None
                self.learnable_vq = LearnableVQ(config, init_index_file=init_index_file, init_index_type='SPANN',
                                                index_method=index_method, dist_mode=dist_mode)
            else:
                logging.info(f"loading the init faiss index from {init_index_file}")
                self.index = faiss.read_index(init_index_file)
                self.learnable_vq = LearnableVQ(config, init_index_file=init_index_file,
                                                index_method=index_method, dist_mode=dist_mode)

        self.check_index_parameters(self.learnable_vq, ivf_centers_num, subvector_num, subvector_bits, init_index_file,
                                    index_method)

    def check_index_parameters(self,
                               vq_model: LearnableVQ,
                               ivf_centers_num: int,
                               subvector_num: int,
                               subvector_bits: int,
                               init_index_file: str,
                               index_method: str):
        # Sanity-check that the requested hyper-parameters match the ones stored
        # in the loaded index; a silent mismatch would corrupt finetuning.
        if 'ivf' in index_method:
            if ivf_centers_num is not None and vq_model.ivf.ivf_centers_num != ivf_centers_num:
                raise ValueError(
                    f"The ivf_centers_num :{vq_model.ivf.ivf_centers_num} of index from {init_index_file} is not equal to you set: {ivf_centers_num}. "
                    f"please use the correct saved index or set it None to create a new faiss index")

        if 'pq' in index_method:
            if subvector_num is not None and vq_model.pq.subvector_num != subvector_num:
                raise ValueError(
                    f"The subvector_num :{vq_model.pq.subvector_num} of index from {init_index_file} is not equal to you set: {subvector_num}. "
                    f"please use the correct saved index or set it None to create a new faiss index")
            if subvector_bits is not None and vq_model.pq.subvector_bits != subvector_bits:
                raise ValueError(
                    f"The subvector_bits :{vq_model.pq.subvector_bits} of index from {init_index_file} is not equal to you set: {subvector_bits}. "
                    f"please use the correct saved index or set it None to create a new faiss index")

    def update_index_with_ckpt(self,
                               ckpt_file: str = None,
                               saved_ckpts_path: str = None,
                               doc_embeddings: numpy.ndarray = None):
        '''
        Update the index based on the saved ckpt

        :param ckpt_file: The trained ckpt file. If set None, it will select the latest ckpt in saved_ckpts_path.
        :param saved_ckpts_path: The path to save the ckpts
        :param doc_embeddings: embeddings of docs
        :return:
        '''
        if ckpt_file is None:
            assert saved_ckpts_path is not None
            ckpt_file = self.get_latest_ckpt(saved_ckpts_path)

        logging.info(f"updating index based on {ckpt_file}")

        # Each artifact is optional: only overwrite the parts the ckpt contains.
        ivf_file = os.path.join(ckpt_file, 'ivf_centers.npy')
        if os.path.exists(ivf_file):
            logging.info(f"loading ivf centers from {ivf_file}")
            center_vecs = np.load(ivf_file)
            self.update_ivf(center_vecs)

        codebook_file = os.path.join(ckpt_file, 'codebook.npy')
        if os.path.exists(codebook_file):
            logging.info(f"loading codebook from {codebook_file}")
            codebook = np.load(codebook_file)
            self.update_pq(codebook=codebook, doc_embeddings=doc_embeddings)

    def update_ivf(self,
                   center_vecs: numpy.ndarray):
        # Copy finetuned IVF centroids into the faiss coarse quantizer in place.
        if isinstance(self.index, faiss.IndexPreTransform):
            # OPQ-style indexes wrap the IVF index inside a pre-transform.
            ivf_index = faiss.downcast_index(self.index.index)
            coarse_quantizer = faiss.downcast_index(ivf_index.quantizer)
        else:
            coarse_quantizer = faiss.downcast_index(self.index.quantizer)

        faiss.copy_array_to_vector(
            center_vecs.ravel(),
            coarse_quantizer.xb)

    def update_pq(self,
                  codebook: numpy.ndarray,
                  doc_embeddings: numpy.ndarray):
        # Copy the finetuned PQ codebook into the index, then re-add every doc
        # so its quantized code is recomputed with the new codebook.
        if isinstance(self.index, faiss.IndexPreTransform):
            ivf_index = faiss.downcast_index(self.index.index)
            faiss.copy_array_to_vector(
                codebook.ravel(),
                ivf_index.pq.centroids)
        else:
            faiss.copy_array_to_vector(
                codebook.ravel(),
                self.index.pq.centroids)

        logging.info(f"updating the quantized results of docs' embeddings")
        # Assumes docs occupy the contiguous id range [0, len(doc_embeddings)).
        self.index.remove_ids(faiss.IDSelectorRange(0, len(doc_embeddings)))
        self.index.add(doc_embeddings)

    def get_latest_ckpt(self, saved_ckpts_path: str):
        # Return the 'epoch_<e>_step_<s>' sub-folder with the largest (epoch, step).
        if len(os.listdir(saved_ckpts_path)) == 0: raise IOError(f"There is no ckpt in path: {saved_ckpts_path}")

        latest_epoch, latest_step = 0, 0
        for ckpt in os.listdir(saved_ckpts_path):
            if 'epoch' in ckpt and 'step' in ckpt:
                name = ckpt.split('_')
                epoch, step = int(name[1]), int(name[3])
                if epoch > latest_epoch:
                    latest_epoch, latest_step = epoch, step
                elif epoch == latest_epoch:
                    latest_step = max(latest_step, step)
        assert latest_epoch > 0 or latest_step > 0
        return os.path.join(saved_ckpts_path, f"epoch_{latest_epoch}_step_{latest_step}")

    def get_temp_checkpoint_save_path(self):
        # Timestamped scratch folder under ./temp for transient checkpoints.
        time_str = time.strftime('%m_%d-%H-%M-%S', time.localtime(time.time()))
        return f'./temp/{time_str}'

    def load_embedding(self,
                       emb: Union[str, numpy.ndarray],
                       emb_size: int):
        # Accept either an in-memory array or a '.npy'/'.memmap' filename;
        # memmaps are reshaped to (num_vectors, emb_size) without loading fully.
        if isinstance(emb, str):
            assert 'npy' in emb or 'memmap' in emb
            if 'memmap' in emb:
                embeddings = np.memmap(emb, dtype=np.float32, mode="r")
                return embeddings.reshape(-1, emb_size)
            elif 'npy' in emb:
                return np.load(emb)
        else:
            return emb

    def fit(self,
            query_embeddings: Union[str, numpy.ndarray],
            doc_embeddings: Union[str, numpy.ndarray],
            rel_data: Union[str, Dict[int, List[int]]] = None,
            neg_data: Union[str, Dict[int, List[int]]] = None,
            epochs: int = 5,
            per_device_train_batch_size: int = 128,
            per_query_neg_num: int = 1,
            emb_size: int = None,
            warmup_steps_ratio: float = 0.1,
            optimizer_class: Type[Optimizer] = AdamW,
            lr_params: Dict[str, float] = {'encoder_lr': 1e-5, 'pq_lr': 1e-4, 'ivf_lr': 1e-3},
            loss_weight: Dict[str, object] = {'encoder_weight': 1.0, 'pq_weight': 1.0,
                                              'ivf_weight': 'scaled_to_pqloss'},
            temperature: float = 1.0,
            loss_method: str = 'distill',
            weight_decay: float = 0.01,
            max_grad_norm: float = -1,
            show_progress_bar: bool = True,
            checkpoint_path: str = None,
            checkpoint_save_steps: int = None,
            logging_steps: int = 100,
            ):
        """
        Train the index

        :param query_embeddings: Embeddings for each query, also support pass a file('.npy', '.memmap').
        :param doc_embeddings: Embeddigns for each doc, also support pass a filename('.npy', '.memmap').
        :param rel_data: Positive doc ids for each query: {query_id:[doc_id1, doc_id2,...]}, or a tsv file which save the relevance relationship: qeury_id \t doc_id \n.
                         If set None, it will automatically generate the data for training based on the retrieval results.
        :param neg_data: Negative doc ids for each query: {query_id:[doc_id1, doc_id2,...]}, or a pickle file which save the query2neg.
                         If set None, it will randomly sample negative.
        :param epochs: The epochs of training
        :param per_device_train_batch_size: The number of query-doc positive pairs in a batch
        :param per_query_neg_num: The number of negatives for each query
        :param emb_size: Dim of embeddings.
        :param warmup_steps_ratio: The ration of warmup steps
        :param optimizer_class: torch.optim.Optimizer
        :param lr_params: Learning rate for encoder, ivf, and pq
        :param loss_weight: Wight for loss of encoder, ivf, and pq. "scaled_to_pqloss"" means that make the weighted loss closed to the loss of pq module.
        :param temperature: Temperature for softmax
        :param loss_method: We provide two loss: 'contrastive' and 'distill'
        :param weight_decay: Hyper-parameter for Optimizer
        :param max_grad_norm: Used for gradient normalization
        :param checkpoint_path: Folder to save checkpoints during training. If set None, it will create a temp folder.
        :param checkpoint_save_steps: Will save a checkpoint after so many steps
        :param logging_steps: Will show the loss information after so many steps
        :return:
        """
        # Remember whether we created a scratch folder so we can delete it later.
        temp_checkpoint_path = None
        if checkpoint_path is None:
            temp_checkpoint_path = self.get_temp_checkpoint_save_path()
            logging.info(f"The model will be saved into {temp_checkpoint_path}")
            checkpoint_path = temp_checkpoint_path

        query_embeddings = self.load_embedding(query_embeddings, emb_size=emb_size)
        doc_embeddings = self.load_embedding(doc_embeddings, emb_size=emb_size)

        if rel_data is None:
            # generate train data
            logging.info("generating relevance data...")
            rel_data, neg_data = self.generate_virtual_traindata(query_embeddings=query_embeddings, topk=400,
                                                                 nprobe=self.learnable_vq.ivf.ivf_centers_num)

        train_model(model=self.learnable_vq,
                    rel_data=rel_data,
                    epochs=epochs,
                    per_device_train_batch_size=per_device_train_batch_size,
                    per_query_neg_num=per_query_neg_num,
                    neg_data=neg_data,
                    query_embeddings=query_embeddings,
                    doc_embeddings=doc_embeddings,
                    emb_size=emb_size,
                    warmup_steps_ratio=warmup_steps_ratio,
                    optimizer_class=optimizer_class,
                    lr_params=lr_params,
                    loss_weight=loss_weight,
                    temperature=temperature,
                    loss_method=loss_method,
                    weight_decay=weight_decay,
                    max_grad_norm=max_grad_norm,
                    show_progress_bar=show_progress_bar,
                    checkpoint_path=checkpoint_path,
                    checkpoint_save_steps=checkpoint_save_steps,
                    logging_steps=logging_steps,
                    fix_emb='query, doc'
                    )

        # update index
        if self.index is not None:
            self.update_index_with_ckpt(saved_ckpts_path=checkpoint_path,
                                        doc_embeddings=doc_embeddings)

        # delete temp folder
        if temp_checkpoint_path is not None:
            shutil.rmtree(temp_checkpoint_path)

    def fit_with_multi_gpus(
            self,
            query_embeddings_file: str,
            doc_embeddings_file: str,
            rel_file: str = None,
            neg_file: str = None,
            epochs: int = 5,
            per_device_train_batch_size: int = 128,
            per_query_neg_num: int = 1,
            cross_device_sample: bool = True,
            emb_size: int = None,
            warmup_steps_ratio: float = 0.1,
            optimizer_class: Type[Optimizer] = AdamW,
            lr_params: Dict[str, float] = {'encoder_lr': 1e-5, 'pq_lr': 1e-4, 'ivf_lr': 1e-3},
            loss_weight: Dict[str, object] = {'encoder_weight': 1.0, 'pq_weight': 1.0,
                                              'ivf_weight': 'scaled_to_pqloss'},
            temperature: float = 1.0,
            loss_method: str = 'distill',
            weight_decay: float = 0.01,
            max_grad_norm: float = -1,
            show_progress_bar: bool = True,
            checkpoint_path: str = None,
            checkpoint_save_steps: int = None,
            logging_steps: int = 100,
            master_port: str = '12345'
    ):
        """
        Train the VQ model with multi GPUs and update the index

        :param query_embeddings_file: Filename('.npy', '.memmap') to query embeddings.
        :param doc_embeddings_file: Filename('.npy', '.memmap') to doc embeddings.
        :param rel_file: A tsv file which save the relevance relationship: qeury_id \t doc_id \n.
                         If set None, it will automatically generate the data for training based on the retrieval results.
        :param neg_file: A pickle file which save the query2neg. if set None, it will randomly sample negative.
                         If set None, it will randomly sample negative.
        :param epochs: The epochs of training
        :param per_device_train_batch_size: The number of query-doc positive pairs in a batch
        :param per_query_neg_num: The number of negatives for each query
        :param emb_size: Dim of embeddings.
        :param warmup_steps_ratio: The ration of warmup steps
        :param optimizer_class: torch.optim.Optimizer
        :param lr_params: Learning rate for encoder, ivf, and pq
        :param loss_weight: Wight for loss of encoder, ivf, and pq. "scaled_to_pqloss"" means that make the weighted loss closed to the loss of pq module.
        :param temperature: Temperature for softmax
        :param loss_method: We provide two loss: 'contrastive' and 'distill'
        :param weight_decay: Hyper-parameter for Optimizer
        :param max_grad_norm: Used for gradient normalization
        :param checkpoint_path: Folder to save checkpoints during training. If set None, it will create a temp folder.
        :param checkpoint_save_steps: Will save a checkpoint after so many steps
        :param logging_steps: Will show the loss information after so many steps
        :param master_port: setting for distributed training
        :return:
        """
        # One process per visible GPU; torch.distributed rendezvous on localhost.
        os.environ['MASTER_ADDR'] = 'localhost'
        os.environ['MASTER_PORT'] = master_port
        world_size = torch.cuda.device_count()

        temp_checkpoint_path = None
        if checkpoint_path is None:
            temp_checkpoint_path = self.get_temp_checkpoint_save_path()
            logging.info(f"The model will be saved into {temp_checkpoint_path}")
            checkpoint_path = temp_checkpoint_path

        doc_embeddings = self.load_embedding(doc_embeddings_file, emb_size=emb_size)
        if rel_file is None:
            # generate train data
            logging.info("generating relevance data...")
            query_embeddings = self.load_embedding(query_embeddings_file, emb_size=emb_size)
            rel_data, neg_data = self.generate_virtual_traindata(query_embeddings=query_embeddings, topk=400,
                                                                 nprobe=self.learnable_vq.ivf.ivf_centers_num)

            # Worker processes read the train data from disk, so persist it first.
            logging.info(f"saving relevance data to {checkpoint_path}...")
            rel_file = os.path.join(checkpoint_path, 'train-virtual_rel.tsv')
            neg_file = os.path.join(checkpoint_path, f"train-queries-virtual_hardneg.pickle")
            write_rel(rel_file, rel_data)
            pickle.dump(neg_data, open(neg_file, 'wb'))

        # Positional args must stay in sync with train_model's signature.
        mp.spawn(train_model,
                 args=(self.learnable_vq,
                       None,
                       rel_file,
                       None,
                       None,
                       None,
                       None,
                       world_size,
                       epochs,
                       per_device_train_batch_size,
                       per_query_neg_num,
                       cross_device_sample,
                       neg_file,
                       query_embeddings_file,
                       doc_embeddings_file,
                       emb_size,
                       warmup_steps_ratio,
                       optimizer_class,
                       lr_params,
                       loss_weight,
                       temperature,
                       loss_method,
                       'query, doc',
                       weight_decay,
                       max_grad_norm,
                       show_progress_bar,
                       checkpoint_path,
                       checkpoint_save_steps,
                       logging_steps
                       ),
                 nprocs=world_size,
                 join=True)

        # update index
        if self.index is not None:
            self.update_index_with_ckpt(saved_ckpts_path=checkpoint_path,
                                        doc_embeddings=doc_embeddings)

        # delete temp folder
        if temp_checkpoint_path is not None:
            shutil.rmtree(temp_checkpoint_path)
| 48.467312 | 168 | 0.592347 |
a2d85fcebab57e41b6f81e00820097b402789a82 | 2,598 | py | Python | util.py | mdanie2/221Project | d02e63274dc0b69891a28fba77ecaea8b21df3d6 | [
"MIT"
] | null | null | null | util.py | mdanie2/221Project | d02e63274dc0b69891a28fba77ecaea8b21df3d6 | [
"MIT"
] | null | null | null | util.py | mdanie2/221Project | d02e63274dc0b69891a28fba77ecaea8b21df3d6 | [
"MIT"
] | null | null | null | import os, random, operator, sys
from collections import Counter
def dotProduct(d1, d2):
    """
    @param dict d1: a feature vector represented by a mapping from a feature (string) to a weight (float).
    @param dict d2: same as d1
    @return float: the dot product between d1 and d2
    """
    # Iterate over the smaller vector so we touch as few keys as possible.
    if len(d1) >= len(d2):
        small, big = d2, d1
    else:
        small, big = d1, d2
    return sum(weight * big.get(feature, 0) for feature, weight in small.items())
def increment(d1, scale, d2):
    """
    Implements d1 += scale * d2 for sparse vectors.
    @param dict d1: the feature vector which is mutated.
    @param float scale
    @param dict d2: a feature vector.
    """
    for feature, weight in d2.items():
        d1[feature] = d1.get(feature, 0) + weight * scale
def readExamples(path):
    '''
    Reads a set of training examples.

    Each line of the file must look like "<label (+1 or -1)> <sentence>".
    Returns a list of (sentence, label) pairs.
    '''
    examples = []
    # `with` ensures the file handle is closed (the original leaked it).
    with open(path) as f:
        for line in f:
            # Format of each line: <output label (+1 or -1)> <input sentence>
            y, x = line.split(' ', 1)
            examples.append((x.strip(), int(y)))
    # Parenthesized single-argument print is valid on both Python 2 and 3
    # (the original used the Python-2-only print statement).
    print('Read %d examples from %s' % (len(examples), path))
    return examples
def evaluatePredictor(examples, predictor):
    '''
    predictor: a function that takes an x and returns a predicted y.
    Given a list of examples (x, y), makes predictions based on |predictor| and
    returns the fraction of misclassified examples.
    '''
    mistakes = sum(1 for x, y in examples if predictor(x) != y)
    return 1.0 * mistakes / len(examples)
def outputWeights(weights, path):
    """Write |weights| to |path|, one 'feature<TAB>weight' line per feature,
    sorted by decreasing weight. Also prints the weight count to stdout."""
    print("%d weights" % len(weights))
    # `with` closes the file even on error; `key=lambda item` replaces the
    # Python-2-only tuple-parameter lambda, and write() replaces `print >>out`.
    with open(path, 'w') as out:
        for f, v in sorted(weights.items(), key=lambda item: -item[1]):
            out.write('\t'.join([f, str(v)]) + '\n')
def verbosePredict(phi, y, weights, out):
    """Predict the label for feature vector |phi| and write an explanation to |out|.

    Writes a verdict line, then one line per feature sorted by decreasing
    contribution v * w.  Returns the predicted label (+1 or -1).
    Rewritten with out.write()/key=lambda so it runs on both Python 2 and 3.
    """
    yy = 1 if dotProduct(phi, weights) >= 0 else -1
    if y:
        out.write('Truth: %s, Prediction: %s [%s]\n' % (y, yy, 'CORRECT' if y == yy else 'WRONG'))
    else:
        out.write('Prediction: %s\n' % yy)
    for f, v in sorted(phi.items(), key=lambda item: -item[1] * weights.get(item[0], 0)):
        w = weights.get(f, 0)
        out.write("%-30s%s * %s = %s\n" % (f, v, w, v * w))
    return yy
def outputErrorAnalysis(examples, featureExtractor, weights, path):
    """Write a per-example error analysis for |examples| to |path|."""
    # Fix: honor the |path| argument — the original hard-coded 'error-analysis'
    # and ignored the parameter entirely. Also use `with` to guarantee close.
    with open(path, 'w') as out:
        for x, y in examples:
            out.write('=== %s\n' % x)
            verbosePredict(featureExtractor(x), y, weights, out)
def interactivePrompt(featureExtractor, weights):
    """Read sentences from stdin until EOF; print a verbose prediction for each."""
    while True:
        # sys.stdout.write replaces the Python-2-only `print '> ',`; flush so
        # the prompt appears before we block on input.
        sys.stdout.write('> ')
        sys.stdout.flush()
        x = sys.stdin.readline()
        if not x:
            break
        phi = featureExtractor(x)
        verbosePredict(phi, None, weights, sys.stdout)
aa3039a3a6b4cc78db8f2e11b9574ab41eb0f4c8 | 8,822 | py | Python | test/test_tensorboard.py | kevinintel/neural-compressor | b57645566aeff8d3c18dc49d2739a583c072f940 | [
"Apache-2.0"
] | 100 | 2020-12-01T02:40:12.000Z | 2021-09-09T08:14:22.000Z | test/test_tensorboard.py | kevinintel/neural-compressor | b57645566aeff8d3c18dc49d2739a583c072f940 | [
"Apache-2.0"
] | 25 | 2021-01-05T00:16:17.000Z | 2021-09-10T03:24:01.000Z | test/test_tensorboard.py | kevinintel/neural-compressor | b57645566aeff8d3c18dc49d2739a583c072f940 | [
"Apache-2.0"
] | 25 | 2020-12-01T19:07:08.000Z | 2021-08-30T14:20:07.000Z | """Tests for quantization"""
import numpy as np
import unittest
import os
import yaml
import tensorflow as tf
import shutil
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import dtypes
# The graph below is built with TF1-style NodeDef protos, which requires
# eager execution to be disabled.
tf.compat.v1.disable_eager_execution()
def build_fake_yaml():
    """Write 'fake_yaml.yaml': a minimal neural_compressor tuning config
    with `tensorboard: true` so the test can check the dumped event files."""
    fake_yaml = '''
        model:
          name: fake_yaml
          framework: tensorflow
          inputs: input
          outputs: conv3
          device: cpu
        evaluation:
          accuracy:
            metric:
              topk: 1
        tuning:
          tensorboard: true
          strategy:
            name: basic
          exit_policy:
            timeout: 200
          accuracy_criterion:
            relative: 0.01
          workspace:
            path: saved
        '''
    y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
    # Round-trip through a dict so the emitted file is normalized YAML.
    # (The original also called f.close() after the with-block — redundant.)
    with open('fake_yaml.yaml', "w", encoding="utf-8") as f:
        yaml.dump(y, f)
def _float_const_node(name, value):
    """Build a float32 Const NodeDef holding |value| (a numpy array)."""
    node = node_def_pb2.NodeDef()
    node.name = name
    node.op = "Const"
    node.attr['dtype'].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum))
    node.attr['value'].CopyFrom(attr_value_pb2.AttrValue(
        tensor=tensor_util.make_tensor_proto(
            value, value.dtype.type, value.shape)))
    return node


def _conv2d_node(name, inputs):
    """Build a float32 NHWC Conv2D NodeDef with stride 1, dilation 1, SAME padding."""
    node = node_def_pb2.NodeDef()
    node.name = name
    node.op = "Conv2D"
    node.attr['T'].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum))
    node.input.extend(inputs)
    node.attr['strides'].CopyFrom(attr_value_pb2.AttrValue(
        list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])))
    node.attr['dilations'].CopyFrom(attr_value_pb2.AttrValue(
        list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])))
    node.attr['padding'].CopyFrom(attr_value_pb2.AttrValue(s=b'SAME'))
    node.attr['data_format'].CopyFrom(attr_value_pb2.AttrValue(s=b'NHWC'))
    return node


def _bias_add_node(name, inputs):
    """Build a float32 NHWC BiasAdd NodeDef."""
    node = node_def_pb2.NodeDef()
    node.name = name
    node.op = "BiasAdd"
    node.attr['T'].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum))
    node.input.extend(inputs)
    node.attr['data_format'].CopyFrom(attr_value_pb2.AttrValue(s=b'NHWC'))
    return node


def _relu_node(name, inputs):
    """Build a float32 Relu NodeDef."""
    node = node_def_pb2.NodeDef()
    node.op = "Relu"
    node.name = name
    node.attr['T'].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum))
    node.input.extend(inputs)
    return node


def build_fake_model():
    """Build a small conv->bias->relu->conv->bias->relu->conv GraphDef with
    random (non-negative) weights.  The repeated NodeDef construction from the
    original was factored into the _*_node helpers above; node names, attrs,
    input wiring, and the np.random call order are unchanged."""
    input_node = node_def_pb2.NodeDef()
    input_node.name = "input"
    input_node.op = "Placeholder"
    input_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(
        type=dtypes.float32.as_datatype_enum))

    # Random tensors are drawn in the same order as before, so a fixed numpy
    # seed reproduces the same graph.
    conv1_weight_node = _float_const_node(
        "conv1_weights", np.float32(np.abs(np.random.randn(3, 3, 3, 32))))
    conv1_node = _conv2d_node("conv1", [input_node.name, conv1_weight_node.name])
    bias_node = _float_const_node("conv1_bias", np.float32(np.abs(np.random.randn(32))))
    bias_add_node = _bias_add_node("conv1_bias_add", [conv1_node.name, bias_node.name])
    relu_node = _relu_node("relu", [bias_add_node.name])

    conv2_weight_node = _float_const_node(
        "conv2_weights", np.float32(np.abs(np.random.randn(3, 3, 32, 32))))
    conv2_node = _conv2d_node("conv2", [relu_node.name, conv2_weight_node.name])
    bias_node2 = _float_const_node("conv2_bias", np.float32(np.abs(np.random.randn(32))))
    bias_add_node2 = _bias_add_node("conv2_bias_add", [conv2_node.name, bias_node2.name])
    relu_node2 = _relu_node("relu2", [bias_add_node2.name])

    conv3_weight_node = _float_const_node(
        "conv3_weights", np.float32(np.abs(np.random.randn(3, 3, 32, 32))))
    # conv3 is the graph output; it deliberately has no bias or activation.
    conv3_node = _conv2d_node("conv3", [relu_node2.name, conv3_weight_node.name])

    graph = graph_pb2.GraphDef()
    graph.node.extend([input_node,
                       conv1_weight_node,
                       conv1_node,
                       bias_node,
                       bias_add_node,
                       relu_node,
                       conv2_weight_node,
                       conv2_node,
                       bias_node2,
                       bias_add_node2,
                       relu_node2,
                       conv3_weight_node,
                       conv3_node,
                       ])
    return graph
class TestTensorboard(unittest.TestCase):
    """Checks that quantizing with `tensorboard: true` dumps event data under runs/eval."""

    @classmethod
    def setUpClass(self):
        # Build the shared fixture graph and config file once for the class.
        self.constant_graph = build_fake_model()
        build_fake_yaml()

    @classmethod
    def tearDownClass(self):
        # Remove everything the test run created on disk.
        os.remove('fake_yaml.yaml')
        shutil.rmtree("saved", ignore_errors=True)
        shutil.rmtree("runs/", ignore_errors=True)

    @unittest.skipIf(tf.version.VERSION > '2.5.0', " Skip test_bf16_fallback case for tf 2.6.0 and above.")
    def test_run_basic_one_trial(self):
        from neural_compressor.experimental import Quantization, common

        quantizer = Quantization('fake_yaml.yaml')
        dataset = quantizer.dataset('dummy', (1, 224, 224, 3), label=True)
        quantizer.calib_dataloader = common.DataLoader(dataset)
        quantizer.eval_dataloader = common.DataLoader(dataset)
        quantizer.model = self.constant_graph
        quantizer()
        # More than 2 entries under runs/eval means tensorboard dumps were written.
        # (Original: `assertTrue(True if ... else False)` — redundant conditional.)
        self.assertTrue(len(os.listdir("./runs/eval")) > 2)
# Allow running this test module directly: `python test_tensorboard.py`.
if __name__ == "__main__":
    unittest.main()
| 43.034146 | 109 | 0.682158 |
04322b04390c269ca054c952f68be3ba792e2a20 | 3,889 | py | Python | catalog/models.py | Jordon-Chen/Django | 592e7a438e0f3823f5def8bea58f7208ba739372 | [
"MIT"
] | null | null | null | catalog/models.py | Jordon-Chen/Django | 592e7a438e0f3823f5def8bea58f7208ba739372 | [
"MIT"
] | 4 | 2021-04-08T21:59:12.000Z | 2021-06-10T20:31:01.000Z | catalog/models.py | Jordon-Chen/Django | 592e7a438e0f3823f5def8bea58f7208ba739372 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
from datetime import date
# Create your models here.
class Genre(models.Model):
    """A literary genre that books can be tagged with (e.g. Science Fiction)."""

    # Free-text genre label, shown in forms and the admin.
    name = models.CharField(max_length=200, help_text='Enter a book genre (e.g. Science Fiction)')

    def __str__(self):
        """Human-readable representation: the genre's name."""
        return self.name
# Used to generate URLs by reversing the URL patterns
from django.urls import reverse
class Book(models.Model):
    """A book title (as opposed to a specific physical copy — see BookInstance)."""

    title = models.CharField(max_length=200)

    # Foreign Key used because book can only have one author, but authors can have multiple books
    # Author as a string rather than object because it hasn't been declared yet in the file
    author = models.ForeignKey('Author', on_delete=models.SET_NULL, null=True)

    summary = models.TextField(max_length=1000, help_text='Enter a brief description of the book')
    isbn = models.CharField('ISBN', max_length=13, help_text='13 Character <a href="https://www.isbn-international.org/content/what-isbn">ISBN number</a>')

    # ManyToManyField used because genre can contain many books. Books can cover many genres.
    genre = models.ManyToManyField(Genre, help_text='Select a genre for this book')

    def display_genre(self):
        """Comma-separated list of up to three genres, for the admin list view."""
        first_three = self.genre.all()[:3]
        return ', '.join(g.name for g in first_three)

    display_genre.short_description = 'Genre'

    def __str__(self):
        """Human-readable representation: the book's title."""
        return self.title

    def get_absolute_url(self):
        """URL of the detail page for this book."""
        return reverse('book-detail', args=[str(self.id)])
# Required for unique book instances
import uuid
class BookInstance(models.Model):
    """Model representing a specific copy of a book (i.e. that can be borrowed from the library)."""
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, help_text='Unique ID for this particular book across whole library')
    book = models.ForeignKey('Book', on_delete=models.SET_NULL, null=True)
    imprint = models.CharField(max_length=200)
    # Return-due date; null/blank while the copy is not on loan.
    due_back = models.DateField(null=True, blank=True)
    borrower = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, blank=True)
    # (code, human-readable label) choices for the status field below.
    LOAN_STATUS = (
        ('m', 'Maintenance'),
        ('o', 'On loan'),
        ('a', 'Available'),
        ('r', 'Reserved'),
    )
    status = models.CharField(
        max_length=1,
        choices=LOAN_STATUS,
        blank=True,
        default='m',
        help_text='Book availability',
    )
    @property
    def is_overdue(self):
        """True when a due date is set and lies strictly in the past."""
        if self.due_back and date.today() > self.due_back:
            return True
        return False
    class Meta:
        ordering = ['due_back']
        permissions = (("can_mark_returned", "Set book as returned"),)
    def __str__(self):
        """String for representing the Model object."""
        return f'{self.id} ({self.book.title})'
class Author(models.Model):
    """Model representing an author."""
    first_name = models.CharField(max_length=100)
    last_name = models.CharField(max_length=100)
    date_of_birth = models.DateField(null=True, blank=True)
    # Verbose name 'died'; blank while the author is alive/unknown.
    date_of_death = models.DateField('died', null=True, blank=True)
    class Meta:
        # Alphabetical by surname, then first name.
        ordering = ['last_name', 'first_name']
    def get_absolute_url(self):
        """Returns the url to access a particular author instance."""
        return reverse('author-detail', args=[str(self.id)])
    def __str__(self):
        """String for representing the Model object as "Last, First"."""
        return f'{self.last_name}, {self.first_name}'
| 37.038095 | 155 | 0.673952 |
7502587a22652e48aa4549219fb3af6538f34c5e | 539 | py | Python | run.py | jacekkow/docker-plugin-pyipam | 40a1f97aa6f7ef85c73fcf942f7872b61c64a0fd | [
"BSD-3-Clause"
] | 2 | 2021-08-31T11:19:20.000Z | 2021-12-04T14:54:11.000Z | run.py | jacekkow/docker-plugin-pyipam | 40a1f97aa6f7ef85c73fcf942f7872b61c64a0fd | [
"BSD-3-Clause"
] | null | null | null | run.py | jacekkow/docker-plugin-pyipam | 40a1f97aa6f7ef85c73fcf942f7872b61c64a0fd | [
"BSD-3-Clause"
] | 1 | 2021-09-01T01:37:03.000Z | 2021-09-01T01:37:03.000Z | #!/usr/bin/env python3
import logging
import os
import docker_plugin_api.Plugin
import flask
import waitress
app = flask.Flask('pyIPAM')
app.logger.setLevel(logging.DEBUG)
app.register_blueprint(docker_plugin_api.Plugin.app)
import lib.IpamDriver
docker_plugin_api.Plugin.functions.append('IpamDriver')
app.register_blueprint(lib.IpamDriver.app)
if __name__ == '__main__':
if os.environ.get('ENVIRONMENT', 'dev') == 'dev':
app.run(debug=True)
else:
waitress.serve(app, unix_socket='/run/docker/plugins/pyipam.sock', threads=1)
| 22.458333 | 79 | 0.777365 |
062585cb85c09df7456aabd83a4ec82338f4db0c | 2,280 | py | Python | test_project/settings.py | drummonds/django-sql-explorer | 0c2c642008fa04a309edfa07011d2238e57d4e64 | [
"MIT"
] | null | null | null | test_project/settings.py | drummonds/django-sql-explorer | 0c2c642008fa04a309edfa07011d2238e57d4e64 | [
"MIT"
] | null | null | null | test_project/settings.py | drummonds/django-sql-explorer | 0c2c642008fa04a309edfa07011d2238e57d4e64 | [
"MIT"
] | null | null | null | import os
import djcelery
SECRET_KEY = 'shhh'
DEBUG = True
STATIC_URL = '/static/'
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'tmp',
'TEST': {
'NAME': 'tmp'
}
},
'alt': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'tmp2',
'TEST': {
'NAME': 'tmp2'
}
},
'not_registered': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'tmp3',
'TEST': {
'NAME': 'tmp3'
}
}
}
EXPLORER_CONNECTIONS = {
#'Postgres': 'postgres',
#'MySQL': 'mysql',
'SQLite': 'default',
'Another': 'alt'
}
EXPLORER_DEFAULT_CONNECTION = 'default'
ROOT_URLCONF = 'explorer.tests.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.static',
'django.template.context_processors.request',
],
'debug': DEBUG
},
},
]
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'explorer',
'djcelery'
)
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
)
TEST_RUNNER = 'djcelery.contrib.test_runner.CeleryTestSuiteRunner'
djcelery.setup_loader()
CELERY_ALWAYS_EAGER = True
BROKER_BACKEND = 'memory'
# Explorer-specific
EXPLORER_TRANSFORMS = (
('foo', '<a href="{0}">{0}</a>'),
('bar', 'x: {0}')
)
EXPLORER_USER_QUERY_VIEWS = {}
EXPLORER_TASKS_ENABLED = True
EXPLORER_S3_BUCKET = 'thisismybucket.therearemanylikeit.butthisoneismine'
| 23.265306 | 73 | 0.611842 |
7b357e7db7a643085256b512ac222a777eeb4bde | 635 | py | Python | SpiMediaGallery/manage.py | Swiss-Polar-Institute/spi-media-gallery | 2f66f938cbe1a7a25a5971d42abb1b0b5deca31e | [
"MIT"
] | 5 | 2020-02-21T20:38:50.000Z | 2022-02-19T11:00:46.000Z | SpiMediaGallery/manage.py | Swiss-Polar-Institute/spi-media-gallery | 2f66f938cbe1a7a25a5971d42abb1b0b5deca31e | [
"MIT"
] | 23 | 2019-10-01T17:13:39.000Z | 2022-01-21T20:02:26.000Z | SpiMediaGallery/manage.py | Swiss-Polar-Institute/spi-media-gallery | 2f66f938cbe1a7a25a5971d42abb1b0b5deca31e | [
"MIT"
] | 2 | 2022-02-03T08:52:51.000Z | 2022-02-03T08:58:00.000Z | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'SpiMediaGallery.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.863636 | 79 | 0.686614 |
9c3f101ac51c4e7768382eecd5cf237a331e1284 | 812 | py | Python | third_party/blink/renderer/modules/bluetooth/testing/clusterfuzz/PRESUBMIT.py | sarang-apps/darshan_browser | 173649bb8a7c656dc60784d19e7bb73e07c20daa | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 575 | 2015-06-18T23:58:20.000Z | 2022-03-23T09:32:39.000Z | third_party/blink/renderer/modules/bluetooth/testing/clusterfuzz/PRESUBMIT.py | sarang-apps/darshan_browser | 173649bb8a7c656dc60784d19e7bb73e07c20daa | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113 | 2015-05-04T09:58:14.000Z | 2022-01-31T19:35:03.000Z | third_party/blink/renderer/modules/bluetooth/testing/clusterfuzz/PRESUBMIT.py | sarang-apps/darshan_browser | 173649bb8a7c656dc60784d19e7bb73e07c20daa | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 52 | 2015-07-14T10:40:50.000Z | 2022-03-15T01:11:49.000Z | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script that runs tests before uploading a patch."""
def _RunTests(input_api, output_api):
"""Runs all test files in the directory."""
cmd_name = 'all_python_tests'
cmd = ['python', '-m', 'unittest', 'discover', '-p', '*test.py']
test_cmd = input_api.Command(
name=cmd_name, cmd=cmd, kwargs={}, message=output_api.PresubmitError)
if input_api.verbose:
print 'Running ' + cmd_name
return input_api.RunTests([test_cmd])
def CheckChangeOnUpload(input_api, output_api):
return _RunTests(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return _RunTests(input_api, output_api)
| 33.833333 | 77 | 0.715517 |
09afe34c839e40b575ec2c61fb72b4d5e5fea6cc | 6,687 | py | Python | pyof/v0x01/asynchronous/error_msg.py | smythtech/python-openflow-legacy | f4ddb06ac8c98f074c04f027df4b52542e41c123 | [
"MIT"
] | null | null | null | pyof/v0x01/asynchronous/error_msg.py | smythtech/python-openflow-legacy | f4ddb06ac8c98f074c04f027df4b52542e41c123 | [
"MIT"
] | null | null | null | pyof/v0x01/asynchronous/error_msg.py | smythtech/python-openflow-legacy | f4ddb06ac8c98f074c04f027df4b52542e41c123 | [
"MIT"
] | null | null | null | """Defines an Error Message."""
# System imports
from enum import Enum
from pyof.foundation.base import GenericMessage
from pyof.foundation.basic_types import BinaryData, UBInt16
from pyof.foundation.exceptions import PackException
# Do not import new_message_from_header directly to avoid cyclic import.
from pyof.v0x01 import common
from pyof.v0x01.common.header import Header, Type
__all__ = ('ErrorMsg', 'ErrorType', 'BadActionCode', 'BadRequestCode',
'FlowModFailedCode', 'HelloFailedCode', 'PortModFailedCode',
'QueueOpFailedCode')
# Enums
class ErrorType(Enum):
    """Values for 'type' in ofp_error_message (OpenFlow 1.0).

    These values are immutable: they will not change in future versions of the
    protocol (although new values may be added).
    """

    #: Hello protocol failed
    OFPET_HELLO_FAILED = 0
    #: Request was not understood
    OFPET_BAD_REQUEST = 1
    #: Error in action description
    OFPET_BAD_ACTION = 2
    #: Problem in modifying Flow entry
    OFPET_FLOW_MOD_FAILED = 3
    #: Problem in modifying Port entry
    OFPET_PORT_MOD_FAILED = 4
    #: Problem in modifying Queue entry
    OFPET_QUEUE_OP_FAILED = 5
class HelloFailedCode(Enum):
    """Error_msg 'code' values for OFPET_HELLO_FAILED.

    'data' contains an ASCII text string that may give failure details.
    """

    #: No compatible version
    OFPHFC_INCOMPATIBLE = 0
    #: Permissions error
    OFPHFC_EPERM = 1
class BadRequestCode(Enum):
    """Error_msg 'code' values for OFPET_BAD_REQUEST.

    'data' contains at least the first 64 bytes of the failed request.
    """

    #: ofp_header.version not supported.
    OFPBRC_BAD_VERSION = 0
    #: ofp_header.type not supported.
    OFPBRC_BAD_TYPE = 1
    #: ofp_stats_request.type not supported.
    OFPBRC_BAD_STAT = 2
    #: Vendor not supported (in ofp_vendor_header or ofp_stats_request or
    #: ofp_stats_reply).
    OFPBRC_BAD_VENDOR = 3
    #: Vendor subtype not supported.
    OFPBRC_BAD_SUBTYPE = 4
    #: Permissions error.
    OFPBRC_EPERM = 5
    #: Wrong request length for type.
    OFPBRC_BAD_LEN = 6
    #: Specified buffer has already been used.
    OFPBRC_BUFFER_EMPTY = 7
    #: Specified buffer does not exist.
    OFPBRC_BUFFER_UNKNOWN = 8
class BadActionCode(Enum):
    """Error_msg 'code' values for OFPET_BAD_ACTION.

    'data' contains at least the first 64 bytes of the failed request.
    """

    #: Unknown action type
    OFPBAC_BAD_TYPE = 0
    #: Length problem in actions
    OFPBAC_BAD_LEN = 1
    #: Unknown vendor id specified
    OFPBAC_BAD_VENDOR = 2
    #: Unknown action type for vendor id
    OFPBAC_BAD_VENDOR_TYPE = 3
    #: Problem validating output action
    OFPBAC_BAD_OUT_PORT = 4
    #: Bad action argument
    OFPBAC_BAD_ARGUMENT = 5
    #: Permissions error
    OFPBAC_EPERM = 6
    #: Can't handle this many actions
    OFPBAC_TOO_MANY = 7
    #: Problem validating output queue
    OFPBAC_BAD_QUEUE = 8
class FlowModFailedCode(Enum):
    """Error_msg 'code' values for OFPET_FLOW_MOD_FAILED.

    'data' contains at least the first 64 bytes of the failed request.
    """

    #: Flow not added because of full tables
    OFPFMFC_ALL_TABLES_FULL = 0
    #: Attempted to add overlapping flow with CHECK_OVERLAP flag set
    OFPFMFC_OVERLAP = 1
    #: Permissions error
    OFPFMFC_EPERM = 2
    #: Flow not added because of non-zero idle/hard timeout
    OFPFMFC_BAD_EMERG_TIMEOUT = 3
    #: Unknown command
    OFPFMFC_BAD_COMMAND = 4
    #: Unsupported action list - cannot process in the order specified
    OFPFMFC_UNSUPPORTED = 5
class PortModFailedCode(Enum):
    """Error_msg 'code' values for OFPET_PORT_MOD_FAILED.

    'data' contains at least the first 64 bytes of the failed request.
    """

    #: Specified port does not exist
    OFPPMFC_BAD_PORT = 0
    #: Specified hardware address is wrong
    OFPPMFC_BAD_HW_ADDR = 1
class QueueOpFailedCode(Enum):
    """Error msg 'code' values for OFPET_QUEUE_OP_FAILED.

    'data' contains at least the first 64 bytes of the failed request.
    """

    #: Invalid port (or port does not exist)
    OFPQOFC_BAD_PORT = 0
    #: Queue does not exist
    OFPQOFC_BAD_QUEUE = 1
    #: Permissions error
    OFPQOFC_EPERM = 2
# Classes
class ErrorMsg(GenericMessage):
    """OpenFlow Error Message.

    This message does not contain a body in addition to the OpenFlow Header.
    """

    #: :class:`~.header.Header`: OpenFlow Header
    header = Header(message_type=Type.OFPT_ERROR)
    # 16-bit error type; values come from the ErrorType enum.
    error_type = UBInt16(enum_ref=ErrorType)
    # 16-bit code whose meaning depends on error_type.
    code = UBInt16()
    # Variable-length payload (raw bytes or an unpacked failed message).
    data = BinaryData()
    def __init__(self, xid=None, error_type=None, code=None, data=b''):
        """Assign parameters to object attributes.

        Args:
            xid (int): To be included in the message header.
            error_type (ErrorType): Error type.
            code (Enum): Error code.
            data (:func:`bytes` or packable): Its content is based on the error
                type and code.
        """
        super().__init__(xid)
        self.error_type = error_type
        self.code = code
        self.data = data
    def pack(self, value=None):
        """Pack the value as a binary representation.

        :attr:`data` is packed before the calling :meth:`.GenericMessage.pack`.
        After that, :attr:`data`'s value is restored.

        Returns:
            bytes: The binary representation.

        Raises:
            PackException: If `value` is neither None nor an ErrorMsg.
        """
        if value is None:
            # Temporarily replace a packable `data` object with its packed
            # bytes, then restore it so pack() has no lasting side effect.
            data_backup = None
            if self.data is not None and not isinstance(self.data, bytes):
                data_backup = self.data
                self.data = self.data.pack()
            packed = super().pack()
            if data_backup is not None:
                self.data = data_backup
            return packed
        elif isinstance(value, type(self)):
            return value.pack()
        else:
            msg = "{} is not an instance of {}".format(value,
                                                       type(self).__name__)
            raise PackException(msg)
    def unpack(self, buff, offset=0):
        """Unpack binary data into python object.

        NOTE(review): the `offset` argument is ignored — the offset is always
        reset to the header size; confirm callers never pass a real offset.
        """
        offset = self.header.get_size()
        super().unpack(buff, offset)
        self.data = self._unpack_data()
    def _unpack_data(self):
        """Decode `data` into the failed OpenFlow message (or empty data)."""
        if self.data == b'':
            return BinaryData()
        # header unpacking
        header = Header()
        header_size = header.get_size()
        header_data = self.data.value[:header_size]
        header.unpack(header_data)
        # message unpacking
        msg = common.utils.new_message_from_header(header)
        msg_data = self.data.value[header_size:]
        msg.unpack(msg_data)
        return msg
| 29.328947 | 79 | 0.654853 |
6460f76185df75b783cf3cd8d4a0de594ab8a013 | 4,935 | py | Python | compose/cli/command.py | killifishd/compose | 3cb2b73d1cf68a8da93c3946729f0635eddd1f58 | [
"Apache-2.0"
] | 3 | 2020-08-12T08:19:40.000Z | 2020-08-12T08:19:43.000Z | compose/cli/command.py | Mason613/compose | 3cb2b73d1cf68a8da93c3946729f0635eddd1f58 | [
"Apache-2.0"
] | null | null | null | compose/cli/command.py | Mason613/compose | 3cb2b73d1cf68a8da93c3946729f0635eddd1f58 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import unicode_literals
import logging
import os
import re
import six
from . import errors
from . import verbose_proxy
from .. import config
from .. import parallel
from ..config.environment import Environment
from ..const import API_VERSIONS
from ..project import Project
from .docker_client import docker_client
from .docker_client import get_tls_version
from .docker_client import tls_config_from_options
from .utils import get_version_info
log = logging.getLogger(__name__)
def project_from_options(project_dir, options):
    """Build a compose Project from parsed (docopt-style) CLI options."""
    environment = Environment.from_env_file(project_dir)
    set_parallel_limit(environment)
    host = options.get('--host')
    if host is not None:
        # Drop a leading '=' (artifact of `--host=tcp://...` style parsing).
        host = host.lstrip('=')
    return get_project(
        project_dir,
        get_config_path_from_options(project_dir, options, environment),
        project_name=options.get('--project-name'),
        verbose=options.get('--verbose'),
        host=host,
        tls_config=tls_config_from_options(options, environment),
        environment=environment,
        override_dir=options.get('--project-directory'),
        compatibility=options.get('--compatibility'),
    )
def set_parallel_limit(environment):
    """Apply COMPOSE_PARALLEL_LIMIT from the environment, if present.

    Raises:
        errors.UserError: when the value is not an integer or is below 2.
    """
    raw_limit = environment.get('COMPOSE_PARALLEL_LIMIT')
    if not raw_limit:
        return
    try:
        limit = int(raw_limit)
    except ValueError:
        raise errors.UserError(
            'COMPOSE_PARALLEL_LIMIT must be an integer (found: "{}")'.format(
                environment.get('COMPOSE_PARALLEL_LIMIT')
            )
        )
    if limit <= 1:
        raise errors.UserError('COMPOSE_PARALLEL_LIMIT can not be less than 2')
    parallel.GlobalLimit.set_global_limit(limit)
def get_config_from_options(base_dir, options):
    """Locate and load the compose config selected by the CLI options."""
    environment = Environment.from_env_file(base_dir)
    config_path = get_config_path_from_options(
        base_dir, options, environment
    )
    return config.load(
        config.find(base_dir, config_path, environment),
        options.get('--compatibility')
    )
def get_config_path_from_options(base_dir, options, environment):
    """Resolve the list of compose file paths to use.

    Precedence: the ``--file`` CLI option wins; otherwise the COMPOSE_FILE
    environment variable is split on COMPOSE_PATH_SEPARATOR (defaulting to
    os.pathsep). Returns None when neither source is set.
    """
    def unicode_paths(paths):
        # `bytes` is `str` on Python 2 and `bytes` on Python 3, i.e. exactly
        # what six.binary_type aliased, so six is unnecessary here.
        return [p.decode('utf-8') if isinstance(p, bytes) else p for p in paths]

    file_option = options.get('--file')
    if file_option:
        return unicode_paths(file_option)

    config_files = environment.get('COMPOSE_FILE')
    if config_files:
        pathsep = environment.get('COMPOSE_PATH_SEPARATOR', os.pathsep)
        return unicode_paths(config_files.split(pathsep))
    return None
def get_client(environment, verbose=False, version=None, tls_config=None, host=None,
               tls_version=None):
    """Construct a Docker API client, optionally wrapped in a logging proxy.

    NOTE(review): the ``tls_version`` parameter is ignored — the value passed
    to docker_client() is re-derived via get_tls_version(environment);
    confirm no caller relies on passing it explicitly.
    """
    client = docker_client(
        version=version, tls_config=tls_config, host=host,
        environment=environment, tls_version=get_tls_version(environment)
    )
    if verbose:
        version_info = six.iteritems(client.version())
        log.info(get_version_info('full'))
        log.info("Docker base_url: %s", client.base_url)
        log.info("Docker version: %s",
                 ", ".join("%s=%s" % item for item in version_info))
        # Wrap the client so every API call is logged.
        return verbose_proxy.VerboseProxy('docker', client)
    return client
def get_project(project_dir, config_path=None, project_name=None, verbose=False,
                host=None, tls_config=None, environment=None, override_dir=None,
                compatibility=False):
    """Load the compose configuration and return a constructed Project."""
    if not environment:
        environment = Environment.from_env_file(project_dir)
    config_details = config.find(project_dir, config_path, environment, override_dir)
    project_name = get_project_name(
        config_details.working_dir, project_name, environment
    )
    config_data = config.load(config_details, compatibility)
    # API version pinned for this compose-file version unless explicitly
    # overridden through COMPOSE_API_VERSION.
    api_version = environment.get(
        'COMPOSE_API_VERSION',
        API_VERSIONS[config_data.version])
    client = get_client(
        verbose=verbose, version=api_version, tls_config=tls_config,
        host=host, environment=environment
    )
    with errors.handle_connection_errors(client):
        return Project.from_config(
            project_name, config_data, client, environment.get('DOCKER_DEFAULT_PLATFORM')
        )
def get_project_name(working_dir, project_name=None, environment=None):
    """Return the normalized compose project name.

    The explicit ``project_name`` argument wins, then COMPOSE_PROJECT_NAME
    from the environment. The name is lowercased and stripped of every
    character outside ``[-_a-z0-9]``. When neither source provides a name an
    empty string is returned — deliberately NOT derived from the working
    directory's folder name.
    """
    if not environment:
        environment = Environment.from_env_file(working_dir)
    candidate = project_name or environment.get('COMPOSE_PROJECT_NAME')
    if not candidate:
        return ''
    return re.sub(r'[^-_a-z0-9]', '', candidate.lower())
64e9a1000750940f4ba03052761b5a0173986268 | 1,754 | py | Python | album/models.py | ndonyemark/marksgallery | 4aad6ee9d6348723b41a7011add44543071f670c | [
"MIT"
] | null | null | null | album/models.py | ndonyemark/marksgallery | 4aad6ee9d6348723b41a7011add44543071f670c | [
"MIT"
] | 8 | 2021-03-19T03:59:05.000Z | 2022-03-12T00:31:37.000Z | album/models.py | ndonyemark/marksgallery | 4aad6ee9d6348723b41a7011add44543071f670c | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
class Location(models.Model):
    """A place an image was taken at."""
    image_location = models.CharField(max_length = 30)
    def save_location(self):
        """Persist this location to the database."""
        self.save()
    def delete_location(self):
        """Remove this location from the database."""
        self.delete()
class Category(models.Model):
    """A category that images can belong to."""
    category = models.CharField(max_length = 30)
    def save_category(self):
        """Persist this category to the database."""
        self.save()
    def delete_category(self):
        """Remove this category from the database."""
        self.delete()
class Image(models.Model):
    """A gallery image with its location, categories and upload metadata."""
    image_name = models.CharField(max_length = 30)
    image_description = models.CharField(max_length = 30)
    image_location = models.ForeignKey(Location, on_delete=models.CASCADE)
    image_category = models.ManyToManyField(Category)
    date_posted = models.DateTimeField(auto_now_add=True)
    image_url = models.ImageField(upload_to = 'snaps/', default = "image url")

    class Meta:
        # Oldest first.
        ordering = ['date_posted']

    def save_image(self):
        """Persist this image to the database."""
        self.save()

    def delete_image(self):
        """Remove this image from the database."""
        self.delete()

    @classmethod
    def get_single_image(cls, image_id):
        """Return the image with the given primary key (raises DoesNotExist)."""
        single_image = cls.objects.get(id = image_id)
        return single_image

    @classmethod
    def get_all_images(cls):
        """Return a queryset of every image (ordered per Meta)."""
        images = cls.objects.all()
        return images

    @classmethod
    def filter_by_location(cls, location):
        """Return images matching the given location term.

        Bug fix: previously returned the `location` argument itself instead
        of the filtered queryset.
        """
        # NOTE(review): image_location is a ForeignKey, so this lookup matches
        # against the related primary key; confirm a related-field lookup
        # (e.g. image_location__image_location__icontains) wasn't intended.
        return cls.objects.filter(image_location__icontains = location)

    @classmethod
    def search_by_category(cls, search_term):
        """Return images whose *name* contains the search term."""
        # NOTE(review): despite the method name this filters on image_name,
        # not image_category — behavior preserved for existing callers.
        search_results = cls.objects.filter(image_name__icontains = search_term)
        return search_results
| 27.40625 | 82 | 0.672178 |
c9d7733f9828a2b31eb0df2a5684b087ed96a88d | 1,112 | py | Python | supervised/gradDescent/gradientDescent.py | robotenique/mlAlgorithms | 612d7fb2dcf4430528f63d35d6f1d10a33fee1af | [
"Unlicense"
] | 4 | 2017-08-16T21:37:31.000Z | 2022-02-10T05:37:39.000Z | supervised/gradDescent/gradientDescent.py | robotenique/mlAlgorithms | 612d7fb2dcf4430528f63d35d6f1d10a33fee1af | [
"Unlicense"
] | null | null | null | supervised/gradDescent/gradientDescent.py | robotenique/mlAlgorithms | 612d7fb2dcf4430528f63d35d6f1d10a33fee1af | [
"Unlicense"
] | null | null | null | from computeCost import computeCost
import numpy as np
def gradientDescent(X, y, theta, alpha, num_iters):
"""
Performs gradient descent to learn theta
theta = gradientDescent(x, y, theta, alpha, num_iters) updates theta by
taking num_iters gradient steps with learning rate alpha
"""
# Initialize some useful values
J_history = []
m = y.size # number of training examples
for i in range(num_iters):
hx = hxClosure(theta) # Create a closure with the current theta Value
#Update each theta component simultaneously
for i in range(theta.size):
theta[i] -= alpha*partial_derivative(hx, X, y, i)
J_history.append(computeCost(X, y, theta))
return theta, J_history
def partial_derivative(hx, X, Y, ith):
    """Return the partial derivative of the cost w.r.t. theta[ith].

    Averages (hx(x_i) - y_i) * x_i[ith] over all training examples, where
    hx is the hypothesis function and X holds one example per row.
    """
    m = Y.size
    total = 0
    for row in range(m):
        total += (hx(X[row]) - Y[row]) * X[row][ith]
    return total / m
def hxClosure(thetaVector):
    """Return a hypothesis function h(x) = theta . x bound to thetaVector.

    The returned callable reads thetaVector at call time, so in-place
    updates to the vector are reflected by subsequent calls.
    """
    return lambda Xi: thetaVector.dot(Xi)
| 27.8 | 78 | 0.639388 |
979661980d246731af47beedc12caf6fe17e0597 | 4,474 | py | Python | openpyxl/tests/test_vba.py | zhangyu836/openpyxl | c2735a2a0fd81cf78082008bd4bee0fc84a3b130 | [
"MIT"
] | 12 | 2019-08-07T16:48:21.000Z | 2021-12-13T02:47:22.000Z | openpyxl/tests/test_vba.py | zhangyu836/openpyxl | c2735a2a0fd81cf78082008bd4bee0fc84a3b130 | [
"MIT"
] | 19 | 2019-12-29T05:07:36.000Z | 2021-04-22T18:09:49.000Z | openpyxl/tests/test_vba.py | zhangyu836/openpyxl | c2735a2a0fd81cf78082008bd4bee0fc84a3b130 | [
"MIT"
] | 1 | 2020-05-26T20:33:10.000Z | 2020-05-26T20:33:10.000Z | from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyxl
# Python stdlib imports
from io import BytesIO
import zipfile
# package imports
from openpyxl.reader.excel import load_workbook
from openpyxl.writer.excel import save_virtual_workbook
from openpyxl.xml.functions import fromstring
from openpyxl.xml.constants import CONTYPES_NS
def test_content_types(datadir):
    """Re-saving a macro workbook must not duplicate PartName overrides."""
    datadir.join('reader').chdir()
    fname = 'vba+comments.xlsm'
    wb = load_workbook(fname, keep_vba=True)
    buf = save_virtual_workbook(wb)
    ct = fromstring(zipfile.ZipFile(BytesIO(buf), 'r').open('[Content_Types].xml').read())
    s = set()
    for el in ct.findall("{%s}Override" % CONTYPES_NS):
        pn = el.get('PartName')
        assert pn not in s, 'duplicate PartName in [Content_Types].xml'
        s.add(pn)
def test_save_with_vba(datadir):
    """With keep_vba=True every VBA/ActiveX part must survive a save."""
    datadir.join('reader').chdir()
    fname = 'vba-test.xlsm'
    wb = load_workbook(fname, keep_vba=True)
    buf = save_virtual_workbook(wb)
    files = set(zipfile.ZipFile(BytesIO(buf), 'r').namelist())
    expected = set(['xl/drawings/_rels/vmlDrawing1.vml.rels',
                    'xl/worksheets/_rels/sheet1.xml.rels',
                    '[Content_Types].xml',
                    'xl/drawings/vmlDrawing1.vml',
                    'xl/ctrlProps/ctrlProp1.xml',
                    'xl/vbaProject.bin',
                    'docProps/core.xml',
                    '_rels/.rels',
                    'xl/theme/theme1.xml',
                    'xl/_rels/workbook.xml.rels',
                    'customUI/customUI.xml',
                    'xl/styles.xml',
                    'xl/worksheets/sheet1.xml',
                    'docProps/app.xml',
                    'xl/ctrlProps/ctrlProp2.xml',
                    'xl/workbook.xml',
                    'xl/activeX/activeX2.bin',
                    'xl/activeX/activeX1.bin',
                    'xl/media/image2.emf',
                    'xl/activeX/activeX1.xml',
                    'xl/activeX/_rels/activeX2.xml.rels',
                    'xl/media/image1.emf',
                    'xl/activeX/_rels/activeX1.xml.rels',
                    'xl/activeX/activeX2.xml',
                    ])
    assert files == expected
def test_save_with_saved_comments(datadir):
    """Cell comments previously saved with VBA must be preserved on re-save."""
    datadir.join('reader').chdir()
    fname = 'vba-comments-saved.xlsm'
    wb = load_workbook(fname, keep_vba=True)
    buf = save_virtual_workbook(wb)
    files = set(zipfile.ZipFile(BytesIO(buf), 'r').namelist())
    expected = set([
        'xl/styles.xml',
        'docProps/core.xml',
        'xl/_rels/workbook.xml.rels',
        'xl/drawings/vmlDrawing1.vml',
        'xl/comments/comment1.xml',
        'docProps/app.xml',
        '[Content_Types].xml',
        'xl/worksheets/sheet1.xml',
        'xl/worksheets/_rels/sheet1.xml.rels',
        '_rels/.rels',
        'xl/workbook.xml',
        'xl/theme/theme1.xml'
    ])
    assert files == expected
def test_save_without_vba(datadir):
    """With keep_vba=False, only VBA/ActiveX-related parts may be dropped."""
    datadir.join('reader').chdir()
    fname = 'vba-test.xlsm'
    # Parts that are allowed to disappear when VBA is not preserved.
    vbFiles = set(['xl/activeX/activeX2.xml',
                   'xl/drawings/_rels/vmlDrawing1.vml.rels',
                   'xl/activeX/_rels/activeX1.xml.rels',
                   'xl/drawings/vmlDrawing1.vml',
                   'xl/activeX/activeX1.bin',
                   'xl/media/image1.emf',
                   'xl/vbaProject.bin',
                   'xl/activeX/_rels/activeX2.xml.rels',
                   'xl/worksheets/_rels/sheet1.xml.rels',
                   'customUI/customUI.xml',
                   'xl/media/image2.emf',
                   'xl/ctrlProps/ctrlProp1.xml',
                   'xl/activeX/activeX2.bin',
                   'xl/activeX/activeX1.xml',
                   'xl/ctrlProps/ctrlProp2.xml',
                   'xl/drawings/drawing1.xml'
                   ])
    wb = load_workbook(fname, keep_vba=False)
    buf = save_virtual_workbook(wb)
    files1 = set(zipfile.ZipFile(fname, 'r').namelist())
    # sharedStrings may legitimately be rewritten/omitted, so ignore it.
    files1.discard('xl/sharedStrings.xml')
    files2 = set(zipfile.ZipFile(BytesIO(buf), 'r').namelist())
    difference = files1.difference(files2)
    assert difference.issubset(vbFiles), "Missing files: %s" % ', '.join(difference - vbFiles)
def test_save_same_file(tmpdir, datadir):
    """Saving a keep_vba workbook back onto its own file must not fail."""
    fname = 'vba-test.xlsm'
    p1 = datadir.join('reader').join(fname)
    p2 = tmpdir.join(fname)
    # Work on a copy in a temp dir so the fixture file stays untouched.
    p1.copy(p2)
    tmpdir.chdir()
    wb = load_workbook(fname, keep_vba=True)
    wb.save(fname)
| 36.975207 | 94 | 0.571077 |
a6861caae3879c7b10dcf89b31a191819eb929dc | 2,277 | py | Python | src/endpoints/pypi_check/crud.py | devsetgo/pip-checker | ba7cbe115459224ad9581f80901434e12a21b3bf | [
"MIT"
] | 1 | 2020-11-16T01:31:26.000Z | 2020-11-16T01:31:26.000Z | src/endpoints/pypi_check/crud.py | devsetgo/devtools | ba7cbe115459224ad9581f80901434e12a21b3bf | [
"MIT"
] | 158 | 2020-05-30T01:26:33.000Z | 2021-04-18T00:48:36.000Z | src/endpoints/pypi_check/crud.py | devsetgo/devtools | ba7cbe115459224ad9581f80901434e12a21b3bf | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import random
import uuid
from datetime import datetime
from datetime import timedelta
from loguru import logger
import settings
from com_lib import crud_ops
from com_lib.db_setup import libraries
from com_lib.db_setup import requirements
def get_date():
    """Return a timestamp for new records.

    When demo-data creation is enabled (settings.DEMO_DATA_CREATE == "True"),
    returns a datetime between 1 and 400 days in the past so generated rows
    look historical; otherwise returns the current time.
    """
    if settings.DEMO_DATA_CREATE != "True":
        return datetime.now()
    return datetime.today() - timedelta(days=random.randint(1, 400))
async def store_in_data(store_values: dict):
    """Insert one requirements row built from store_values.

    store_values must contain a 'request_group_id' key (read for logging).
    """
    query = requirements.insert()
    await crud_ops.execute_one_db(query=query, values=store_values)
    rgi = store_values["request_group_id"]
    logger.info(f"Created {rgi}")
async def store_lib_request(json_data: dict, request_group_id: str):
    """Insert a single library row from json_data under request_group_id."""
    now = get_date()  # may be back-dated when demo-data mode is enabled
    query = libraries.insert()
    values = {
        "id": str(uuid.uuid4()),
        "request_group_id": request_group_id,
        "library": json_data["library"],
        "currentVersion": json_data["currentVersion"],
        "newVersion": json_data["newVersion"],
        "dated_created": now,
    }
    await crud_ops.execute_one_db(query=query, values=values)
    logger.info(f"created request_group_id: {request_group_id}")
async def store_lib_data(request_group_id: str, json_data: dict):
    """Bulk-insert one library row per entry of json_data.

    Args:
        request_group_id: Group id tying these rows to a single request.
        json_data: Iterable of dicts with 'library', 'currentVersion' and
            'newVersion' keys.

    Returns:
        The request_group_id, unchanged.
    """
    # NOTE(review): uses datetime.now() directly while store_lib_request()
    # goes through get_date(); confirm whether demo-data back-dating should
    # apply here too. Behavior preserved as-is.
    values: list = [
        {
            "id": str(uuid.uuid4()),
            "request_group_id": request_group_id,
            "library": j["library"],
            "currentVersion": j["currentVersion"],
            "newVersion": j["newVersion"],
            "dated_created": datetime.now(),
        }
        for j in json_data
    ]
    query = libraries.insert()
    await crud_ops.execute_many_db(query=query, values=values)
    logger.info(f"created request_group_id: {request_group_id}")
    return request_group_id
async def get_request_group_id(request_group_id: str):
    """Fetch the first requirements row matching request_group_id."""
    query = requirements.select().where(
        requirements.c.request_group_id == request_group_id
    )
    result = await crud_ops.fetch_one_db(query=query)
    logger.debug(str(result))
    logger.info(f"returning results for {request_group_id}")
    return result
| 28.822785 | 68 | 0.681159 |
af5a8dce69c07909b93a8e04ca2f042138c7df34 | 809 | py | Python | scripts/predict.py | YounessDataV/batch9_dyslexia | ec40ac0b79e7269ddc543e55ddc7a241ca313524 | [
"MIT"
] | null | null | null | scripts/predict.py | YounessDataV/batch9_dyslexia | ec40ac0b79e7269ddc543e55ddc7a241ca313524 | [
"MIT"
] | null | null | null | scripts/predict.py | YounessDataV/batch9_dyslexia | ec40ac0b79e7269ddc543e55ddc7a241ca313524 | [
"MIT"
] | null | null | null | from dyslexia import preprocessing
from dyslexia.io import load_image
from dyslexia.ocr import extract_text_from_image
import pathlib
import cv2
from PIL import Image
import numpy as np
# Input images to OCR and the directory that receives the text hypotheses.
in_path = pathlib.Path("../data/images/")
image_paths = in_path.glob("Sample_0.jpeg")
out_path = pathlib.Path("../data/hypothesis_preprocessing/")
out_path.mkdir(exist_ok=True)
for image_path in image_paths:
    image_orig = load_image(image_path)
    image_no_shadow = preprocessing.remove_shadow(image_orig)
    image_gray = preprocessing.image_to_gray(image_no_shadow, threshold=True)
    # Invert intensities (assumes thresholded output where text should end
    # up dark-on-light — TODO confirm against image_to_gray's convention).
    image_gray = image_gray.max() - image_gray
    # NOTE(review): this writes next to the script (CWD), not into out_path —
    # confirm whether the debug image was meant to go to out_path instead.
    cv2.imwrite(image_path.name, image_gray)
    result = extract_text_from_image(image_gray)
    with open(out_path / f"{image_path.stem}.txt", "w") as f:
        f.write(result)
| 27.896552 | 77 | 0.767614 |
8454f7a4ed10ee1147aedb73240fb8d0a045bbf7 | 1,373 | py | Python | enterprise_manage/urls.py | x315904752/enterprise_manage | ac679d8e7221080dadfdbe6d8ac6ece0b19ed1f1 | [
"MIT"
] | null | null | null | enterprise_manage/urls.py | x315904752/enterprise_manage | ac679d8e7221080dadfdbe6d8ac6ece0b19ed1f1 | [
"MIT"
] | 2 | 2020-06-06T01:09:13.000Z | 2021-06-10T22:26:59.000Z | enterprise_manage/urls.py | x315904752/enterprise_manage | ac679d8e7221080dadfdbe6d8ac6ece0b19ed1f1 | [
"MIT"
] | null | null | null | """enterprise_manage URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
import xadmin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
from rest_framework.routers import DefaultRouter
from rest_framework_jwt.views import ObtainJSONWebToken
from enterprise_manage.apps.user_center import urls as user_center_urls
from enterprise_manage.apps.score_center import urls as score_center_urls
# DRF router: no viewsets registered here, but its root URL still provides
# the browsable-API index page.
router = DefaultRouter()
urlpatterns = [
    # Django admin replacement (xadmin).
    path('xadmin/', xadmin.site.urls),
    # JWT login endpoint; POST credentials, receive a token.
    path('user-login/', ObtainJSONWebToken.as_view()),
    path(r'', include(router.urls)),
    # App-level URL configurations.
    path(r'user/', include(user_center_urls)),
    path(r'score/', include(score_center_urls))
]
# Serve static files (only effective when DEBUG is on; production should use
# the web server instead).
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
6e67dc0ac189cb4b807263e62bd16df01d990e5d | 10,491 | py | Python | Src/StdLib/Lib/test/test_base64.py | cwensley/ironpython2 | f854444e1e08afc8850cb7c1a739a7dd2d10d32a | [
"Apache-2.0"
] | 2,209 | 2016-11-20T10:32:58.000Z | 2022-03-31T20:51:27.000Z | Src/StdLib/Lib/test/test_base64.py | cwensley/ironpython2 | f854444e1e08afc8850cb7c1a739a7dd2d10d32a | [
"Apache-2.0"
] | 1,074 | 2016-12-07T05:02:48.000Z | 2022-03-22T02:09:11.000Z | Src/StdLib/Lib/test/test_base64.py | cwensley/ironpython2 | f854444e1e08afc8850cb7c1a739a7dd2d10d32a | [
"Apache-2.0"
] | 269 | 2017-05-21T04:44:47.000Z | 2022-03-31T16:18:13.000Z | import unittest
from test import test_support
import base64
class LegacyBase64TestCase(unittest.TestCase):
    """Tests for the legacy (pre-RFC-3548) API: encodestring/decodestring
    and the file-object based encode/decode helpers.

    Note: this is Python 2 code — str literals are byte strings here, and
    encodestring/decodestring were removed from the module in Python 3.9.
    """
    def test_encodestring(self):
        eq = self.assertEqual
        eq(base64.encodestring("www.python.org"), "d3d3LnB5dGhvbi5vcmc=\n")
        eq(base64.encodestring("a"), "YQ==\n")
        eq(base64.encodestring("ab"), "YWI=\n")
        eq(base64.encodestring("abc"), "YWJj\n")
        eq(base64.encodestring(""), "")
        # encodestring wraps output at 76 chars, hence the embedded "\n".
        eq(base64.encodestring("abcdefghijklmnopqrstuvwxyz"
                               "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                               "0123456789!@#0^&*();:<>,. []{}"),
           "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
           "RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"
           "Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n")
        # Non-bytes
        eq(base64.encodestring(bytearray('abc')), 'YWJj\n')
    def test_decodestring(self):
        eq = self.assertEqual
        eq(base64.decodestring("d3d3LnB5dGhvbi5vcmc=\n"), "www.python.org")
        eq(base64.decodestring("YQ==\n"), "a")
        eq(base64.decodestring("YWI=\n"), "ab")
        eq(base64.decodestring("YWJj\n"), "abc")
        eq(base64.decodestring("YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
                               "RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"
                               "Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n"),
           "abcdefghijklmnopqrstuvwxyz"
           "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
           "0123456789!@#0^&*();:<>,. []{}")
        eq(base64.decodestring(''), '')
        # Non-bytes
        eq(base64.decodestring(bytearray("YWJj\n")), "abc")
    def test_encode(self):
        # encode() reads from/writes to file-like objects.
        eq = self.assertEqual
        from cStringIO import StringIO
        infp = StringIO('abcdefghijklmnopqrstuvwxyz'
                        'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                        '0123456789!@#0^&*();:<>,. []{}')
        outfp = StringIO()
        base64.encode(infp, outfp)
        eq(outfp.getvalue(),
           'YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE'
           'RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT'
           'Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n')
    def test_decode(self):
        from cStringIO import StringIO
        infp = StringIO('d3d3LnB5dGhvbi5vcmc=')
        outfp = StringIO()
        base64.decode(infp, outfp)
        self.assertEqual(outfp.getvalue(), 'www.python.org')
class BaseXYTestCase(unittest.TestCase):
    """Tests for the modern RFC-3548 API: b64/b32/b16 encode and decode,
    including the standard/urlsafe b64 variants, alternative alphabets,
    case folding, and rejection of invalid input.

    Note: Python 2 code — plain string literals are byte strings.
    """
    def test_b64encode(self):
        eq = self.assertEqual
        # Test default alphabet
        eq(base64.b64encode("www.python.org"), "d3d3LnB5dGhvbi5vcmc=")
        eq(base64.b64encode('\x00'), 'AA==')
        eq(base64.b64encode("a"), "YQ==")
        eq(base64.b64encode("ab"), "YWI=")
        eq(base64.b64encode("abc"), "YWJj")
        eq(base64.b64encode(""), "")
        # Unlike encodestring, b64encode never inserts line breaks.
        eq(base64.b64encode("abcdefghijklmnopqrstuvwxyz"
                            "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                            "0123456789!@#0^&*();:<>,. []{}"),
           "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
           "RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0NT"
           "Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==")
        # Test with arbitrary alternative characters
        eq(base64.b64encode('\xd3V\xbeo\xf7\x1d', altchars='*$'), '01a*b$cd')
        # Non-bytes
        eq(base64.b64encode(bytearray('abcd')), 'YWJjZA==')
        self.assertRaises(TypeError, base64.b64encode,
                          '\xd3V\xbeo\xf7\x1d', altchars=bytearray('*$'))
        # Test standard alphabet
        eq(base64.standard_b64encode("www.python.org"), "d3d3LnB5dGhvbi5vcmc=")
        eq(base64.standard_b64encode("a"), "YQ==")
        eq(base64.standard_b64encode("ab"), "YWI=")
        eq(base64.standard_b64encode("abc"), "YWJj")
        eq(base64.standard_b64encode(""), "")
        eq(base64.standard_b64encode("abcdefghijklmnopqrstuvwxyz"
                                     "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                                     "0123456789!@#0^&*();:<>,. []{}"),
           "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
           "RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0NT"
           "Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==")
        # Non-bytes
        eq(base64.standard_b64encode(bytearray('abcd')), 'YWJjZA==')
        # Test with 'URL safe' alternative characters
        eq(base64.urlsafe_b64encode('\xd3V\xbeo\xf7\x1d'), '01a-b_cd')
        # Non-bytes
        eq(base64.urlsafe_b64encode(bytearray('\xd3V\xbeo\xf7\x1d')), '01a-b_cd')
    def test_b64decode(self):
        eq = self.assertEqual
        eq(base64.b64decode("d3d3LnB5dGhvbi5vcmc="), "www.python.org")
        eq(base64.b64decode('AA=='), '\x00')
        eq(base64.b64decode("YQ=="), "a")
        eq(base64.b64decode("YWI="), "ab")
        eq(base64.b64decode("YWJj"), "abc")
        # Embedded newlines are discarded by the decoder.
        eq(base64.b64decode("YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
                            "RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"
                            "Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ=="),
           "abcdefghijklmnopqrstuvwxyz"
           "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
           "0123456789!@#0^&*();:<>,. []{}")
        eq(base64.b64decode(''), '')
        # Test with arbitrary alternative characters
        eq(base64.b64decode('01a*b$cd', altchars='*$'), '\xd3V\xbeo\xf7\x1d')
        # Non-bytes
        eq(base64.b64decode(bytearray("YWJj")), "abc")
        # Test standard alphabet
        eq(base64.standard_b64decode("d3d3LnB5dGhvbi5vcmc="), "www.python.org")
        eq(base64.standard_b64decode("YQ=="), "a")
        eq(base64.standard_b64decode("YWI="), "ab")
        eq(base64.standard_b64decode("YWJj"), "abc")
        eq(base64.standard_b64decode(""), "")
        eq(base64.standard_b64decode("YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
                                     "RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0NT"
                                     "Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ=="),
           "abcdefghijklmnopqrstuvwxyz"
           "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
           "0123456789!@#0^&*();:<>,. []{}")
        # Non-bytes
        eq(base64.standard_b64decode(bytearray("YWJj")), "abc")
        # Test with 'URL safe' alternative characters
        eq(base64.urlsafe_b64decode('01a-b_cd'), '\xd3V\xbeo\xf7\x1d')
        # Non-bytes
        eq(base64.urlsafe_b64decode(bytearray('01a-b_cd')), '\xd3V\xbeo\xf7\x1d')
    def test_b64decode_padding_error(self):
        # Length not a multiple of 4 -> binascii error surfaced as TypeError.
        self.assertRaises(TypeError, base64.b64decode, 'abc')
    def test_b64decode_invalid_chars(self):
        # issue 1466065: Test some invalid characters.
        tests = ((b'%3d==', b'\xdd'),
                 (b'$3d==', b'\xdd'),
                 (b'[==', b''),
                 (b'YW]3=', b'am'),
                 (b'3{d==', b'\xdd'),
                 (b'3d}==', b'\xdd'),
                 (b'@@', b''),
                 (b'!', b''),
                 (b'YWJj\nYWI=', b'abcab'))
        for bstr, res in tests:
            self.assertEqual(base64.b64decode(bstr), res)
            self.assertEqual(base64.standard_b64decode(bstr), res)
            self.assertEqual(base64.urlsafe_b64decode(bstr), res)
        # Normal alphabet characters not discarded when alternative given
        res = b'\xFB\xEF\xBE\xFF\xFF\xFF'
        self.assertEqual(base64.b64decode(b'++[[//]]', b'[]'), res)
        self.assertEqual(base64.urlsafe_b64decode(b'++--//__'), res)
    def test_b32encode(self):
        eq = self.assertEqual
        eq(base64.b32encode(''), '')
        eq(base64.b32encode('\x00'), 'AA======')
        eq(base64.b32encode('a'), 'ME======')
        eq(base64.b32encode('ab'), 'MFRA====')
        eq(base64.b32encode('abc'), 'MFRGG===')
        eq(base64.b32encode('abcd'), 'MFRGGZA=')
        eq(base64.b32encode('abcde'), 'MFRGGZDF')
        # Non-bytes
        eq(base64.b32encode(bytearray('abcd')), 'MFRGGZA=')
    def test_b32decode(self):
        eq = self.assertEqual
        eq(base64.b32decode(''), '')
        eq(base64.b32decode('AA======'), '\x00')
        eq(base64.b32decode('ME======'), 'a')
        eq(base64.b32decode('MFRA===='), 'ab')
        eq(base64.b32decode('MFRGG==='), 'abc')
        eq(base64.b32decode('MFRGGZA='), 'abcd')
        eq(base64.b32decode('MFRGGZDF'), 'abcde')
        # Non-bytes
        self.assertRaises(TypeError, base64.b32decode, bytearray('MFRGG==='))
    def test_b32decode_casefold(self):
        eq = self.assertEqual
        eq(base64.b32decode('', True), '')
        eq(base64.b32decode('ME======', True), 'a')
        eq(base64.b32decode('MFRA====', True), 'ab')
        eq(base64.b32decode('MFRGG===', True), 'abc')
        eq(base64.b32decode('MFRGGZA=', True), 'abcd')
        eq(base64.b32decode('MFRGGZDF', True), 'abcde')
        # Lower cases
        eq(base64.b32decode('me======', True), 'a')
        eq(base64.b32decode('mfra====', True), 'ab')
        eq(base64.b32decode('mfrgg===', True), 'abc')
        eq(base64.b32decode('mfrggza=', True), 'abcd')
        eq(base64.b32decode('mfrggzdf', True), 'abcde')
        # Expected exceptions
        self.assertRaises(TypeError, base64.b32decode, 'me======')
        # Mapping zero and one
        eq(base64.b32decode('MLO23456'), 'b\xdd\xad\xf3\xbe')
        eq(base64.b32decode('M1023456', map01='L'), 'b\xdd\xad\xf3\xbe')
        eq(base64.b32decode('M1023456', map01='I'), 'b\x1d\xad\xf3\xbe')
    def test_b32decode_error(self):
        self.assertRaises(TypeError, base64.b32decode, 'abc')
        self.assertRaises(TypeError, base64.b32decode, 'ABCDEF==')
    def test_b16encode(self):
        eq = self.assertEqual
        eq(base64.b16encode('\x01\x02\xab\xcd\xef'), '0102ABCDEF')
        eq(base64.b16encode('\x00'), '00')
        # Non-bytes
        eq(base64.b16encode(bytearray('\x01\x02\xab\xcd\xef')), '0102ABCDEF')
    def test_b16decode(self):
        eq = self.assertEqual
        eq(base64.b16decode('0102ABCDEF'), '\x01\x02\xab\xcd\xef')
        eq(base64.b16decode('00'), '\x00')
        # Lower case is not allowed without a flag
        self.assertRaises(TypeError, base64.b16decode, '0102abcdef')
        # Case fold
        eq(base64.b16decode('0102abcdef', True), '\x01\x02\xab\xcd\xef')
        # Non-bytes
        eq(base64.b16decode(bytearray("0102ABCDEF")), '\x01\x02\xab\xcd\xef')
        # Non-alphabet characters
        self.assertRaises(TypeError, base64.b16decode, '0102AG')
        # Incorrect "padding"
        self.assertRaises(TypeError, base64.b16decode, '010')
def test_main():
    # Run every TestCase defined in this module via the py2 regrtest helper.
    test_support.run_unittest(__name__)
if __name__ == '__main__':
test_main()
| 43.35124 | 81 | 0.580402 |
23e08f2fbb12873775643ce07b11ae089402036a | 4,488 | py | Python | temboo/core/Library/Basecamp/UpdateList.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | 7 | 2016-03-07T02:07:21.000Z | 2022-01-21T02:22:41.000Z | temboo/core/Library/Basecamp/UpdateList.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | null | null | null | temboo/core/Library/Basecamp/UpdateList.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | 8 | 2016-06-14T06:01:11.000Z | 2020-04-22T09:21:44.000Z | # -*- coding: utf-8 -*-
###############################################################################
#
# UpdateList
# Updates a specified To-do list record
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class UpdateList(Choreography):

    def __init__(self, temboo_session):
        """
        Create a new instance of the UpdateList Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(UpdateList, self).__init__(temboo_session, '/Library/Basecamp/UpdateList')

    def new_input_set(self):
        """Return an empty input set for this Choreo."""
        return UpdateListInputSet()

    def _make_result_set(self, result, path):
        """Wrap a raw execution result in this Choreo's result-set type."""
        return UpdateListResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        """Create the execution handle for a running instance of this Choreo."""
        return UpdateListChoreographyExecution(session, exec_id, path)
class UpdateListInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the UpdateList
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Each setter simply forwards to InputSet._set_input with the Temboo
    # input name; the py2-style super() calls are kept for compatibility.
    def set_AccountName(self, value):
        """
        Set the value of the AccountName input for this Choreo. ((required, string) A valid Basecamp account name. This is the first part of the account's URL.)
        """
        super(UpdateListInputSet, self)._set_input('AccountName', value)
    def set_Description(self, value):
        """
        Set the value of the Description input for this Choreo. ((optional, string) The new description for the list.)
        """
        super(UpdateListInputSet, self)._set_input('Description', value)
    def set_ListID(self, value):
        """
        Set the value of the ListID input for this Choreo. ((required, integer) The ID for the list to update.)
        """
        super(UpdateListInputSet, self)._set_input('ListID', value)
    def set_MilestoneID(self, value):
        """
        Set the value of the MilestoneID input for this Choreo. ((optional, integer) The ID of an existing milestone to add to the To-Do list.)
        """
        super(UpdateListInputSet, self)._set_input('MilestoneID', value)
    def set_Name(self, value):
        """
        Set the value of the Name input for this Choreo. ((optional, string) The new name for the list.)
        """
        super(UpdateListInputSet, self)._set_input('Name', value)
    def set_Password(self, value):
        """
        Set the value of the Password input for this Choreo. ((required, password) The Basecamp account password. Use the value 'X' when specifying an API Key for the Username input.)
        """
        super(UpdateListInputSet, self)._set_input('Password', value)
    def set_Username(self, value):
        """
        Set the value of the Username input for this Choreo. ((required, string) A Basecamp account username or API Key.)
        """
        super(UpdateListInputSet, self)._set_input('Username', value)
class UpdateListResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the UpdateList Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    # NOTE: parameter name shadows the builtin `str`; kept as-is because it is
    # part of the generated public interface.
    def getJSONFromString(self, str):
        """Parse a JSON string into Python objects."""
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (No response is returned from Basecamp for update requests.)
        """
        # Returns None when Basecamp sent no body (normal for updates).
        return self._output.get('Response', None)
class UpdateListChoreographyExecution(ChoreographyExecution):
    """Execution handle for an in-flight UpdateList Choreo run."""
    def _make_result_set(self, response, path):
        return UpdateListResultSet(response, path)
| 39.716814 | 183 | 0.672237 |
32a48034a36c68b9d274638e22b7d9d50af7541f | 3,786 | py | Python | src/stringify/stringify.py | NSBum/anki_asset_manager | 055d8b6c7226ab0b4db4f4f279fae89dcd88da2a | [
"MIT"
] | null | null | null | src/stringify/stringify.py | NSBum/anki_asset_manager | 055d8b6c7226ab0b4db4f4f279fae89dcd88da2a | [
"MIT"
] | null | null | null | src/stringify/stringify.py | NSBum/anki_asset_manager | 055d8b6c7226ab0b4db4f4f279fae89dcd88da2a | [
"MIT"
] | null | null | null | from hashlib import sha1
from typing import Optional, Union, Literal, Tuple
from ..config_types import (
ScriptSetting,
ConcreteScript,
ScriptInsertion,
ScriptPosition,
Fmt,
)
from ..lib.registrar import get_interface
from ..utils import version
from .condition_parser import get_condition_parser
from .groupify import groupify_script_data
from .indent import indent_lines
from .script_data import stringify_sd, merge_sd
from .package import package, package_for_external
def encapsulate_scripts(scripts, version, indent_size) -> str:
    """Wrap rendered script snippets in the Asset Manager marker <div>.

    Each snippet is indented by *indent_size*; the version attribute is only
    emitted when *version* is non-empty.
    """
    open_tag = '<div id="anki-am" data-name="Assets by ASSET MANAGER"'
    if len(version) > 0:
        open_tag += f' data-version="{version}"'
    open_tag += ">"

    lines = [open_tag]
    lines.extend(indent_lines(snippet, indent_size) for snippet in scripts)
    lines.append("</div>")

    return "\n".join(lines)
def gen_data_attributes(name: str, version: str):
    """Format the data-name/data-version HTML attributes for a script tag."""
    return 'data-name="{}" data-version="{}"'.format(name, version)
def position_does_not_match(script, position: str) -> bool:
    """Return True when *script* should be skipped for this insertion point.

    A script matches its own position exactly; additionally, scripts placed
    "into_template" or "external" apply to both card sides ("question" and
    "answer").
    """
    if script.position == position:
        return False
    injects_into_card = script.position in ("into_template", "external")
    card_side = position in ("question", "answer")
    return not (injects_into_card and card_side)
def get_script(
    script, model_name, cardtype_name, position
) -> ConcreteScript:
    """Resolve *script* to a concrete script, dereferencing meta scripts
    through their registered interface."""
    if isinstance(script, ConcreteScript):
        return script
    interface = get_interface(script.tag)
    return interface.getter(script.id, script.storage)
def get_code(
    script, model_name, cardtype_name, position
) -> str:
    """Return the script's source code; meta scripts generate theirs via the
    registered interface for the given model/card-type/position."""
    if isinstance(script, ConcreteScript):
        return script.code
    interface = get_interface(script.tag)
    return interface.generator(
        script.id,
        script.storage,
        model_name,
        cardtype_name,
        position,
    )
def stringify_setting(
    setting: ScriptSetting,
    model_name: str,
    model_id: int,
    cardtype_name: str,
    position: ScriptInsertion,
) -> list:
    """Render all enabled scripts of *setting* that apply to *position* into a
    list of strings, grouping scripts that share a condition key.

    Returns an empty list when the setting is disabled or only a stub should
    be inserted. (Annotation fixed: this returns a list of strings, not str.)
    """
    the_parser = get_condition_parser(cardtype_name, position)
    script_data = []
    if not setting.enabled or setting.insert_stub:
        return script_data
    for scr in setting.scripts:
        # Meta scripts are dereferenced to their concrete form first.
        script = get_script(
            scr,
            model_name,
            cardtype_name,
            position,
        )
        if (
            not script.enabled
            or position_does_not_match(script, position)
        ):
            continue
        # Evaluate the script's conditions for this card type / position.
        needs_inject, conditions_simplified = the_parser(script.conditions)
        if not needs_inject:
            continue
        tag = gen_data_attributes(
            script.name,
            script.version,
        )
        code = get_code(
            scr,
            model_name,
            cardtype_name,
            position,
        )
        if script.position == "external":
            # External scripts get a stable media filename derived from the
            # model id and a hash of the script name.
            filename = f"_am_{model_id}_{sha1(script.name.encode()).hexdigest()}.js"
            script_data.append(
                package_for_external(
                    tag, script.type, filename, code, conditions_simplified
                )
            )
        else:
            script_data.append(
                package(tag, script.type, script.label, code, conditions_simplified)
            )
    stringified = []
    groups = groupify_script_data(script_data)
    # Only the card-side positions are rendered inside HTML.
    in_html = position in ["question", "answer"]
    for key, group in groups:
        if len(key) == 0:
            # Unconditional scripts are emitted individually.
            stringified.extend(
                [stringify_sd(sd, setting.indent_size, in_html) for sd in group]
            )
        else:
            # Scripts sharing a condition key are merged into one entry.
            stringified.append(
                stringify_sd(merge_sd(key, list(group)), setting.indent_size, in_html)
            )
    return stringified
| 26.851064 | 102 | 0.624142 |
1f45d44474152737af08071b9321b60e9422e513 | 7,538 | py | Python | tests/server/unit/services/test_mapping_service.py | Guillon88/tasking-manager | 8f05eef7cb008c680a82dc9884ab7a83a12ab1f8 | [
"BSD-2-Clause"
] | 1 | 2021-08-02T15:32:31.000Z | 2021-08-02T15:32:31.000Z | tests/server/unit/services/test_mapping_service.py | Guillon88/tasking-manager | 8f05eef7cb008c680a82dc9884ab7a83a12ab1f8 | [
"BSD-2-Clause"
] | null | null | null | tests/server/unit/services/test_mapping_service.py | Guillon88/tasking-manager | 8f05eef7cb008c680a82dc9884ab7a83a12ab1f8 | [
"BSD-2-Clause"
] | null | null | null | import unittest
from server.services.mapping_service import (
MappingService,
Task,
MappingServiceError,
TaskStatus,
ProjectService,
NotFound,
StatsService,
MappingNotAllowed,
UserLicenseError,
)
from server.models.dtos.mapping_dto import MappedTaskDTO, LockTaskDTO
from server.models.postgis.task import TaskHistory, TaskAction, User
from unittest.mock import patch, MagicMock
from server import create_app
class TestMappingService(unittest.TestCase):
    """Unit tests for MappingService task locking/unlocking, using mocks for
    the Task model, project permission checks, stats, and task history."""
    # NOTE(review): these class attributes are bound to the *classes*
    # themselves and are immediately shadowed by instances in setUp();
    # presumably only intended as type hints — confirm and consider removing.
    task_stub = Task
    lock_task_dto = LockTaskDTO
    mapped_task_dto = MappedTaskDTO
    mapping_service = None
    def setUp(self):
        # Push a Flask app context so DB-backed models can be instantiated.
        self.app = create_app()
        self.ctx = self.app.app_context()
        self.ctx.push()
        test_user = User()
        test_user.id = 123456
        test_user.username = "Thinkwhere"
        # A READY task locked by the test user.
        self.task_stub = Task()
        self.task_stub.id = 1
        self.task_stub.project_id = 1
        self.task_stub.task_status = 0
        self.task_stub.locked_by = 123456
        self.task_stub.lock_holder = test_user
        self.lock_task_dto = LockTaskDTO()
        self.lock_task_dto.user_id = 123456
        self.mapped_task_dto = MappedTaskDTO()
        self.mapped_task_dto.status = TaskStatus.MAPPED.name
        self.mapped_task_dto.user_id = 123456
    def tearDown(self):
        self.ctx.pop()
    @patch.object(Task, "get")
    def test_get_task_raises_error_if_task_not_found(self, mock_task):
        mock_task.return_value = None
        with self.assertRaises(NotFound):
            MappingService.get_task(12, 12)
    @patch.object(MappingService, "get_task")
    def test_lock_task_for_mapping_raises_error_if_task_in_invalid_state(
        self, mock_task
    ):
        # Arrange: an already-mapped task cannot be locked for mapping.
        self.task_stub.task_status = TaskStatus.MAPPED.value
        mock_task.return_value = self.task_stub
        # Act / Assert
        with self.assertRaises(MappingServiceError):
            MappingService.lock_task_for_mapping(self.lock_task_dto)
    @patch.object(ProjectService, "is_user_permitted_to_map")
    @patch.object(MappingService, "get_task")
    def test_lock_task_for_mapping_raises_error_if_user_already_has_locked_task(
        self, mock_task, mock_project
    ):
        # Arrange
        mock_task.return_value = self.task_stub
        mock_project.return_value = (
            False,
            MappingNotAllowed.USER_ALREADY_HAS_TASK_LOCKED,
        )
        # Act / Assert
        with self.assertRaises(MappingServiceError):
            MappingService.lock_task_for_mapping(self.lock_task_dto)
    @patch.object(ProjectService, "is_user_permitted_to_map")
    @patch.object(MappingService, "get_task")
    def test_lock_task_for_mapping_raises_error_if_user_has_not_accepted_license(
        self, mock_task, mock_project
    ):
        # Arrange
        mock_task.return_value = self.task_stub
        mock_project.return_value = False, MappingNotAllowed.USER_NOT_ACCEPTED_LICENSE
        # Act / Assert
        with self.assertRaises(UserLicenseError):
            MappingService.lock_task_for_mapping(self.lock_task_dto)
    @patch.object(MappingService, "get_task")
    def test_unlock_of_not_locked_for_mapping_raises_error(self, mock_task):
        # Arrange: task_stub is READY, i.e. not locked for mapping.
        mock_task.return_value = self.task_stub
        # Act / Assert
        with self.assertRaises(MappingServiceError):
            MappingService.unlock_task_after_mapping(MagicMock())
    @patch.object(MappingService, "get_task")
    def test_cant_unlock_a_task_you_dont_own(self, mock_task):
        # Arrange: locked by user 12, DTO user is 123456.
        self.task_stub.task_status = TaskStatus.LOCKED_FOR_MAPPING.value
        self.task_stub.locked_by = 12
        mock_task.return_value = self.task_stub
        # Act / Assert
        with self.assertRaises(MappingServiceError):
            MappingService.unlock_task_after_mapping(self.mapped_task_dto)
    @patch.object(MappingService, "get_task")
    def test_if_new_state_not_acceptable_raise_error(self, mock_task):
        # Arrange: LOCKED_FOR_VALIDATION is not a valid post-mapping status.
        self.task_stub.task_status = TaskStatus.LOCKED_FOR_MAPPING.value
        mock_task.return_value = self.task_stub
        self.mapped_task_dto.status = TaskStatus.LOCKED_FOR_VALIDATION.name
        # Act / Assert
        with self.assertRaises(MappingServiceError):
            MappingService.unlock_task_after_mapping(self.mapped_task_dto)
    @patch.object(Task, "get_per_task_instructions")
    @patch.object(StatsService, "update_stats_after_task_state_change")
    @patch.object(Task, "update")
    @patch.object(TaskHistory, "get_last_status")
    @patch.object(TaskHistory, "update_task_locked_with_duration")
    @patch.object(MappingService, "get_task")
    def test_unlock_with_comment_sets_history(
        self,
        mock_task,
        mock_history,
        mock_update,
        mock_stats,
        mock_instructions,
        mock_state,
    ):
        # Arrange
        self.task_stub.task_status = TaskStatus.LOCKED_FOR_MAPPING.value
        self.mapped_task_dto.comment = "Test comment"
        mock_task.return_value = self.task_stub
        mock_state.return_value = TaskStatus.LOCKED_FOR_MAPPING
        # Act
        test_task = MappingService.unlock_task_after_mapping(self.mapped_task_dto)
        # Assert: the comment is recorded as the most recent history entry.
        self.assertEqual(TaskAction.COMMENT.name, test_task.task_history[-1].action)
        self.assertEqual(test_task.task_history[-1].action_text, "Test comment")
    @patch.object(Task, "get_per_task_instructions")
    @patch.object(StatsService, "update_stats_after_task_state_change")
    @patch.object(Task, "update")
    @patch.object(TaskHistory, "get_last_status")
    @patch.object(TaskHistory, "update_task_locked_with_duration")
    @patch.object(MappingService, "get_task")
    def test_unlock_with_status_change_sets_history(
        self,
        mock_task,
        mock_history,
        mock_update,
        mock_stats,
        mock_instructions,
        mock_state,
    ):
        # Arrange
        self.task_stub.task_status = TaskStatus.LOCKED_FOR_MAPPING.value
        mock_task.return_value = self.task_stub
        mock_state.return_value = TaskStatus.LOCKED_FOR_MAPPING
        # Act
        test_task = MappingService.unlock_task_after_mapping(self.mapped_task_dto)
        # Assert: state change is logged and the task moves to MAPPED.
        self.assertEqual(TaskAction.STATE_CHANGE.name, test_task.task_history[0].action)
        self.assertEqual(test_task.task_history[0].action_text, TaskStatus.MAPPED.name)
        self.assertEqual(TaskStatus.MAPPED.name, test_task.task_status)
    @patch.object(TaskHistory, "get_last_action")
    def test_task_is_undoable_if_last_change_made_by_you(self, last_action):
        # Arrange: last history entry belongs to the requesting user (id 1).
        task_history = TaskHistory(1, 1, 1)
        task_history.user_id = 1
        last_action.return_value = task_history
        task = Task()
        task.task_status = TaskStatus.MAPPED.value
        task.mapped_by = 1
        # Act
        is_undoable = MappingService._is_task_undoable(1, task)
        # Assert
        self.assertTrue(is_undoable)
    @patch.object(TaskHistory, "get_last_action")
    def test_task_is_not_undoable_if_last_change_not_made_by_you(self, last_action):
        # Arrange: last history entry belongs to a different user (id 2).
        task_history = TaskHistory(1, 1, 1)
        task_history.user_id = 2
        last_action.return_value = task_history
        task = Task()
        task.task_status = TaskStatus.MAPPED.value
        task.mapped_by = 1
        # Act
        is_undoable = MappingService._is_task_undoable(1, task)
        # Assert
        self.assertFalse(is_undoable)
| 34.263636 | 88 | 0.698594 |
c9a02753a9f6074ca09ffb07002890609ca8a595 | 3,006 | py | Python | hid2sticklog.py | vodka-bears/sticklog2heatmap | c7e329b973574f89ea898500f2871112e07bcf6c | [
"Apache-2.0"
] | 2 | 2022-02-01T22:54:36.000Z | 2022-02-02T17:22:56.000Z | hid2sticklog.py | vodka-bears/sticklog2heatmap | c7e329b973574f89ea898500f2871112e07bcf6c | [
"Apache-2.0"
] | null | null | null | hid2sticklog.py | vodka-bears/sticklog2heatmap | c7e329b973574f89ea898500f2871112e07bcf6c | [
"Apache-2.0"
] | null | null | null | import hid
import datetime
import csv
import signal
from time import sleep
from sys import exit, argv
# NOTE(review): this module-level `filename` is never updated — main() shadows
# it with a local that sigint() reads via the frame; presumably dead code.
filename = ''
# Report-layout identifiers, selected from the device's manufacturer string.
ERSKY_USB = 0
OPENTX_USB = 1
BF_USB = 2
ELRS_BLE = 3
def to_signed(inp):
    """Interpret *inp* (an unsigned byte, 0-255) as a signed 8-bit
    two's-complement value in the range -128..127."""
    return inp - 256 if inp & 0x80 else inp
def main():
    """Record stick positions from a HID game device into an OpenTX-style CSV.

    Enumerates game-controller HID devices, lets the user pick one if several
    are present, then polls at ~10 Hz forever, appending one CSV row per
    sample. Ctrl+C is handled by sigint(), which reports the written file and
    exits.
    """
    signal.signal(signal.SIGINT, sigint)

    # Keep only game-controller style devices (usage page 5 = Game Controls;
    # usages 4/5/8 = Joystick / Game Pad / Multi-axis Controller).
    devs = [d for d in hid.enumerate() if d['usage_page'] == 5 or d['usage'] in (4, 5, 8)]
    if not devs:
        print('No game devices seem to be connected')
        exit()
    elif len(devs) == 1:
        j_vid = devs[0]['vendor_id']
        j_pid = devs[0]['product_id']
        manufacturer = devs[0]['manufacturer_string']
    else:
        for index, jdev in enumerate(devs):
            print(f"{index+1}) 0x{jdev['vendor_id']:04x}:0x{jdev['product_id']:04x} {jdev['manufacturer_string']} {jdev['product_string']}")
        num = int(input(f'Which device to use (1-{len(devs)}): ')) - 1
        j_vid = devs[num]['vendor_id']
        j_pid = devs[num]['product_id']
        manufacturer = devs[num]['manufacturer_string']

    # sigint() reads this local out of main's frame to report the file name.
    filename = argv[0] + '-' + datetime.datetime.now().strftime('%Y-%m-%d-%H%M%S') + '.csv'
    print('Writing OpenTX style stick log to ' + filename)
    print('Press Crtl+C to stop')

    with hid.Device(j_vid, j_pid) as jstick, open(filename, 'w', newline='') as csvfile:
        csvwriter = csv.DictWriter(csvfile, fieldnames=['Date', 'Time', 'Rud', 'Ele', 'Thr', 'Ail'])
        csvwriter.writeheader()

        # Map the manufacturer string to the HID report layout it uses.
        if manufacturer == 'ERSKY':
            style = ERSKY_USB
        elif manufacturer == 'OpenTX':
            style = OPENTX_USB
        elif manufacturer == 'Betaflight':
            style = BF_USB
        elif manufacturer == 'ELRS':
            style = ELRS_BLE
        else:
            # BUG FIX: was `assert false, ...` — lowercase `false` is a
            # NameError, and an assert would be stripped under `-O` anyway.
            raise ValueError(f"Unrecognised device datastream style, manufacturer = \"{manufacturer}\"")

        while True:
            # Each branch decodes its device's report into signed axis values
            # roughly in the -1024..1023 range (OpenTX convention).
            if style == ERSKY_USB:
                rawdata = jstick.read(10)
                ail = to_signed(rawdata[1]) * 8
                ele = to_signed(rawdata[2]) * 8
                thr = to_signed(rawdata[3]) * 8
                rud = to_signed(rawdata[4]) * 8
            elif style == OPENTX_USB:
                rawdata = jstick.read(20)
                # 16-bit little-endian axes, centred at 1024.
                ail = rawdata[3] + rawdata[4] * 256 - 1024
                ele = rawdata[5] + rawdata[6] * 256 - 1024
                thr = rawdata[7] + rawdata[8] * 256 - 1024
                rud = rawdata[9] + rawdata[10] * 256 - 1024
            elif style == BF_USB:
                rawdata = jstick.read(10)
                # Elevator axis is inverted on Betaflight.
                ail = to_signed(rawdata[0]) << 3
                ele = -to_signed(rawdata[1]) << 3
                thr = to_signed(rawdata[5]) << 3
                rud = to_signed(rawdata[3]) << 3
            elif style == ELRS_BLE:
                rawdata = jstick.read(18)
                # 16-bit little-endian signed values, scaled down by 32.
                ail = ((to_signed(rawdata[2]) << 8) + rawdata[1]) >> 5
                ele = ((to_signed(rawdata[4]) << 8) + rawdata[3]) >> 5
                thr = ((to_signed(rawdata[6]) << 8) + rawdata[5]) >> 5
                rud = ((to_signed(rawdata[8]) << 8) + rawdata[7]) >> 5

            cur_datetime = datetime.datetime.now()
            date_str = cur_datetime.strftime('%Y-%m-%d')
            time_str = cur_datetime.strftime('%H:%M:%S.%f')[:-3]
            csvwriter.writerow({'Date': date_str, 'Time': time_str, 'Ail': ail, 'Ele': ele, 'Thr': thr, 'Rud': rud})
            sleep(0.1)
def sigint(sig, frame):
    """SIGINT handler: report the log file main() was writing, then quit."""
    # main()'s local `filename` is fished out of the interrupted frame.
    written = frame.f_locals['filename']
    print("Written log: " + written)
    exit()
if __name__ == "__main__":
main() | 32.673913 | 131 | 0.630406 |
483fb71b5a78f920b7faf3ea7e6be563bb54da87 | 3,468 | py | Python | multiproc.py | terryyizhong/Fastpitch | 4b4eb0206b9727b9daf36628578943f8915cb5ad | [
"BSD-3-Clause"
] | null | null | null | multiproc.py | terryyizhong/Fastpitch | 4b4eb0206b9727b9daf36628578943f8915cb5ad | [
"BSD-3-Clause"
] | null | null | null | multiproc.py | terryyizhong/Fastpitch | 4b4eb0206b9727b9daf36628578943f8915cb5ad | [
"BSD-3-Clause"
] | null | null | null | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import sys
import subprocess
import torch
def main():
    """Spawn one copy of the wrapped training script per GPU (or per
    `--set-world-size N`), forwarding `--world-size`/`--rank` to each child.

    Only rank 0's stdout is shown. If any worker exits non-zero, all workers
    are terminated and this process exits 1; Ctrl+C terminates and waits for
    all workers before re-raising. Exits via sys.exit with the aggregate
    return code.
    """
    argslist = list(sys.argv)[1:]
    world_size = torch.cuda.device_count()

    # `--set-world-size N` overrides the GPU count and is consumed here
    # (not forwarded to the workers).
    if '--set-world-size' in argslist:
        idx = argslist.index('--set-world-size')
        world_size = int(argslist[idx + 1])
        del argslist[idx + 1]
        del argslist[idx]

    if '--world-size' in argslist:
        argslist[argslist.index('--world-size') + 1] = str(world_size)
    else:
        argslist.append('--world-size')
        argslist.append(str(world_size))

    workers = []
    for i in range(world_size):
        if '--rank' in argslist:
            argslist[argslist.index('--rank') + 1] = str(i)
        else:
            argslist.append('--rank')
            argslist.append(str(i))
        # Suppress stdout for all ranks except 0 to avoid interleaved logs.
        stdout = None if i == 0 else subprocess.DEVNULL
        worker = subprocess.Popen(
            [str(sys.executable)] + argslist, stdout=stdout)
        workers.append(worker)

    returncode = 0
    try:
        # BUG FIX: the original decremented a `pending` counter every time
        # wait(1) returned — but wait() on an already-finished Popen returns
        # immediately on every pass, so finished workers were counted
        # repeatedly and the parent could exit while others still ran.
        # Track the set of finished workers instead.
        finished = set()
        while len(finished) < len(workers):
            for worker in workers:
                if worker in finished:
                    continue
                try:
                    worker_returncode = worker.wait(1)
                except subprocess.TimeoutExpired:
                    continue
                finished.add(worker)
                if worker_returncode != 0 and returncode != 1:
                    # First failure: tear down every worker, report failure.
                    for w in workers:
                        w.terminate()
                    returncode = 1
    except KeyboardInterrupt:
        print('Pressed CTRL-C, TERMINATING')
        for worker in workers:
            worker.terminate()
        for worker in workers:
            worker.wait()
        raise
    sys.exit(returncode)
if __name__ == "__main__":
main()
| 37.695652 | 82 | 0.610727 |
a2d3bdb2561e9c3b4f27ee1ac083d527662f1e7f | 3,124 | py | Python | tests/test_led.py | jonasl/python-periphery | 37d2b6d10fdc0fa7779f94047e82d3bed4e79dac | [
"MIT"
] | 58 | 2020-07-23T09:56:16.000Z | 2022-03-15T23:43:26.000Z | tests/test_led.py | jonasl/python-periphery | 37d2b6d10fdc0fa7779f94047e82d3bed4e79dac | [
"MIT"
] | null | null | null | tests/test_led.py | jonasl/python-periphery | 37d2b6d10fdc0fa7779f94047e82d3bed4e79dac | [
"MIT"
] | 16 | 2020-06-09T15:57:39.000Z | 2022-03-23T05:02:47.000Z | import os
import sys
import time
import periphery
from .asserts import AssertRaises
# Python 3 renamed raw_input() to input(); alias it so the interactive
# prompts below work on both major versions.
if sys.version_info[0] == 3:
    raw_input = input
# Name of the LED under /sys/class/leds to exercise; filled in from argv
# by the __main__ block before the tests run.
led_name = None
def test_arguments():
    """Verify that LED() rejects constructor arguments of the wrong type."""
    print("Starting arguments test...")

    # Each (name, brightness) pair is an invalid type combination.
    for bad_name, bad_brightness in (("abc", "out"), (100, 100)):
        with AssertRaises(TypeError):
            periphery.LED(bad_name, bad_brightness)

    print("Arguments test passed.")
def test_open_close():
    """Exercise open/close plus the write()/read() and brightness interfaces."""
    print("Starting open/close test...")

    # Opening a LED that does not exist must raise LookupError.
    with AssertRaises(LookupError):
        periphery.LED("invalid_led_XXX", 0)

    # Open a real LED and sanity-check its reported properties.
    led = periphery.LED(led_name, 0)
    assert led.name == led_name
    assert led.fd > 0
    assert led.max_brightness > 0

    def settle():
        # Give sysfs a moment to reflect the write before reading back.
        time.sleep(0.01)

    # Integer writes are read back via read().
    led.write(1)
    settle()
    assert led.read() >= 1

    led.write(0)
    settle()
    assert led.read() == 0

    # The brightness property mirrors write()/read().
    led.brightness = 1
    settle()
    assert led.brightness >= 1

    led.brightness = 0
    settle()
    assert led.brightness == 0

    # Booleans map to max_brightness and 0 respectively.
    led.write(True)
    settle()
    assert led.read() == led.max_brightness

    led.write(False)
    settle()
    assert led.read() == 0

    led.close()
    print("Open/close test passed.")
def test_interactive():
    """Toggle the LED and ask a human operator to confirm each state."""
    print("Starting interactive test...")

    led = periphery.LED(led_name, False)

    raw_input("Press enter to continue...")

    # Human-checked string representation.
    print("LED description: {}".format(str(led)))
    assert raw_input("LED description looks ok? y/n ") == "y"

    # Drive the LED through off/on/off/on, confirming each transition.
    for state, prompt in ((False, "LED is off? y/n "),
                          (True, "LED is on? y/n "),
                          (False, "LED is off? y/n "),
                          (True, "LED is on? y/n ")):
        led.write(state)
        assert raw_input(prompt) == "y"

    led.close()
    print("Interactive test passed.")
if __name__ == "__main__":
    # Under CI there is no real LED hardware, so run only the argument checks.
    if os.environ.get("CI") == "true":
        test_arguments()
        sys.exit(0)
    # A LED name (an entry under /sys/class/leds) is required for the
    # remaining tests; print usage and bail out if it is missing.
    if len(sys.argv) < 2:
        print("Usage: python -m tests.test_led <LED name>")
        print("")
        print("[1/4] Arguments test: No requirements.")
        print("[2/4] Open/close test: LED should be real.")
        print("[3/4] Loopback test: No test.")
        print("[4/4] Interactive test: LED should be observed.")
        print("")
        print("Hint: for Raspberry Pi 3, disable triggers for led1:")
        print("    $ echo none > /sys/class/leds/led1/trigger")
        print("Observe led1 (red power LED), and run this test:")
        print("    python -m tests.test_led led1")
        print("")
        sys.exit(1)
    led_name = sys.argv[1]
    # Run the test phases in order; each prints its own progress.
    print("Starting LED tests...")
    test_arguments()
    test_open_close()
    test_interactive()
    print("All LED tests passed.")
| 23.313433 | 69 | 0.612996 |
9d7d0960f01d011ae94e7053508ddde98b9b8c4f | 856 | py | Python | deep_qa-master/tests/layers/backend/repeat_test.py | RTHMaK/RPGOne | 3f3ada7db1762781668bfb2377154fdc00e17212 | [
"Apache-2.0"
] | 1 | 2017-04-11T13:03:55.000Z | 2017-04-11T13:03:55.000Z | deep_qa-master/tests/layers/backend/repeat_test.py | RTHMaK/RPGOne | 3f3ada7db1762781668bfb2377154fdc00e17212 | [
"Apache-2.0"
] | null | null | null | deep_qa-master/tests/layers/backend/repeat_test.py | RTHMaK/RPGOne | 3f3ada7db1762781668bfb2377154fdc00e17212 | [
"Apache-2.0"
] | null | null | null | # pylint: disable=no-self-use,invalid-name
import numpy
from keras.layers import Input
from keras.models import Model
from deep_qa.layers.backend.repeat import Repeat
class TestRepeatLayer:
def test_call_works_on_simple_input(self):
batch_size = 2
input_length = 3
repetitions = 4
input_layer = Input(shape=(input_length,), dtype='float32')
repeat_output = Repeat(axis=1, repetitions=repetitions)(input_layer)
model = Model(inputs=[input_layer], outputs=[repeat_output])
input_tensor = numpy.asarray([[2, 5, 3], [-1, -4, -2]])
repeat_tensor = model.predict([input_tensor])
assert repeat_tensor.shape == (batch_size, repetitions, input_length)
for i in range(repetitions):
numpy.testing.assert_almost_equal(repeat_tensor[:, i, :], [[2, 5, 3], [-1, -4, -2]])
| 38.909091 | 96 | 0.675234 |
5ea3e8a1347c0a9a29b085f97dbabd5e7575facd | 837 | py | Python | libapp/init/loggerd.py | octopi-labs/bazzinga | fcdd8e925cbaa31cacbec10dd1d599e72b84c588 | [
"MIT"
] | null | null | null | libapp/init/loggerd.py | octopi-labs/bazzinga | fcdd8e925cbaa31cacbec10dd1d599e72b84c588 | [
"MIT"
] | 3 | 2021-03-20T00:43:11.000Z | 2022-01-06T22:33:23.000Z | libapp/init/loggerd.py | octopi-labs/bazzinga | fcdd8e925cbaa31cacbec10dd1d599e72b84c588 | [
"MIT"
] | null | null | null | __author__ = 'rahul'
import logging
import os
from logging.handlers import TimedRotatingFileHandler
from libapp.config import libconf
def init_app(app):
if not os.path.exists(libconf.LOG_FILE_PATH):
os.makedirs(libconf.LOG_FILE_PATH)
logfile = os.path.join(libconf.LOG_FILE_PATH, libconf.LOG_FILE_NAME.format(log=app.import_name))
formatter = logging.Formatter(libconf.LOG_FORMATTER)
handler = TimedRotatingFileHandler(logfile, when=libconf.LOG_ROTATION_WHEN, backupCount=libconf.LOG_BACKUP_COUNT,
utc=libconf.LOG_UTC_STATUS)
handler.setLevel(logging.WARNING)
handler.setFormatter(formatter)
# werkzeug log messages
log = logging.getLogger('werkzeug')
log.setLevel(logging.WARNING)
log.addHandler(handler)
app.logger.addHandler(handler)
| 28.862069 | 117 | 0.738351 |
8cb5c8d74b16568c355f542b18e4bf2b4aeba229 | 263 | py | Python | boa3_test/test_sc/interop_test/oracle/OracleRequestFilterMismatchedType.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 25 | 2020-07-22T19:37:43.000Z | 2022-03-08T03:23:55.000Z | boa3_test/test_sc/interop_test/oracle/OracleRequestFilterMismatchedType.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 419 | 2020-04-23T17:48:14.000Z | 2022-03-31T13:17:45.000Z | boa3_test/test_sc/interop_test/oracle/OracleRequestFilterMismatchedType.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 15 | 2020-05-21T21:54:24.000Z | 2021-11-18T06:17:24.000Z | from typing import Any
from boa3.builtin import public
from boa3.builtin.interop.oracle import Oracle
@public
def oracle_call(url: str, callback: str, user_data: Any, gas_for_response: int):
Oracle.request(url, 1234, callback, user_data, gas_for_response)
| 26.3 | 80 | 0.787072 |
011da87584f92403a982030b24a34b9d37920dec | 442 | py | Python | opconsole/views/dashboardView.py | baalkor/timetracking | 35a1650ceffa55e0ff7ef73b63e5f3457dc07612 | [
"Apache-2.0"
] | 1 | 2017-06-05T10:52:13.000Z | 2017-06-05T10:52:13.000Z | opconsole/views/dashboardView.py | baalkor/timetracking | 35a1650ceffa55e0ff7ef73b63e5f3457dc07612 | [
"Apache-2.0"
] | 2 | 2017-05-10T20:47:33.000Z | 2017-05-10T20:49:24.000Z | opconsole/views/dashboardView.py | baalkor/timetracking | 35a1650ceffa55e0ff7ef73b63e5f3457dc07612 | [
"Apache-2.0"
] | null | null | null | from django.urls import reverse_lazy
from django.views.generic import TemplateView
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.shortcuts import render
class DashboardView(PermissionRequiredMixin, TemplateView):
template_name = "opconsole_dashboard.html"
permission_required = 'opconsole.add_employes'
def get(self, request, *args, **kwargs):
return render(request, self.template_name)
| 27.625 | 62 | 0.794118 |
c63a34bdd82640f0e2d074a1240beb8647e84e5e | 22,935 | py | Python | lib/tool_shed/test/functional/test_1420_tool_dependency_environment_inheritance.py | lesperry/Metagenomics | a1d8b7d96b32ab83cebe513e889b6ef82f7c1dd6 | [
"CC-BY-3.0"
] | null | null | null | lib/tool_shed/test/functional/test_1420_tool_dependency_environment_inheritance.py | lesperry/Metagenomics | a1d8b7d96b32ab83cebe513e889b6ef82f7c1dd6 | [
"CC-BY-3.0"
] | 2 | 2020-08-19T18:14:59.000Z | 2020-08-20T01:19:12.000Z | lib/tool_shed/test/functional/test_1420_tool_dependency_environment_inheritance.py | lesperry/Metagenomics | a1d8b7d96b32ab83cebe513e889b6ef82f7c1dd6 | [
"CC-BY-3.0"
] | null | null | null | import logging
from ..base.twilltestcase import common, ShedTwillTestCase
log = logging.getLogger(__name__)
category_name = 'Test 1420 Tool dependency environment variable inheritance'
category_description = 'Test script 1420 for interpolation of inherited environment variables.'
package_atlas_repository_name = 'package_atlas_3_10_1420'
package_bzlib_repository_name = 'package_bzlib_1_0_1420'
package_boost_repository_name = 'package_boost_1_53_1420'
package_numpy_repository_name = 'package_numpy_1_7_1420'
package_rdkit_repository_name = 'package_rdkit_2012_12_1420'
package_lapack_repository_name = 'package_lapack_3_4_1420'
package_atlas_repository_description = 'Automatically Tuned Linear Algebra Software'
package_bzlib_repository_description = 'Contains a tool dependency definition that downloads and compiles version 1.0 of the bzlib library.'
package_boost_repository_description = 'Contains a tool dependency definition that downloads and compiles version 1.53 of the boost C++ libraries'
package_numpy_repository_description = 'Contains a tool dependency definition that downloads and compiles version 1.7 of the the python numpy package'
package_rdkit_repository_description = 'Contains a tool dependency definition that downloads and compiles version 2012-12 of the RDKit cheminformatics and machine-learning package.'
package_lapack_repository_description = 'Linear Algebra PACKage'
package_atlas_repository_long_description = '{}: {}'.format(package_atlas_repository_name, package_atlas_repository_description)
package_bzlib_repository_long_description = '{}: {}'.format(package_bzlib_repository_name, package_bzlib_repository_description)
package_boost_repository_long_description = '{}: {}'.format(package_boost_repository_name, package_boost_repository_description)
package_numpy_repository_long_description = '{}: {}'.format(package_numpy_repository_name, package_numpy_repository_description)
package_rdkit_repository_long_description = '{}: {}'.format(package_rdkit_repository_name, package_rdkit_repository_description)
package_lapack_repository_long_description = '{}: {}'.format(package_lapack_repository_name, package_lapack_repository_description)
'''
1. Create repository package_lapack_3_4_1420
2. Create repository package_atlas_3_10_1420
3. Create repository package_bzlib_1_0_1420
4. Create repository package_boost_1_53_1420
5. Create repository package_numpy_1_7_1420
6. Create repository package_rdkit_2012_12_1420
Repository dependency structure should be as follows:
Repository package_rdkit_2012_12_1420
Repository package_boost_1_53_1420 (prior install required)
Repository package_bzlib_1_0_1420 (prior install required)
Repository package_numpy_1_7_1420 (prior install required)
Repository package_lapack_3_4_1420 (prior install required)
Repository package_atlas_3_10_1420 (prior install required)
8. Install package_rdkit_2012_12 into Galaxy.
9. Verify that the env.sh file for package_rdkit_2012_12_1420 also defines the variables inherited from package_numpy_1_7_1420 and package_boost_1_53_1420.
'''
class TestEnvironmentInheritance(ShedTwillTestCase):
'''Test referencing environment variables that were defined in a separate tool dependency.'''
def test_0000_initiate_users_and_category(self):
"""Create necessary user accounts and login as an admin user."""
self.login(email=common.admin_email, username=common.admin_username)
admin_user = self.test_db_util.get_user(common.admin_email)
assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
self.test_db_util.get_private_role(admin_user)
self.create_category(name=category_name, description=category_description)
self.login(email=common.test_user_2_email, username=common.test_user_2_name)
test_user_2 = self.test_db_util.get_user(common.test_user_2_email)
assert test_user_2 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_2_email
self.test_db_util.get_private_role(test_user_2)
self.login(email=common.test_user_1_email, username=common.test_user_1_name)
test_user_1 = self.test_db_util.get_user(common.test_user_1_email)
assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email
self.test_db_util.get_private_role(test_user_1)
def test_0005_create_lapack_repository(self):
'''Create and populate package_lapack_3_4_1420.'''
'''
This is step 1 - Create repository package_lapack_3_4_1420.
All tool dependency definitions should download and extract a tarball containing precompiled binaries from the local
filesystem and install them into the path specified by $INSTALL_DIR.
'''
category = self.test_db_util.get_category_by_name(category_name)
repository = self.get_or_create_repository(name=package_lapack_repository_name,
description=package_lapack_repository_description,
long_description=package_lapack_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id(category.id),
strings_displayed=[])
# Load the original tool dependency definition into memory, then fill in the __PATH__ placeholder with the
# actual system path where the binary tarball is found.
tool_dependency_path = self.generate_temp_path('1420_tool_dependency', additional_paths=['package_lapack_3_4_1420'])
precompiled_binary_tarball = self.get_filename('1420_files/binary_tarballs/lapack.tar')
edited_tool_dependency_filename = self.get_filename(filepath=tool_dependency_path, filename='tool_dependencies.xml')
original_tool_dependency = self.get_filename('1420_files/package_lapack_3_4_1420/tool_dependencies.xml')
tool_dependency_definition = open(original_tool_dependency).read().replace('__PATH__', precompiled_binary_tarball)
open(edited_tool_dependency_filename, 'w').write(tool_dependency_definition)
# Upload the edited tool dependency definition to the package_lapack_3_4_1420 repository.
self.upload_file(repository,
filename='tool_dependencies.xml',
filepath=tool_dependency_path,
valid_tools_only=True,
uncompress_file=False,
remove_repo_files_not_in_tar=False,
commit_message='Populate package_lapack_3_4_1420 with tool dependency definitions.',
strings_displayed=[],
strings_not_displayed=[])
def test_0010_create_atlas_repository(self):
'''Create and populate package_atlas_3_10_1420.'''
'''
This is step 1 - Create repository package_atlas_3_10_1420.
All tool dependency definitions should download and extract a tarball containing precompiled binaries from the local
filesystem and install them into the path specified by $INSTALL_DIR.
'''
category = self.test_db_util.get_category_by_name(category_name)
repository = self.get_or_create_repository(name=package_atlas_repository_name,
description=package_atlas_repository_description,
long_description=package_atlas_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id(category.id),
strings_displayed=[])
# Load the original tool dependency definition into memory, then fill in the __PATH__ placeholder with the
# actual system path where the binary tarball is found.
tool_dependency_path = self.generate_temp_path('1420_tool_dependency', additional_paths=['package_atlas_3_10_1420'])
precompiled_binary_tarball = self.get_filename('1420_files/binary_tarballs/atlas.tar')
edited_tool_dependency_filename = self.get_filename(filepath=tool_dependency_path, filename='tool_dependencies.xml')
original_tool_dependency = self.get_filename('1420_files/package_atlas_3_10_1420/tool_dependencies.xml')
tool_dependency_definition = open(original_tool_dependency).read().replace('__PATH__', precompiled_binary_tarball)
open(edited_tool_dependency_filename, 'w').write(tool_dependency_definition)
# Upload the edited tool dependency definition to the package_atlas_3_10_1420 repository.
self.upload_file(repository,
filename='tool_dependencies.xml',
filepath=tool_dependency_path,
valid_tools_only=True,
uncompress_file=False,
remove_repo_files_not_in_tar=False,
commit_message='Populate package_atlas_3_10_1420 with tool dependency definitions.',
strings_displayed=[],
strings_not_displayed=[])
def test_0015_create_bzlib_repository(self):
'''Create and populate package_bzlib_1_0_1420.'''
'''
This is step 1 - Create repository package_bzlib_1_0_1420.
All tool dependency definitions should download and extract a tarball containing precompiled binaries from the local
filesystem and install them into the path specified by $INSTALL_DIR.
'''
category = self.test_db_util.get_category_by_name(category_name)
repository = self.get_or_create_repository(name=package_bzlib_repository_name,
description=package_bzlib_repository_description,
long_description=package_bzlib_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id(category.id),
strings_displayed=[])
# Load the original tool dependency definition into memory, then fill in the __PATH__ placeholder with the
# actual system path where the binary tarball is found.
tool_dependency_path = self.generate_temp_path('1420_tool_dependency', additional_paths=['package_bzlib_1_0_1420'])
precompiled_binary_tarball = self.get_filename('1420_files/binary_tarballs/bzlib.tar')
edited_tool_dependency_filename = self.get_filename(filepath=tool_dependency_path, filename='tool_dependencies.xml')
original_tool_dependency = self.get_filename('1420_files/package_bzlib_1_0_1420/tool_dependencies.xml')
tool_dependency_definition = open(original_tool_dependency).read().replace('__PATH__', precompiled_binary_tarball)
open(edited_tool_dependency_filename, 'w').write(tool_dependency_definition)
# Upload the edited tool dependency definition to the package_bzlib_1_0_1420 repository.
self.upload_file(repository,
filename='tool_dependencies.xml',
filepath=tool_dependency_path,
valid_tools_only=True,
uncompress_file=False,
remove_repo_files_not_in_tar=False,
commit_message='Populate package_bzlib_1_0_1420 with tool dependency definitions.',
strings_displayed=[],
strings_not_displayed=[])
def test_0020_create_boost_repository(self):
'''Create and populate package_boost_1_53_1420.'''
'''
This is step 1 - Create repository package_boost_1_53_1420.
All tool dependency definitions should download and extract a tarball containing precompiled binaries from the local
filesystem and install them into the path specified by $INSTALL_DIR.
'''
category = self.test_db_util.get_category_by_name(category_name)
repository = self.get_or_create_repository(name=package_boost_repository_name,
description=package_boost_repository_description,
long_description=package_boost_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id(category.id),
strings_displayed=[])
# Load the original tool dependency definition into memory, then fill in the __PATH__ placeholder with the
# actual system path where the binary tarball is found.
tool_dependency_path = self.generate_temp_path('1420_tool_dependency', additional_paths=['package_boost_1_53_1420'])
precompiled_binary_tarball = self.get_filename('1420_files/binary_tarballs/boost.tar')
edited_tool_dependency_filename = self.get_filename(filepath=tool_dependency_path, filename='tool_dependencies.xml')
original_tool_dependency = self.get_filename('1420_files/package_boost_1_53_1420/tool_dependencies.xml')
tool_dependency_definition = open(original_tool_dependency).read().replace('__PATH__', precompiled_binary_tarball)
open(edited_tool_dependency_filename, 'w').write(tool_dependency_definition)
# Upload the edited tool dependency definition to the package_boost_1_53_1420 repository.
self.upload_file(repository,
filename='tool_dependencies.xml',
filepath=tool_dependency_path,
valid_tools_only=True,
uncompress_file=False,
remove_repo_files_not_in_tar=False,
commit_message='Populate package_boost_1_53_1420 with tool dependency definitions.',
strings_displayed=[],
strings_not_displayed=[])
def test_0025_create_numpy_repository(self):
'''Create and populate package_numpy_1_7_1420.'''
'''
This is step 1 - Create repository package_numpy_1_7_1420.
All tool dependency definitions should download and extract a tarball containing precompiled binaries from the local
filesystem and install them into the path specified by $INSTALL_DIR.
'''
category = self.test_db_util.get_category_by_name(category_name)
repository = self.get_or_create_repository(name=package_numpy_repository_name,
description=package_numpy_repository_description,
long_description=package_numpy_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id(category.id),
strings_displayed=[])
# Load the original tool dependency definition into memory, then fill in the __PATH__ placeholder with the
# actual system path where the binary tarball is found.
tool_dependency_path = self.generate_temp_path('1420_tool_dependency', additional_paths=['package_numpy_1_7_1420'])
precompiled_binary_tarball = self.get_filename('1420_files/binary_tarballs/numpy.tar')
edited_tool_dependency_filename = self.get_filename(filepath=tool_dependency_path, filename='tool_dependencies.xml')
original_tool_dependency = self.get_filename('1420_files/package_numpy_1_7_1420/tool_dependencies.xml')
tool_dependency_definition = open(original_tool_dependency).read().replace('__PATH__', precompiled_binary_tarball)
open(edited_tool_dependency_filename, 'w').write(tool_dependency_definition)
# Upload the edited tool dependency definition to the package_numpy_1_7_1420 repository.
self.upload_file(repository,
filename='tool_dependencies.xml',
filepath=tool_dependency_path,
valid_tools_only=True,
uncompress_file=False,
remove_repo_files_not_in_tar=False,
commit_message='Populate package_numpy_1_7_1420 with tool dependency definitions.',
strings_displayed=[],
strings_not_displayed=[])
def test_0030_create_rdkit_repository(self):
'''Create and populate package_rdkit_2012_12_1420.'''
'''
This is step 1 - Create repository package_rdkit_2012_12_1420.
All tool dependency definitions should download and extract a tarball containing precompiled binaries from the local
filesystem and install them into the path specified by $INSTALL_DIR.
'''
category = self.test_db_util.get_category_by_name(category_name)
repository = self.get_or_create_repository(name=package_rdkit_repository_name,
description=package_rdkit_repository_description,
long_description=package_rdkit_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id(category.id),
strings_displayed=[])
# Load the original tool dependency definition into memory, then fill in the __PATH__ placeholder with the
# actual system path where the binary tarball is found.
tool_dependency_path = self.generate_temp_path('1420_tool_dependency', additional_paths=['package_rdkit_2012_12_1420'])
precompiled_binary_tarball = self.get_filename('1420_files/binary_tarballs/rdkit.tar')
edited_tool_dependency_filename = self.get_filename(filepath=tool_dependency_path, filename='tool_dependencies.xml')
original_tool_dependency = self.get_filename('1420_files/package_rdkit_2012_12_1420/tool_dependencies.xml')
tool_dependency_definition = open(original_tool_dependency).read().replace('__PATH__', precompiled_binary_tarball)
open(edited_tool_dependency_filename, 'w').write(tool_dependency_definition)
# Upload the edited tool dependency definition to the package_rdkit_2012_12_1420 repository.
self.upload_file(repository,
filename='tool_dependencies.xml',
filepath=tool_dependency_path,
valid_tools_only=True,
uncompress_file=False,
remove_repo_files_not_in_tar=False,
commit_message='Populate package_rdkit_2012_12_1420 with tool dependency definitions.',
strings_displayed=[],
strings_not_displayed=[])
def test_0035_install_rdkit_2012_12_repository(self):
'''Install the package_rdkit_2012_12_1420 repository into Galaxy.'''
'''
This is step 4 - Install package_rdkit_2012_12_1420 into Galaxy.
Install package_rdkit_2012_12_1420 with tool dependencies selected to be installed. The result of this should be
package_atlas_3_10_1420, package_bzlib_1_0_1420, package_boost_1_53_1420, package_numpy_1_7_1420, package_rdkit_2012_12_1420,
and package_lapack_3_4_1420 being installed, and an env.sh generated for package_rdkit_2012_12_1420 that
contains environment variables defined in package_boost_1_53_1420 and package_numpy_1_7_1420.
'''
self.galaxy_login(email=common.admin_email, username=common.admin_username)
post_submit_strings_displayed = ['package_rdkit_2012_12_1420', 'package_atlas_3_10_1420', 'package_bzlib_1_0_1420',
'package_numpy_1_7_1420', 'package_lapack_3_4_1420', 'package_boost_1_53_1420']
self.install_repository('package_rdkit_2012_12_1420',
common.test_user_1_name,
category_name,
install_tool_dependencies=True,
post_submit_strings_displayed=post_submit_strings_displayed)
def test_0040_verify_env_sh_contents(self):
'''Check the env.sh file for the appropriate contents.'''
'''
This is step 5 - Verify that the env.sh file for package_rdkit_2012_12_1420 also defines the variables inherited from package_numpy_1_7_1420
and package_boost_1_53_1420. Test for the numpy and boost tool dependency paths.
'''
package_rdkit_repository = self.test_db_util.get_installed_repository_by_name_owner('package_rdkit_2012_12_1420', common.test_user_1_name)
package_numpy_repository = self.test_db_util.get_installed_repository_by_name_owner('package_numpy_1_7_1420', common.test_user_1_name)
package_boost_repository = self.test_db_util.get_installed_repository_by_name_owner('package_boost_1_53_1420', common.test_user_1_name)
rdkit_env_sh = self.get_env_sh_path(tool_dependency_name='rdkit',
tool_dependency_version='2012_12_1',
repository=package_rdkit_repository)
numpy_tool_dependency_path = self.get_tool_dependency_path(tool_dependency_name='numpy',
tool_dependency_version='1.7.1',
repository=package_numpy_repository)
boost_tool_dependency_path = self.get_tool_dependency_path(tool_dependency_name='boost',
tool_dependency_version='1.53.0',
repository=package_boost_repository)
rdkit_env_file_contents = open(rdkit_env_sh).read()
if numpy_tool_dependency_path not in rdkit_env_file_contents or boost_tool_dependency_path not in rdkit_env_file_contents:
message = 'Environment file for package_rdkit_2012_12_1420 does not contain expected path.'
message += '\nExpected:\n{}\n{}\nContents:\n{}'.format(numpy_tool_dependency_path, boost_tool_dependency_path, rdkit_env_file_contents)
raise AssertionError(message)
| 71.671875 | 181 | 0.687944 |
c4c6824182be0b6a0ecfc885ab492e8356593b8f | 29,781 | py | Python | tests/test_z_cmdline.py | anthrotype/tox-venv | 9c5b1efbec7c2ee38b691e7a5b3f94cf51d69eb6 | [
"BSD-3-Clause"
] | null | null | null | tests/test_z_cmdline.py | anthrotype/tox-venv | 9c5b1efbec7c2ee38b691e7a5b3f94cf51d69eb6 | [
"BSD-3-Clause"
] | null | null | null | tests/test_z_cmdline.py | anthrotype/tox-venv | 9c5b1efbec7c2ee38b691e7a5b3f94cf51d69eb6 | [
"BSD-3-Clause"
] | null | null | null | import os
import platform
import re
import subprocess
import sys
import py
import pytest
import tox
from tox._pytestplugin import ReportExpectMock
try:
import json
except ImportError:
import simplejson as json
pytest_plugins = "pytester"
from tox.session import Session # noqa #E402 module level import not at top of file
from tox.config import parseconfig # noqa #E402 module level import not at top of file
from tox_venv.hooks import use_builtin_venv # noqa #E402 module level import not at top of file
def test_report_protocol(newconfig):
config = newconfig([], """
[testenv:mypython]
deps=xy
""")
class Popen:
def __init__(self, *args, **kwargs):
pass
def communicate(self):
return "", ""
def wait(self):
pass
session = Session(config, popen=Popen,
Report=ReportExpectMock)
report = session.report
report.expect("using")
venv = session.getvenv("mypython")
action = session.newaction(venv, "update")
venv.update(action)
report.expect("logpopen")
def test__resolve_pkg(tmpdir, mocksession):
    """_resolve_pkg picks the highest released version and warns about dev/pre-releases.

    Also modernizes the deprecated string-condition form of pytest.raises
    (``pytest.raises(Exc, 'expr')``), which was removed in pytest 5, to the
    context-manager form.
    """
    distshare = tmpdir.join("distshare")
    spec = distshare.join("pkg123-*")
    # Missing distshare directory and missing package are distinct errors.
    with pytest.raises(tox.exception.MissingDirectory):
        mocksession._resolve_pkg(spec)
    distshare.ensure(dir=1)
    with pytest.raises(tox.exception.MissingDependency):
        mocksession._resolve_pkg(spec)
    distshare.ensure("pkg123-1.3.5.zip")
    p = distshare.ensure("pkg123-1.4.5.zip")
    mocksession.report.clear()
    result = mocksession._resolve_pkg(spec)
    assert result == p
    mocksession.report.expect("info", "determin*pkg123*")
    # A newer dev version is ignored but triggers a warning.
    distshare.ensure("pkg123-1.4.7dev.zip")
    mocksession._clearmocks()
    result = mocksession._resolve_pkg(spec)
    mocksession.report.expect("warning", "*1.4.7*")
    assert result == p
    # A pre-release of the same version also loses to the final release.
    mocksession._clearmocks()
    distshare.ensure("pkg123-1.4.5a1.tar.gz")
    result = mocksession._resolve_pkg(spec)
    assert result == p
def test__resolve_pkg_doubledash(tmpdir, mocksession):
    """Package names containing dashes resolve to the released version."""
    distshare = tmpdir.join("distshare")
    released = distshare.ensure("pkg-mine-1.3.0.zip")
    assert mocksession._resolve_pkg(distshare.join("pkg-mine*")) == released
    # An alpha of the same version must not shadow the final release.
    distshare.ensure("pkg-mine-1.3.0a1.zip")
    assert mocksession._resolve_pkg(distshare.join("pkg-mine*")) == released
class TestSession:
    """Tests for tox.session.Session packaging and environment bookkeeping."""

    def test_make_sdist(self, initproj):
        """get_installpkg_path() builds an sdist once, caches it, and a fresh
        Session rebuilds a corrupted one in place."""
        initproj("example123-0.5", filedefs={
            'tests': {'test_hello.py': "def test_hello(): pass"},
            'tox.ini': '''
            '''
        })
        config = parseconfig([])
        session = Session(config)
        sdist = session.get_installpkg_path()
        assert sdist.check()
        assert sdist.ext == ".zip"
        assert sdist == config.distdir.join(sdist.basename)
        # A second call within the same session reuses the cached sdist.
        sdist2 = session.get_installpkg_path()
        assert sdist2 == sdist
        # Corrupt the cached file; a new Session must rebuild it at the same path.
        sdist.write("hello")
        assert sdist.stat().size < 10
        sdist_new = Session(config).get_installpkg_path()
        assert sdist_new == sdist
        assert sdist_new.stat().size > 10

    def test_make_sdist_distshare(self, tmpdir, initproj):
        """When [tox] distshare is set, the built sdist is copied there verbatim."""
        distshare = tmpdir.join("distshare")
        initproj("example123-0.6", filedefs={
            'tests': {'test_hello.py': "def test_hello(): pass"},
            'tox.ini': '''
            [tox]
            distshare=%s
            ''' % distshare
        })
        config = parseconfig([])
        session = Session(config)
        sdist = session.get_installpkg_path()
        assert sdist.check()
        assert sdist.ext == ".zip"
        assert sdist == config.distdir.join(sdist.basename)
        # The distshare copy must exist and be byte-identical to the original.
        sdist_share = config.distshare.join(sdist.basename)
        assert sdist_share.check()
        assert sdist_share.read("rb") == sdist.read("rb"), (sdist_share, sdist)

    def test_log_pcall(self, mocksession):
        """action.popen() writes its output log under config.logdir, non-shell."""
        mocksession.config.logdir.ensure(dir=1)
        assert not mocksession.config.logdir.listdir()
        action = mocksession.newaction(None, "something")
        action.popen(["echo", ])
        match = mocksession.report.getnext("logpopen")
        assert match[1].outpath.relto(mocksession.config.logdir)
        assert match[1].shell is False

    def test_summary_status(self, initproj, capfd):
        """_summary() reports each env's failure string or 'commands succeeded'."""
        initproj("logexample123-0.5", filedefs={
            'tests': {'test_hello.py': "def test_hello(): pass"},
            'tox.ini': '''
            [testenv:hello]
            [testenv:world]
            '''
        })
        config = parseconfig([])
        session = Session(config)
        envs = session.venvlist
        assert len(envs) == 2
        env1, env2 = envs
        # A truthy status marks failure; a falsy one marks success.
        env1.status = "FAIL XYZ"
        assert env1.status
        env2.status = 0
        assert not env2.status
        session._summary()
        out, err = capfd.readouterr()
        exp = "%s: FAIL XYZ" % env1.envconfig.envname
        assert exp in out
        exp = "%s: commands succeeded" % env2.envconfig.envname
        assert exp in out

    def test_getvenv(self, initproj, capfd):
        """getvenv() memoizes per env name and raises LookupError for unknown envs."""
        initproj("logexample123-0.5", filedefs={
            'tests': {'test_hello.py': "def test_hello(): pass"},
            'tox.ini': '''
            [testenv:hello]
            [testenv:world]
            '''
        })
        config = parseconfig([])
        session = Session(config)
        venv1 = session.getvenv("hello")
        venv2 = session.getvenv("hello")
        assert venv1 is venv2
        venv1 = session.getvenv("world")
        venv2 = session.getvenv("world")
        assert venv1 is venv2
        pytest.raises(LookupError, lambda: session.getvenv("qwe"))
# not sure we want this option ATM
def XXX_test_package(cmd, initproj):
    # Disabled test (the XXX_ prefix keeps pytest from collecting it): would
    # verify that "tox package" builds an sdist and reports where it was created.
    initproj("myproj-0.6", filedefs={
        'tests': {'test_hello.py': "def test_hello(): pass"},
        'MANIFEST.in': """
            include doc
            include myproj
            """,
        'tox.ini': ''
    })
    result = cmd("package")
    assert not result.ret
    assert any(re.match(r'.*created sdist package at.*', l) for l in result.outlines)
def test_minversion(cmd, initproj):
    """tox aborts when tox.ini requires a newer tox than the one installed."""
    initproj("interp123-0.5", filedefs={
        'tests': {'test_hello.py': "def test_hello(): pass"},
        'tox.ini': '''
            [tox]
            minversion = 6.0
        '''
    })
    result = cmd("-v")
    expected = r'ERROR: MinVersionError: tox version is .*, required is at least 6.0'
    assert re.match(expected, result.out)
    assert result.ret
def test_notoxini_help_still_works(initproj, cmd):
    """-h prints usage even when tox.ini is missing (the error goes to stderr, exit code 0)."""
    initproj("example123-0.5", filedefs={
        'tests': {'test_hello.py': "def test_hello(): pass"},
    })
    result = cmd("-h")
    assert result.err == "ERROR: toxini file 'tox.ini' not found\n"
    assert result.out.startswith('usage: ')
    assert any('--help' in l for l in result.outlines)
    assert not result.ret

def test_notoxini_help_ini_still_works(initproj, cmd):
    """--help-ini lists ini options even when tox.ini is missing."""
    initproj("example123-0.5", filedefs={
        'tests': {'test_hello.py': "def test_hello(): pass"},
    })
    result = cmd("--help-ini")
    assert any('setenv' in l for l in result.outlines)
    assert not result.ret
def test_envdir_equals_toxini_errors_out(cmd, initproj):
    """envdir pointing at toxinidir must be rejected (tox would otherwise delete the project)."""
    initproj("interp123-0.7", filedefs={
        'tox.ini': '''
            [testenv]
            envdir={toxinidir}
        '''
    })
    result = cmd()
    assert result.outlines[1] == "ERROR: ConfigError: envdir must not equal toxinidir"
    assert re.match(r'ERROR: venv \'python\' in .* would delete project', result.outlines[0])
    assert result.ret

def test_run_custom_install_command_error(cmd, initproj):
    """A non-executable install_command must surface as an invocation error with an exit code."""
    initproj("interp123-0.5", filedefs={
        'tox.ini': '''
            [testenv]
            install_command=./tox.ini {opts} {packages}
        '''
    })
    result = cmd()
    assert re.match(r"ERROR: invocation failed \(errno \d+\), args: \['.*[/\\]tox\.ini",
                    result.outlines[-1])
    assert result.ret
def test_unknown_interpreter_and_env(cmd, initproj):
    """A missing basepython fails with InterpreterNotFound; -e with an unknown name errors."""
    initproj("interp123-0.5", filedefs={
        'tests': {'test_hello.py': "def test_hello(): pass"},
        'tox.ini': '''
            [testenv:python]
            basepython=xyz_unknown_interpreter
            [testenv]
            changedir=tests
        '''
    })
    result = cmd()
    assert result.ret
    assert any('ERROR: InterpreterNotFound: xyz_unknown_interpreter' == l for l in result.outlines)
    result = cmd("-exyz")
    assert result.ret
    assert result.out == "ERROR: unknown environment 'xyz'\n"

def test_unknown_interpreter(cmd, initproj):
    """Without --skip-missing-interpreters, an unknown basepython is a hard error."""
    initproj("interp123-0.5", filedefs={
        'tests': {'test_hello.py': "def test_hello(): pass"},
        'tox.ini': '''
            [testenv:python]
            basepython=xyz_unknown_interpreter
            [testenv]
            changedir=tests
        '''
    })
    result = cmd()
    assert result.ret
    assert any('ERROR: InterpreterNotFound: xyz_unknown_interpreter' == l for l in result.outlines)

def test_skip_platform_mismatch(cmd, initproj):
    """An env whose platform= does not match sys.platform is skipped, not failed."""
    initproj("interp123-0.5", filedefs={
        'tests': {'test_hello.py': "def test_hello(): pass"},
        'tox.ini': '''
            [testenv]
            changedir=tests
            platform=x123
        '''
    })
    result = cmd()
    assert not result.ret
    assert any('SKIPPED: python: platform mismatch' == l for l in result.outlines)

def test_skip_unknown_interpreter(cmd, initproj):
    """--skip-missing-interpreters downgrades InterpreterNotFound to a SKIPPED env."""
    initproj("interp123-0.5", filedefs={
        'tests': {'test_hello.py': "def test_hello(): pass"},
        'tox.ini': '''
            [testenv:python]
            basepython=xyz_unknown_interpreter
            [testenv]
            changedir=tests
        '''
    })
    result = cmd("--skip-missing-interpreters")
    assert not result.ret
    msg = 'SKIPPED: python: InterpreterNotFound: xyz_unknown_interpreter'
    assert any(msg == l for l in result.outlines)
def test_skip_unknown_interpreter_result_json(cmd, initproj, tmpdir):
    """A skipped interpreter must still be recorded in the --result-json setup section."""
    report_path = tmpdir.join("toxresult.json")
    initproj("interp123-0.5", filedefs={
        'tests': {'test_hello.py': "def test_hello(): pass"},
        'tox.ini': '''
            [testenv:python]
            basepython=xyz_unknown_interpreter
            [testenv]
            changedir=tests
        '''
    })
    result = cmd("--skip-missing-interpreters", "--result-json", report_path)
    assert not result.ret
    msg = 'SKIPPED: python: InterpreterNotFound: xyz_unknown_interpreter'
    assert any(msg == l for l in result.outlines)
    # json.load() calls .read(), which py.path.local provides directly.
    setup_result_from_json = json.load(report_path)["testenvs"]["python"]["setup"]
    for setup_step in setup_result_from_json:
        assert "InterpreterNotFound" in setup_step["output"]
        # NOTE(review): retcode is serialized as the string "0" in the report format.
        assert setup_step["retcode"] == "0"

def test_unknown_dep(cmd, initproj):
    """An uninstallable dep must fail the env with a 'could not install deps' error."""
    initproj("dep123-0.7", filedefs={
        'tests': {'test_hello.py': "def test_hello(): pass"},
        'tox.ini': '''
            [testenv]
            deps=qweqwe123
            changedir=tests
        '''
    })
    result = cmd()
    assert result.ret
    assert result.outlines[-1].startswith('ERROR: python: could not install deps [qweqwe123];')
def test_venv_special_chars_issue252(cmd, initproj):
    """Env names containing shell metacharacters (&&) must work (issue #252)."""
    initproj("pkg123-0.7", filedefs={
        'tests': {'test_hello.py': "def test_hello(): pass"},
        'tox.ini': '''
            [tox]
            envlist = special&&1
            [testenv:special&&1]
            changedir=tests
        '''
    })
    result = cmd()
    assert result.ret == 0
    pattern = re.compile('special&&1 installed: .*pkg123==0.7.*')
    assert any(pattern.match(line) for line in result.outlines)

def test_unknown_environment(cmd, initproj):
    """-e with an undefined env name must error out with a clear message."""
    initproj("env123-0.7", filedefs={
        'tox.ini': ''
    })
    result = cmd("-e", "qpwoei")
    assert result.ret
    assert result.out == "ERROR: unknown environment 'qpwoei'\n"

def test_skip_sdist(cmd, initproj):
    """skipsdist=True must bypass packaging entirely, even with a broken setup.py."""
    initproj("pkg123-0.7", filedefs={
        'tests': {'test_hello.py': "def test_hello(): pass"},
        'setup.py': """
            syntax error
        """,
        'tox.ini': '''
            [tox]
            skipsdist=True
            [testenv]
            commands=python -c "print('done')"
        '''
    })
    result = cmd()
    assert result.ret == 0
def test_minimal_setup_py_empty(cmd, initproj):
    """An empty setup.py must be detected and rejected."""
    initproj("pkg123-0.7", filedefs={
        'tests': {'test_hello.py': "def test_hello(): pass"},
        'setup.py': """
        """,
        'tox.ini': ''
    })
    result = cmd()
    assert result.ret == 1
    assert result.outlines[-1] == 'ERROR: setup.py is empty'

def test_minimal_setup_py_comment_only(cmd, initproj):
    """A setup.py containing only comments counts as empty."""
    initproj("pkg123-0.7", filedefs={
        'tests': {'test_hello.py': "def test_hello(): pass"},
        'setup.py': """\n# some comment
        """,
        'tox.ini': ''
    })
    result = cmd()
    assert result.ret == 1
    assert result.outlines[-1] == 'ERROR: setup.py is empty'

def test_minimal_setup_py_non_functional(cmd, initproj):
    """A setup.py that runs but never calls setup() must produce a 'check setup.py' error."""
    initproj("pkg123-0.7", filedefs={
        'tests': {'test_hello.py': "def test_hello(): pass"},
        'setup.py': """
            import sys
        """,
        'tox.ini': ''
    })
    result = cmd()
    assert result.ret == 1
    assert any(re.match(r'.*ERROR.*check setup.py.*', l) for l in result.outlines)
def test_sdist_fails(cmd, initproj):
    """A setup.py with a syntax error must fail packaging with a FAIL message."""
    initproj("pkg123-0.7", filedefs={
        'tests': {'test_hello.py': "def test_hello(): pass"},
        'setup.py': """
            syntax error
        """,
        'tox.ini': '',
    })
    result = cmd()
    assert result.ret
    assert any(re.match(r'.*FAIL.*could not package project.*', l) for l in result.outlines)

def test_no_setup_py_exits(cmd, initproj):
    """Removing setup.py entirely must abort with 'No setup.py file found'."""
    initproj("pkg123-0.7", filedefs={
        'tox.ini': """
            [testenv]
            commands=python -c "2 + 2"
        """
    })
    os.remove("setup.py")
    result = cmd()
    assert result.ret
    assert any(re.match(r'.*ERROR.*No setup.py file found.*', l) for l in result.outlines)
def test_package_install_fails(cmd, initproj):
    """An sdist whose install_requires cannot be satisfied must fail env setup."""
    initproj("pkg123-0.7", filedefs={
        'tests': {'test_hello.py': "def test_hello(): pass"},
        'setup.py': """
            from setuptools import setup
            setup(
                name='pkg123',
                description='pkg123 project',
                version='0.7',
                license='MIT',
                platforms=['unix', 'win32'],
                packages=['pkg123',],
                install_requires=['qweqwe123'],
            )
        """,
        'tox.ini': '',
    })
    result = cmd()
    assert result.ret
    assert result.outlines[-1].startswith('ERROR: python: InvocationError for command ')

@pytest.fixture
def example123(initproj):
    """Project fixture: one passing test, pytest-based testenv writing a junit xml report."""
    yield initproj("example123-0.5", filedefs={
        'tests': {
            'test_hello.py': """
                def test_hello(pytestconfig):
                    pass
            """,
        },
        'tox.ini': '''
            [testenv]
            changedir=tests
            commands= pytest --basetemp={envtmpdir} \
                             --junitxml=junit-{envname}.xml
            deps=pytest
        '''
    })
def test_toxuone_env(cmd, example123):
    """Default run and explicit -epython both pass and report the junit xml + summary."""
    result = cmd()
    assert not result.ret
    assert re.match(r'.*generated\W+xml\W+file.*junit-python\.xml'
                    r'.*\W+1\W+passed.*', result.out, re.DOTALL)
    result = cmd("-epython", )
    assert not result.ret
    assert re.match(r'.*\W+1\W+passed.*'
                    r'summary.*'
                    r'python:\W+commands\W+succeeded.*', result.out, re.DOTALL)

def test_different_config_cwd(cmd, example123, monkeypatch):
    # see that things work with a different CWD
    monkeypatch.chdir(example123.dirname)
    result = cmd("-c", "example123/tox.ini")
    assert not result.ret
    assert re.match(r'.*\W+1\W+passed.*'
                    r'summary.*'
                    r'python:\W+commands\W+succeeded.*', result.out, re.DOTALL)
def test_json(cmd, example123):
    """A failing test run must set retcode 1 and produce a valid --result-json report."""
    # see that tests can also fail and retcode is correct
    testfile = py.path.local("tests").join("test_hello.py")
    assert testfile.check()
    testfile.write("def test_fail(): assert 0")
    jsonpath = example123.join("res.json")
    result = cmd("--result-json", jsonpath)
    assert result.ret == 1
    # Fix: close the report file instead of leaking the handle that
    # jsonpath.open("r") used to leave dangling.
    with jsonpath.open("r") as f:
        data = json.load(f)
    verify_json_report_format(data)
    assert re.match(r'.*\W+1\W+failed.*'
                    r'summary.*'
                    r'python:\W+commands\W+failed.*', result.out, re.DOTALL)
def test_developz(initproj, cmd):
    """--develop on the command line must skip the sdist-make step."""
    initproj("example123", filedefs={'tox.ini': """
    """})
    result = cmd("-vv", "--develop")
    assert not result.ret
    assert "sdist-make" not in result.out

def test_usedevelop(initproj, cmd):
    """usedevelop=True in tox.ini must skip the sdist-make step."""
    initproj("example123", filedefs={'tox.ini': """
        [testenv]
        usedevelop=True
    """})
    result = cmd("-vv")
    assert not result.ret
    assert "sdist-make" not in result.out

def test_usedevelop_mixed(initproj, cmd):
    """sdist is only built when at least one selected env has usedevelop=False."""
    initproj("example123", filedefs={'tox.ini': """
        [testenv:devenv]
        usedevelop=True
        [testenv:nondev]
        usedevelop=False
    """})
    # running only 'devenv' should not do sdist
    result = cmd("-vv", "-e", "devenv")
    assert not result.ret
    assert "sdist-make" not in result.out
    # running all envs should do sdist
    result = cmd("-vv")
    assert not result.ret
    assert "sdist-make" in result.out
@pytest.mark.parametrize("src_root", [".", "src"])
def test_test_usedevelop(cmd, initproj, src_root, monkeypatch):
    """End-to-end usedevelop run: no sdist, develop install reused, re-run on setup.py change."""
    base = initproj("example123-0.5", src_root=src_root, filedefs={
        'tests': {
            'test_hello.py': """
                def test_hello(pytestconfig):
                    pass
            """,
        },
        'tox.ini': '''
            [testenv]
            usedevelop=True
            changedir=tests
            commands=
                pytest --basetemp={envtmpdir} --junitxml=junit-{envname}.xml []
            deps=pytest
        '''
    })
    result = cmd("-v")
    assert not result.ret
    assert re.match(r'.*generated\W+xml\W+file.*junit-python\.xml'
                    r'.*\W+1\W+passed.*', result.out, re.DOTALL)
    # usedevelop must not build an sdist
    assert "sdist-make" not in result.out
    result = cmd("-epython", )
    assert not result.ret
    # second run: existing develop install is reused
    assert "develop-inst-noop" in result.out
    assert re.match(r'.*\W+1\W+passed.*'
                    r'summary.*'
                    r'python:\W+commands\W+succeeded.*', result.out, re.DOTALL)
    # see that things work with a different CWD
    monkeypatch.chdir(base.dirname)
    result = cmd("-c", "example123/tox.ini")
    assert not result.ret
    assert "develop-inst-noop" in result.out
    assert re.match(r'.*\W+1\W+passed.*'
                    r'summary.*'
                    r'python:\W+commands\W+succeeded.*', result.out, re.DOTALL)
    monkeypatch.chdir(base)
    # see that tests can also fail and retcode is correct
    testfile = py.path.local("tests").join("test_hello.py")
    assert testfile.check()
    testfile.write("def test_fail(): assert 0")
    result = cmd()
    assert result.ret
    assert "develop-inst-noop" in result.out
    assert re.match(r'.*\W+1\W+failed.*'
                    r'summary.*'
                    r'python:\W+commands\W+failed.*', result.out, re.DOTALL)
    # test develop is called if setup.py changes
    setup_py = py.path.local("setup.py")
    setup_py.write(setup_py.read() + ' ')
    result = cmd()
    assert result.ret
    assert "develop-inst-nodeps" in result.out
def _alwayscopy_not_supported():
    """Return True on platforms where virtualenv's --always-copy is known broken.

    This is due to virtualenv bugs with alwayscopy on some platforms,
    see: https://github.com/pypa/virtualenv/issues/565
    """
    # platform.linux_distribution was removed in Python 3.8; without it we
    # cannot identify the affected distributions, so assume support.
    if not hasattr(platform, 'linux_distribution'):
        return False
    name, version, arch = platform.linux_distribution(full_distribution_name=False)
    return (name == 'centos' and version[0] == '7') or (
        name == 'SuSE' and arch == 'x86_64')
@pytest.mark.skipif(_alwayscopy_not_supported(), reason="Platform doesnt support alwayscopy")
def test_alwayscopy(initproj, cmd, mocksession):
    """alwayscopy=True must add --copies (builtin venv) or --always-copy (virtualenv)."""
    initproj("example123", filedefs={'tox.ini': """
        [testenv]
        commands={envpython} --version
        alwayscopy=True
    """})
    venv = mocksession.getenv('python')
    result = cmd("-vv")
    assert not result.ret
    if use_builtin_venv(venv):
        assert "venv --copies" in result.out
    else:
        assert "virtualenv --always-copy" in result.out

def test_alwayscopy_default(initproj, cmd, mocksession):
    """Without alwayscopy, neither copy flag may appear on the venv command line."""
    initproj("example123", filedefs={'tox.ini': """
        [testenv]
        commands={envpython} --version
    """})
    venv = mocksession.getenv('python')
    result = cmd("-vv")
    assert not result.ret
    if use_builtin_venv(venv):
        assert "venv --copies" not in result.out
    else:
        assert "virtualenv --always-copy" not in result.out
def test_empty_activity_ignored(initproj, cmd):
    """An activity producing no output (empty dependency list) is hidden by default."""
    initproj("example123", filedefs={'tox.ini': """
        [testenv]
        list_dependencies_command=echo
        commands={envpython} --version
    """})
    result = cmd()
    assert not result.ret
    assert "installed:" not in result.out

def test_empty_activity_shown_verbose(initproj, cmd):
    """With -v even empty activities are reported."""
    initproj("example123", filedefs={'tox.ini': """
        [testenv]
        list_dependencies_command=echo
        commands={envpython} --version
    """})
    result = cmd("-v")
    assert not result.ret
    assert "installed:" in result.out
def test_test_piphelp(initproj, cmd):
    """Running 'pip -h' as a testenv command must succeed in every env."""
    initproj("example123", filedefs={'tox.ini': """
        # content of: tox.ini
        [testenv]
        commands=pip -h
        [testenv:py27]
        basepython=python
        [testenv:py36]
        basepython=python
    """})
    result = cmd()
    assert not result.ret

def test_notest(initproj, cmd):
    """--notest must create the env, skip tests, and reuse the env on a second run."""
    initproj("example123", filedefs={'tox.ini': """
        # content of: tox.ini
        [testenv:py26]
        basepython=python
    """})
    result = cmd("-v", "--notest")
    assert not result.ret
    assert re.match(r'.*summary.*'
                    r'py26\W+skipped\W+tests.*', result.out, re.DOTALL)
    result = cmd("-v", "--notest", "-epy26")
    assert not result.ret
    assert re.match(r'.*py26\W+reusing.*', result.out, re.DOTALL)
def test_PYC(initproj, cmd, monkeypatch):
    """Env creation must still work when bytecode writing is disabled via the environment."""
    initproj("example123", filedefs={'tox.ini': ''})
    # Bug fix: the variable was misspelled "PYTHONDOWNWRITEBYTECODE"; the real
    # CPython switch is PYTHONDONTWRITEBYTECODE.  Also pass a string value, as
    # required by monkeypatch.setenv in recent pytest versions.
    monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", "1")
    result = cmd("-v", "--notest")
    assert not result.ret
    assert 'create' in result.out
def test_env_VIRTUALENV_PYTHON(initproj, cmd, monkeypatch):
    """A bogus VIRTUALENV_PYTHON in the environment must not break env creation."""
    initproj("example123", filedefs={'tox.ini': ''})
    monkeypatch.setenv("VIRTUALENV_PYTHON", '/FOO')
    result = cmd("-v", "--notest")
    assert not result.ret, result.outlines
    assert 'create' in result.out

def test_sdistonly(initproj, cmd):
    """--sdistonly must build the sdist and never create a virtualenv."""
    initproj("example123", filedefs={'tox.ini': """
    """})
    result = cmd("-v", "--sdistonly")
    assert not result.ret
    assert re.match(r'.*sdist-make.*setup.py.*', result.out, re.DOTALL)
    assert "-mvirtualenv" not in result.out
def test_separate_sdist_no_sdistfile(cmd, initproj, tmpdir):
    """--sdistonly with distshare must copy exactly one sdist into the share dir."""
    distshare = tmpdir.join("distshare")
    initproj(("pkg123-foo", "0.7"), filedefs={
        'tox.ini': """
            [tox]
            distshare={}
        """.format(distshare)
    })
    result = cmd("--sdistonly")
    assert not result.ret
    distshare_files = distshare.listdir()
    assert len(distshare_files) == 1
    sdistfile = distshare_files[0]
    assert 'pkg123-foo-0.7.zip' in str(sdistfile)

def test_separate_sdist(cmd, initproj, tmpdir):
    """sdistsrc must make a later run install the previously shared sdist."""
    distshare = tmpdir.join("distshare")
    initproj("pkg123-0.7", filedefs={
        'tox.ini': """
            [tox]
            distshare=%s
            sdistsrc={distshare}/pkg123-0.7.zip
        """ % distshare
    })
    result = cmd("--sdistonly")
    assert not result.ret
    sdistfiles = distshare.listdir()
    assert len(sdistfiles) == 1
    sdistfile = sdistfiles[0]
    result = cmd("-v", "--notest")
    assert not result.ret
    assert "python inst: {}".format(sdistfile) in result.out
def test_sdist_latest(tmpdir, newconfig):
    """A glob sdistsrc must resolve to the highest version (1.4.5 beats pre-release 1.4.5a1)."""
    distshare = tmpdir.join("distshare")
    config = newconfig([], """
        [tox]
        distshare=%s
        sdistsrc={distshare}/pkg123-*
    """ % distshare)
    p = distshare.ensure("pkg123-1.4.5.zip")
    # the pre-release must lose against the final release above
    distshare.ensure("pkg123-1.4.5a1.zip")
    session = Session(config)
    sdist_path = session.get_installpkg_path()
    assert sdist_path == p

def test_installpkg(tmpdir, newconfig):
    """--installpkg must be returned verbatim as the package to install."""
    p = tmpdir.ensure("pkg123-1.0.zip")
    config = newconfig(["--installpkg=%s" % p], "")
    session = Session(config)
    sdist_path = session.get_installpkg_path()
    assert sdist_path == p
def test_envsitepackagesdir(cmd, initproj):
    """{envsitepackagesdir} must expand to the env's site-packages path."""
    initproj("pkg512-0.0.5", filedefs={
        'tox.ini': """
            [testenv]
            commands=
                python -c "print(r'X:{envsitepackagesdir}')"
        """})
    result = cmd()
    assert result.ret == 0
    assert re.match(r'.*\nX:.*tox.*site-packages.*', result.out, re.DOTALL)

def test_envsitepackagesdir_skip_missing_issue280(cmd, initproj):
    """{envsitepackagesdir} with a missing interpreter must skip, not crash (issue #280)."""
    initproj("pkg513-0.0.5", filedefs={
        'tox.ini': """
            [testenv]
            basepython=/usr/bin/qwelkjqwle
            commands=
                {envsitepackagesdir}
        """})
    result = cmd("--skip-missing-interpreters")
    assert result.ret == 0
    assert re.match(r'.*SKIPPED:.*qwelkj.*', result.out, re.DOTALL)

@pytest.mark.parametrize('verbosity', ['', '-v', '-vv'])
def test_verbosity(cmd, initproj, verbosity):
    """pip's install output is only shown at -vv verbosity."""
    initproj("pkgX-0.0.5", filedefs={
        'tox.ini': """
            [testenv]
        """})
    result = cmd(verbosity)
    assert result.ret == 0
    needle = "Successfully installed pkgX-0.0.5"
    if verbosity == '-vv':
        assert any(needle in line for line in result.outlines)
    else:
        assert all(needle not in line for line in result.outlines)
def verify_json_report_format(data, testenvs=True):
    """Sanity-check the structure of a --result-json report dict.

    With testenvs=True, also validates the per-env 'setup'/'test' command
    records and, for real envs (not "GLOB"), the installed packages and
    python interpreter metadata.
    """
    assert data["reportversion"] == "1"
    assert data["toxversion"] == tox.__version__
    if testenvs:
        for envname, envdata in data["testenvs"].items():
            for commandtype in ("setup", "test"):
                if commandtype not in envdata:
                    continue
                for command in envdata[commandtype]:
                    assert command["output"]
                    # retcode is serialized as a (non-empty) string
                    assert command["retcode"]
            if envname != "GLOB":
                assert isinstance(envdata["installed_packages"], list)
                pyinfo = envdata["python"]
                assert isinstance(pyinfo["version_info"], list)
                assert pyinfo["version"]
                assert pyinfo["executable"]
def test_envtmpdir(initproj, cmd):
    """{envtmpdir} must exist and be cleared before every run."""
    initproj("foo", filedefs={
        # This file first checks that envtmpdir is existent and empty. Then it
        # creates an empty file in that directory. The tox command is run
        # twice below, so this is to test whether the directory is cleared
        # before the second run.
        'check_empty_envtmpdir.py': '''if True:
            import os
            from sys import argv
            envtmpdir = argv[1]
            assert os.path.exists(envtmpdir)
            assert os.listdir(envtmpdir) == []
            open(os.path.join(envtmpdir, 'test'), 'w').close()
        ''',
        'tox.ini': '''
            [testenv]
            commands=python check_empty_envtmpdir.py {envtmpdir}
        '''
    })
    result = cmd()
    assert not result.ret
    result = cmd()
    assert not result.ret

def test_missing_env_fails(initproj, cmd):
    """An unresolvable {env:VAR} substitution must fail with a clear message."""
    initproj("foo", filedefs={'tox.ini': "[testenv:foo]\ncommands={env:VAR}"})
    result = cmd()
    assert result.ret == 1
    assert result.out.endswith("foo: unresolvable substitution(s): 'VAR'."
                               " Environment variables are missing or defined recursively.\n")
def test_tox_console_script():
    """The installed 'tox' console script must be invocable."""
    result = subprocess.check_call(['tox', '--help'])
    assert result == 0

def test_tox_quickstart_script():
    """The installed 'tox-quickstart' console script must be invocable."""
    result = subprocess.check_call(['tox-quickstart', '--help'])
    assert result == 0

def test_tox_cmdline_no_args(monkeypatch):
    """tox.cmdline() without args reads sys.argv and exits via SystemExit on --help."""
    monkeypatch.setattr(sys, 'argv', ['caller_script', '--help'])
    with pytest.raises(SystemExit):
        tox.cmdline()

def test_tox_cmdline_args(monkeypatch):
    """tox.cmdline(argv) uses the explicit argv and exits via SystemExit on --help."""
    with pytest.raises(SystemExit):
        tox.cmdline(['caller_script', '--help'])
@pytest.mark.parametrize('exit_code', [0, 6])
def test_exit_code(initproj, cmd, exit_code, mocker):
    """ Check for correct InvocationError, with exit code,
    except for zero exit code """
    mocker.spy(tox, '_exit_code_str')
    tox_ini_content = "[testenv:foo]\ncommands=python -c 'import sys; sys.exit(%d)'" % exit_code
    initproj("foo", filedefs={'tox.ini': tox_ini_content})
    cmd()
    if exit_code:
        # non-zero exit: exactly one InvocationError must have been formatted
        # need mocker.spy above
        assert tox._exit_code_str.call_count == 1
        (args, kwargs) = tox._exit_code_str.call_args
        assert kwargs == {}
        (call_error_name, call_command, call_exit_code) = args
        assert call_error_name == 'InvocationError'
        # quotes are removed in result.out
        # do not include "python" as it is changed to python.EXE by appveyor
        expected_command_arg = ' -c import sys; sys.exit(%d)' % exit_code
        assert expected_command_arg in call_command
        assert call_exit_code == exit_code
    else:
        # zero exit: no InvocationError must be formatted at all
        # need mocker.spy above
        assert tox._exit_code_str.call_count == 0
| 31.315457 | 99 | 0.59501 |
40114623f9609a5500bcac0b531121ecb8632b76 | 5,413 | py | Python | src/lxrt/entry.py | ashutoshbaghel/tgifqa-lxmert | 7969f478d20fbfbba1c0eaaf0b96891654bfcc26 | [
"MIT"
] | null | null | null | src/lxrt/entry.py | ashutoshbaghel/tgifqa-lxmert | 7969f478d20fbfbba1c0eaaf0b96891654bfcc26 | [
"MIT"
] | null | null | null | src/lxrt/entry.py | ashutoshbaghel/tgifqa-lxmert | 7969f478d20fbfbba1c0eaaf0b96891654bfcc26 | [
"MIT"
] | null | null | null | # coding=utf-8
# Copyright 2019 project LXRT.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import torch.nn as nn
from lxrt.tokenization import BertTokenizer
from lxrt.modeling import LXRTFeatureExtraction as VisualBertForLXRFeature, VISUAL_CONFIG
class InputFeatures(object):
    """A single set of features of data: BERT token ids, attention mask and segment ids."""
    def __init__(self, input_ids, input_mask, segment_ids):
        # input_ids: token ids padded to max_seq_length
        self.input_ids = input_ids
        # input_mask: 1 for real tokens, 0 for padding
        self.input_mask = input_mask
        # segment_ids: all zeros here (single-sentence input)
        self.segment_ids = segment_ids
def convert_sents_to_features(sents, max_seq_length, tokenizer):
    """Tokenize raw sentences into a list of `InputFeatures`, truncated/padded to max_seq_length."""
    features = []
    for (i, sent) in enumerate(sents):
        tokens_a = tokenizer.tokenize(sent.strip())
        # Account for [CLS] and [SEP] with "- 2"
        if len(tokens_a) > max_seq_length - 2:
            tokens_a = tokens_a[:(max_seq_length - 2)]
        # Keep segment id which allows loading BERT-weights.
        tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
        segment_ids = [0] * len(tokens)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)
        # Zero-pad up to the sequence length.
        padding = [0] * (max_seq_length - len(input_ids))
        input_ids += padding
        input_mask += padding
        segment_ids += padding
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        features.append(
            InputFeatures(input_ids=input_ids,
                          input_mask=input_mask,
                          segment_ids=segment_ids))
    return features
def set_visual_config(args):
    """Copy the layer counts (language / cross-modality / vision) from args into VISUAL_CONFIG."""
    VISUAL_CONFIG.l_layers = args.llayers
    VISUAL_CONFIG.x_layers = args.xlayers
    VISUAL_CONFIG.r_layers = args.rlayers
class LXRTEncoder(nn.Module):
    """Wraps the pretrained LXMERT feature extractor together with its BERT tokenizer."""

    def __init__(self, args, max_seq_length, mode='x', attention=False):
        super().__init__()
        print(f"Making {__name__}")
        self.max_seq_length = max_seq_length
        set_visual_config(args)
        # Using the bert tokenizer
        self.tokenizer = BertTokenizer.from_pretrained(
            "bert-base-uncased",
            do_lower_case=True
        )
        print("Made Tokenizer")
        # Build LXRT Model
        self.model = VisualBertForLXRFeature.from_pretrained(
            "bert-base-uncased",
            mode=mode,
            attention=attention
        )
        print("Made VisualBertForLXRFeature")
        if args.from_scratch:
            # Discard the pretrained weights and re-initialize everything.
            print("initializing all the weights")
            self.model.apply(self.model.init_bert_weights)
        print(f"Done {__name__}")

    def multi_gpu(self):
        """Wrap the underlying model for data-parallel multi-GPU execution."""
        self.model = nn.DataParallel(self.model)

    @property
    def dim(self):
        # Hidden size of BERT-base.
        return 768

    def forward(self, sents, feats, visual_attention_mask=None):
        """Tokenize raw sentences, move tensors to GPU, and run the LXRT model."""
        train_features = convert_sents_to_features(
            sents, self.max_seq_length, self.tokenizer)
        input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long).cuda()
        input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long).cuda()
        segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long).cuda()
        output = self.model(input_ids, segment_ids, input_mask,
                            visual_feats=feats,
                            visual_attention_mask=visual_attention_mask)
        return output

    def save(self, path):
        """Save model weights to "<path>_LXRT.pth"."""
        torch.save(self.model.state_dict(),
                   os.path.join("%s_LXRT.pth" % path))

    def load(self, path):
        """Load weights from "<path>_LXRT.pth", stripping DataParallel "module." prefixes."""
        # Load state_dict from snapshot file
        print("Load LXMERT pre-trained model from %s" % path)
        state_dict = torch.load("%s_LXRT.pth" % path)
        new_state_dict = {}
        for key, value in state_dict.items():
            if key.startswith("module."):
                new_state_dict[key[len("module."):]] = value
            else:
                # Bug fix: keys without the DataParallel prefix were silently
                # dropped before, so a non-DataParallel checkpoint loaded
                # nothing at all under strict=False.  Keep them as-is.
                new_state_dict[key] = value
        state_dict = new_state_dict
        # Print out the differences of pre-trained and model weights.
        load_keys = set(state_dict.keys())
        model_keys = set(self.model.state_dict().keys())
        print()
        print("Weights in loaded but not in model:")
        for key in sorted(load_keys.difference(model_keys)):
            print(key)
        print()
        print("Weights in model but not in loaded:")
        for key in sorted(model_keys.difference(load_keys)):
            print(key)
        print()
        # Load weights to model
        self.model.load_state_dict(state_dict, strict=False)
| 34.259494 | 100 | 0.642897 |
e23f6d819023a97df87f3f55a10133be588600f0 | 391 | py | Python | cride/events/migrations/0004_event_img.py | albertoaldanar/betmatcherAPI | c0590025efd79f4e489f9c9433b17554ea6ba23f | [
"MIT"
] | null | null | null | cride/events/migrations/0004_event_img.py | albertoaldanar/betmatcherAPI | c0590025efd79f4e489f9c9433b17554ea6ba23f | [
"MIT"
] | 7 | 2020-06-05T20:53:27.000Z | 2022-03-11T23:47:12.000Z | cride/events/migrations/0004_event_img.py | albertoaldanar/betmatcherAPI | c0590025efd79f4e489f9c9433b17554ea6ba23f | [
"MIT"
] | null | null | null | # Generated by Django 2.0.9 on 2019-10-07 16:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the nullable free-text ``img`` field to the ``events.Event`` model."""
    dependencies = [
        ('events', '0003_banner'),
    ]
    operations = [
        migrations.AddField(
            model_name='event',
            name='img',
            # NOTE: max_length has no effect on TextField at the DB level; it is
            # only used by Django form widgets.
            field=models.TextField(blank=True, max_length=500, null=True),
        ),
    ]
| 20.578947 | 74 | 0.588235 |
9d235015a16fbb013badad63bcac79b4bb7a2282 | 1,776 | py | Python | code/multipliers.py | PetrKryslUCSD/knausj_talon_pk | 6612adb1794e0b02ce8b1c2b478b74cd6858954b | [
"MIT"
] | 1 | 2020-11-13T18:02:12.000Z | 2020-11-13T18:02:12.000Z | code/multipliers.py | PetrKryslUCSD/knausj_talon_pk | 6612adb1794e0b02ce8b1c2b478b74cd6858954b | [
"MIT"
] | null | null | null | code/multipliers.py | PetrKryslUCSD/knausj_talon_pk | 6612adb1794e0b02ce8b1c2b478b74cd6858954b | [
"MIT"
] | null | null | null | from talon import Context, Module, actions, app, ui
# The primitive multiplier words in English below a hundred.
multiplier_words = {
    0: "times zero",
    1: "times one",
    2: "times two",
    3: "times three",
    4: "times four",
    5: "times five",
    6: "times six",
    7: "times seven",
    8: "times eight",
    9: "times nine",
    10: "times ten",
    11: "times eleven",
    12: "times twelve",
    13: "times thirteen",
    14: "times fourteen",
    15: "times fifteen",
    16: "times sixteen",
    17: "times seventeen",
    18: "times eighteen",
    19: "times nineteen",
    20: "times twenty",
    30: "times thirty",
    40: "times forty",
    50: "times fifty",
    60: "times sixty",
    70: "times seventy",
    80: "times eighty",
    90: "times ninety",
}
# NOTE(review): tens_words appears unused in this module — possibly leftover
# from a copy of the numbers module; verify before removing.
tens_words = "zero ten twenty thirty forty fifty sixty seventy eighty ninety".split()
# dictionaries map multiplier words into their corresponding numbers.
multiplier_numbers = {}
multiplier_small = {}
for n in range(1, 100):
    if n in multiplier_words:
        word = multiplier_words[n]
        # multiplier_small only holds the "small" multipliers (one..twenty)
        if n <= 20:
            multiplier_small[word] = n
        multiplier_numbers[word] = n
mod = Module()
ctx = Context()
mod.list("multipliers", desc="list of multipliers")
mod.list("multipliers_small", desc="list of multipliers small (1-20)")
ctx.lists["self.multipliers"] = multiplier_numbers.keys()
ctx.lists["self.multipliers_small"] = multiplier_small.keys()
@mod.capture(rule="{self.multipliers}")
def multipliers(m) -> int:
    """Returns a spoken multiplier phrase (e.g. "times three") as its integer value."""
    # Fix: the old docstring wrongly described this as returning an "ordinal"
    # (copy-paste from the ordinals module); behavior is unchanged.
    return int(multiplier_numbers[m[0]])
@mod.capture(rule="{self.multipliers_small}")
def multipliers_small(m) -> int:
    """Returns a spoken small multiplier phrase (times one..twenty) as its integer value."""
    # Fix: look up the dedicated small-multiplier mapping for consistency with
    # the list this capture is bound to; values are identical for these keys,
    # and the old docstring wrongly said "ordinal".
    return int(multiplier_small[m[0]])
| 26.909091 | 85 | 0.642455 |
c95997f23a1b3c4a24bb29b0e05ec431c103a841 | 17,643 | py | Python | official/cv/nima/src/resnet.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 77 | 2021-10-15T08:32:37.000Z | 2022-03-30T13:09:11.000Z | official/cv/nima/src/resnet.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 3 | 2021-10-30T14:44:57.000Z | 2022-02-14T06:57:57.000Z | official/cv/nima/src/resnet.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 24 | 2021-10-15T08:32:45.000Z | 2022-03-24T18:45:20.000Z | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""ResNet."""
import math
import numpy as np
from scipy.stats import truncnorm
import mindspore.nn as nn
import mindspore.common.dtype as mstype
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.common.tensor import Tensor
def conv_variance_scaling_initializer(in_channel, out_channel, kernel_size):
    """Fan-in variance-scaling truncated-normal init for a conv weight.

    Returns a float32 Tensor of shape (out_channel, in_channel, kernel_size, kernel_size).
    """
    fan_in = in_channel * kernel_size * kernel_size
    scale = 1.0
    scale /= max(1., fan_in)
    # .87962566103423978 is the stddev of a standard normal truncated to [-2, 2],
    # so dividing by it makes the truncated sample have stddev sqrt(scale).
    stddev = (scale ** 0.5) / .87962566103423978
    mu, sigma = 0, stddev
    weight = truncnorm(-2, 2, loc=mu, scale=sigma).rvs(out_channel * in_channel * kernel_size * kernel_size)
    weight = np.reshape(weight, (out_channel, in_channel, kernel_size, kernel_size))
    return Tensor(weight, dtype=mstype.float32)
def _weight_variable(shape, factor=0.01):
    """Return a float32 Tensor of the given shape drawn from N(0, factor^2)."""
    init_value = np.random.randn(*shape).astype(np.float32) * factor
    return Tensor(init_value)
def calculate_gain(nonlinearity, param=None):
    """Return the recommended init gain for the given nonlinearity.

    For 'leaky_relu', *param* is the negative slope (default 0.01); bool
    values are rejected even though bool is a subclass of int.
    """
    unit_gain_fns = ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d',
                     'conv_transpose2d', 'conv_transpose3d']
    if nonlinearity in unit_gain_fns or nonlinearity == 'sigmoid':
        return 1
    if nonlinearity == 'tanh':
        return 5.0 / 3
    if nonlinearity == 'relu':
        return math.sqrt(2.0)
    if nonlinearity == 'leaky_relu':
        if param is None:
            neg_slope = 0.01
        elif not isinstance(param, bool) and isinstance(param, int) or isinstance(param, float):
            neg_slope = param
        else:
            raise ValueError("neg_slope {} not a valid number".format(param))
        return math.sqrt(2.0 / (1 + neg_slope ** 2))
    raise ValueError("Unsupported nonlinearity {}".format(nonlinearity))
def _calculate_fan_in_and_fan_out(tensor):
    """Compute (fan_in, fan_out) from a weight *shape tuple*.

    2-D shapes are treated as Linear weights (out, in); 4-D conv shapes
    additionally multiply by the receptive field size (k_h * k_w).
    """
    ndim = len(tensor)
    if ndim < 2:
        raise ValueError("Fan in and fan out can not be computed for tensor with fewer than 2 dimensions")
    if ndim == 2:  # Linear
        return tensor[1], tensor[0]
    # conv-style shape: (out_fmaps, in_fmaps, k_h, k_w)
    receptive_field_size = tensor[2] * tensor[3]
    return tensor[1] * receptive_field_size, tensor[0] * receptive_field_size
def _calculate_correct_fan(tensor, mode):
    """Return fan_in or fan_out of *tensor* (a shape tuple) according to *mode*."""
    mode = mode.lower()
    valid_modes = ['fan_in', 'fan_out']
    if mode not in valid_modes:
        raise ValueError("Unsupported mode {}, please use one of {}".format(mode, valid_modes))
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    if mode == 'fan_in':
        return fan_in
    return fan_out
def kaiming_normal(inputs_shape, a=0, mode='fan_in', nonlinearity='leaky_relu'):
    """He-normal init: float32 array of shape inputs_shape drawn from N(0, gain/sqrt(fan))."""
    fan = _calculate_correct_fan(inputs_shape, mode)
    gain = calculate_gain(nonlinearity, a)
    std = gain / math.sqrt(fan)
    return np.random.normal(0, std, size=inputs_shape).astype(np.float32)
def kaiming_uniform(inputs_shape, a=0., mode='fan_in', nonlinearity='leaky_relu'):
    """He-uniform init: float32 array of shape inputs_shape drawn from U(-bound, bound)."""
    fan = _calculate_correct_fan(inputs_shape, mode)
    gain = calculate_gain(nonlinearity, a)
    std = gain / math.sqrt(fan)
    bound = math.sqrt(3.0) * std  # Calculate uniform bounds from standard deviation
    return np.random.uniform(-bound, bound, size=inputs_shape).astype(np.float32)
def _conv3x3(in_channel, out_channel, stride=1, use_se=False, res_base=False):
    """3x3 conv layer; SE variants use truncated-normal init, others Kaiming (fan_out)."""
    if use_se:
        weight = conv_variance_scaling_initializer(in_channel, out_channel, kernel_size=3)
    else:
        weight_shape = (out_channel, in_channel, 3, 3)
        weight = Tensor(kaiming_normal(weight_shape, mode="fan_out", nonlinearity='relu'))
    if res_base:
        # explicit padding=1 in 'pad' mode reproduces the original ResNet layout
        return nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=stride,
                         padding=1, pad_mode='pad', weight_init=weight)
    return nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=stride,
                     padding=0, pad_mode='same', weight_init=weight)
def _conv1x1(in_channel, out_channel, stride=1, use_se=False, res_base=False):
    """1x1 convolution; only the pad_mode differs between the two variants."""
    if use_se:
        weight = conv_variance_scaling_initializer(in_channel, out_channel, kernel_size=1)
    else:
        weight = Tensor(kaiming_normal((out_channel, in_channel, 1, 1), mode="fan_out", nonlinearity='relu'))
    pad_mode = 'pad' if res_base else 'same'
    return nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=stride,
                     padding=0, pad_mode=pad_mode, weight_init=weight)
def _conv7x7(in_channel, out_channel, stride=1, use_se=False, res_base=False):
    """7x7 stem convolution; res_base variant pads explicitly by 3."""
    if use_se:
        weight = conv_variance_scaling_initializer(in_channel, out_channel, kernel_size=7)
    else:
        weight = Tensor(kaiming_normal((out_channel, in_channel, 7, 7), mode="fan_out", nonlinearity='relu'))
    if res_base:
        padding, pad_mode = 3, 'pad'
    else:
        padding, pad_mode = 0, 'same'
    return nn.Conv2d(in_channel, out_channel,
                     kernel_size=7, stride=stride, padding=padding, pad_mode=pad_mode, weight_init=weight)
def _bn(channel, res_base=False):
    """BatchNorm2d with resnet18-style (res_base) or default hyper-parameters."""
    eps, momentum = (1e-5, 0.1) if res_base else (1e-4, 0.9)
    return nn.BatchNorm2d(channel, eps=eps, momentum=momentum,
                          gamma_init=1, beta_init=0, moving_mean_init=0, moving_var_init=1)
def _bn_last(channel):
    """BatchNorm2d whose gamma starts at zero (used as the final BN of a block)."""
    return nn.BatchNorm2d(channel, eps=1e-4, momentum=0.9, gamma_init=0,
                          beta_init=0, moving_mean_init=0, moving_var_init=1)
def _fc(in_channel, out_channel, use_se=False):
    """Dense layer; SE variant uses a small gaussian init, otherwise kaiming-uniform."""
    if use_se:
        raw = np.random.normal(loc=0, scale=0.01, size=out_channel * in_channel)
        weight = Tensor(np.reshape(raw, (out_channel, in_channel)), dtype=mstype.float32)
    else:
        weight = Tensor(kaiming_uniform((out_channel, in_channel), a=math.sqrt(5)))
    return nn.Dense(in_channel, out_channel, has_bias=True, weight_init=weight, bias_init=0)
class ResidualBlock(nn.Cell):
    """
    ResNet V1 residual block definition.

    Args:
        in_channel (int): Input channel.
        out_channel (int): Output channel.
        stride (int): Stride size for the first convolutional layer. Default: 1.
        use_se (bool): Enable SE-ResNet50 net. Default: False.
        se_block(bool): Use se block in SE-ResNet50 net. Default: False.

    Returns:
        Tensor, output tensor.

    Examples:
        >>> ResidualBlock(3, 256, stride=2)
    """
    # Bottleneck expansion factor: the internal 3x3 width is out_channel // 4.
    expansion = 4

    def __init__(self,
                 in_channel,
                 out_channel,
                 stride=1,
                 use_se=False, se_block=False):
        super(ResidualBlock, self).__init__()
        self.stride = stride
        self.use_se = use_se
        self.se_block = se_block
        # Internal (bottleneck) channel width used by conv1 and conv2.
        channel = out_channel // self.expansion
        self.conv1 = _conv1x1(in_channel, channel, stride=1, use_se=self.use_se)
        self.bn1 = _bn(channel)
        if self.use_se and self.stride != 1:
            # SE variant downsamples with a stride-1 conv followed by max-pool
            # instead of a strided 3x3 convolution.
            self.e2 = nn.SequentialCell([_conv3x3(channel, channel, stride=1, use_se=True), _bn(channel),
                                         nn.ReLU(), nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='same')])
        else:
            self.conv2 = _conv3x3(channel, channel, stride=stride, use_se=self.use_se)
            self.bn2 = _bn(channel)
        self.conv3 = _conv1x1(channel, out_channel, stride=1, use_se=self.use_se)
        self.bn3 = _bn(out_channel)
        if self.se_block:
            # Squeeze-and-excitation gate: global pool -> bottleneck FC pair -> sigmoid.
            self.se_global_pool = P.ReduceMean(keep_dims=False)
            self.se_dense_0 = _fc(out_channel, int(out_channel / 4), use_se=self.use_se)
            self.se_dense_1 = _fc(int(out_channel / 4), out_channel, use_se=self.use_se)
            self.se_sigmoid = nn.Sigmoid()
            self.se_mul = P.Mul()
        self.relu = nn.ReLU()

        # A projection shortcut is needed when shape or channel count changes.
        self.down_sample = False
        if stride != 1 or in_channel != out_channel:
            self.down_sample = True
        self.down_sample_layer = None
        if self.down_sample:
            if self.use_se:
                if stride == 1:
                    self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel,
                                                                         stride, use_se=self.use_se), _bn(out_channel)])
                else:
                    # Strided SE shortcut: pool first, then a stride-1 projection.
                    self.down_sample_layer = nn.SequentialCell([nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='same'),
                                                                _conv1x1(in_channel, out_channel, 1,
                                                                         use_se=self.use_se), _bn(out_channel)])
            else:
                self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel, stride,
                                                                     use_se=self.use_se), _bn(out_channel)])

    def construct(self, x):
        # Keep the input for the residual (identity) connection.
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        if self.use_se and self.stride != 1:
            out = self.e2(out)
        else:
            out = self.conv2(out)
            out = self.bn2(out)
            out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.se_block:
            # Re-weight channels of the block output with the SE gate.
            out_se = out
            out = self.se_global_pool(out, (2, 3))
            out = self.se_dense_0(out)
            out = self.relu(out)
            out = self.se_dense_1(out)
            out = self.se_sigmoid(out)
            out = F.reshape(out, F.shape(out) + (1, 1))
            out = self.se_mul(out, out_se)

        if self.down_sample:
            identity = self.down_sample_layer(identity)

        out = out + identity
        out = self.relu(out)
        return out
class ResNet(nn.Cell):
    """
    ResNet architecture.

    Args:
        block (Cell): Block for network.
        layer_nums (list): Numbers of block in different layers.
        in_channels (list): Input channel in each layer.
        out_channels (list): Output channel in each layer.
        strides (list): Stride size in each layer.
        num_classes (int): The number of classes that the training images are belonging to.
        use_se (bool): Enable SE-ResNet50 net. Default: False.
        se_block(bool): Use se block in SE-ResNet50 net in layer 3 and layer 4. Default: False.
        res_base (bool): Enable parameter setting of resnet18. Default: False.

    Returns:
        Tensor, output tensor.

    Examples:
        >>> ResNet(ResidualBlock,
        >>>        [3, 4, 6, 3],
        >>>        [64, 256, 512, 1024],
        >>>        [256, 512, 1024, 2048],
        >>>        [1, 2, 2, 2],
        >>>        10)
    """

    def __init__(self,
                 block,
                 layer_nums,
                 in_channels,
                 out_channels,
                 strides,
                 num_classes,
                 use_se=False,
                 res_base=False):
        super(ResNet, self).__init__()
        if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:
            raise ValueError("the length of layer_num, in_channels, out_channels list must be 4!")
        self.use_se = use_se
        self.res_base = res_base
        # se_block mirrors use_se; SE gates are only inserted in layers 3 and 4.
        self.se_block = False
        if self.use_se:
            self.se_block = True

        if self.use_se:
            # SE variant replaces the single 7x7 stem with three 3x3 convolutions.
            self.conv1_0 = _conv3x3(3, 32, stride=2, use_se=self.use_se)
            self.bn1_0 = _bn(32)
            self.conv1_1 = _conv3x3(32, 32, stride=1, use_se=self.use_se)
            self.bn1_1 = _bn(32)
            self.conv1_2 = _conv3x3(32, 64, stride=1, use_se=self.use_se)
        else:
            self.conv1 = _conv7x7(3, 64, stride=2, res_base=self.res_base)
        self.bn1 = _bn(64, self.res_base)
        self.relu = P.ReLU()

        if self.res_base:
            # resnet18-style stem: explicit padding, then a 'valid' max-pool.
            self.pad = nn.Pad(paddings=((0, 0), (0, 0), (1, 1), (1, 1)))
            self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="valid")
        else:
            self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")

        self.layer1 = self._make_layer(block,
                                       layer_nums[0],
                                       in_channel=in_channels[0],
                                       out_channel=out_channels[0],
                                       stride=strides[0],
                                       use_se=self.use_se)
        self.layer2 = self._make_layer(block,
                                       layer_nums[1],
                                       in_channel=in_channels[1],
                                       out_channel=out_channels[1],
                                       stride=strides[1],
                                       use_se=self.use_se)
        self.layer3 = self._make_layer(block,
                                       layer_nums[2],
                                       in_channel=in_channels[2],
                                       out_channel=out_channels[2],
                                       stride=strides[2],
                                       use_se=self.use_se,
                                       se_block=self.se_block)
        self.layer4 = self._make_layer(block,
                                       layer_nums[3],
                                       in_channel=in_channels[3],
                                       out_channel=out_channels[3],
                                       stride=strides[3],
                                       use_se=self.use_se,
                                       se_block=self.se_block)

        self.mean = P.ReduceMean(keep_dims=True)
        self.flatten = nn.Flatten()
        # 1000-way ImageNet-style head, followed by a task-specific classifier.
        self.end_point = _fc(out_channels[3], 1000, use_se=self.use_se)
        self.out = nn.SequentialCell([nn.Dense(1000, num_classes),# nn.Dropout(0.75),
                                      nn.Softmax()
                                      ])

    def _make_layer(self, block, layer_num, in_channel, out_channel, stride, use_se=False, se_block=False):
        """
        Make stage network of ResNet.

        Args:
            block (Cell): Resnet block.
            layer_num (int): Layer number.
            in_channel (int): Input channel.
            out_channel (int): Output channel.
            stride (int): Stride size for the first convolutional layer.
            se_block(bool): Use se block in SE-ResNet50 net. Default: False.
        Returns:
            SequentialCell, the output layer.

        Examples:
            >>> _make_layer(ResidualBlock, 3, 128, 256, 2)
        """
        layers = []

        # Only the first block of a stage may downsample / change channels.
        resnet_block = block(in_channel, out_channel, stride=stride, use_se=use_se)
        layers.append(resnet_block)
        if se_block:
            # Middle blocks are plain; only the last block carries the SE gate.
            for _ in range(1, layer_num - 1):
                resnet_block = block(out_channel, out_channel, stride=1, use_se=use_se)
                layers.append(resnet_block)
            resnet_block = block(out_channel, out_channel, stride=1, use_se=use_se, se_block=se_block)
            layers.append(resnet_block)
        else:
            for _ in range(1, layer_num):
                resnet_block = block(out_channel, out_channel, stride=1, use_se=use_se)
                layers.append(resnet_block)
        return nn.SequentialCell(layers)

    def construct(self, x):
        # Stem: 7x7 conv, or three 3x3 convs in the SE variant.
        if self.use_se:
            x = self.conv1_0(x)
            x = self.bn1_0(x)
            x = self.relu(x)
            x = self.conv1_1(x)
            x = self.bn1_1(x)
            x = self.relu(x)
            x = self.conv1_2(x)
        else:
            x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        if self.res_base:
            x = self.pad(x)
        c1 = self.maxpool(x)

        c2 = self.layer1(c1)
        c3 = self.layer2(c2)
        c4 = self.layer3(c3)
        c5 = self.layer4(c4)

        # Global average pool over spatial dims, then the classification head.
        out = self.mean(c5, (2, 3))
        out = self.flatten(out)
        out = self.end_point(out)
        out = self.out(out)
        return out
def resnet50(class_num=10):
    """
    Get ResNet50 neural network.

    Args:
        class_num (int): Class number.

    Returns:
        Cell, cell instance of ResNet50 neural network.

    Examples:
        >>> net = resnet50(10)
    """
    # Standard ResNet-50 configuration: stage depths, channel plan and strides.
    layer_nums = [3, 4, 6, 3]
    in_channels = [64, 256, 512, 1024]
    out_channels = [256, 512, 1024, 2048]
    strides = [1, 2, 2, 2]
    return ResNet(ResidualBlock, layer_nums, in_channels, out_channels, strides, class_num)
| 39.558296 | 120 | 0.575469 |
a9456c16529093901bae688bb953d11a2dae3938 | 1,548 | py | Python | aiogram_keyboards/dialog/builtin/builtin_converts.py | TheArcherST/aiogram_keyboards | 2a6a6e76e825b53020de985b237f56fcc1f9a8b0 | [
"MIT"
] | 1 | 2021-10-30T23:38:38.000Z | 2021-10-30T23:38:38.000Z | aiogram_keyboards/dialog/builtin/builtin_converts.py | TheArcherST/aiogram_keyboards | 2a6a6e76e825b53020de985b237f56fcc1f9a8b0 | [
"MIT"
] | 7 | 2021-10-30T23:34:22.000Z | 2022-01-21T21:31:20.000Z | aiogram_keyboards/dialog/builtin/builtin_converts.py | TheArcherST/aiogram_keyboards | 2a6a6e76e825b53020de985b237f56fcc1f9a8b0 | [
"MIT"
] | 2 | 2021-11-05T08:08:09.000Z | 2021-11-10T12:17:45.000Z | """Built-in converters
- Text
- PhotoID
- DocumentID
- Integer
- Float
"""
from aiogram.types import CallbackQuery, Message
from ..cast import CastMessage, CastTelegramObj, T, TO
class PhotoID(CastMessage[str]):
    """Cast a photo message to the file_id of its first photo size."""

    def _cast(self, obj: Message) -> T:
        return obj.photo[0].file_id
class DocumentID(CastMessage[str]):
    """Cast a message to the file_id of its attached document."""

    def _cast(self, obj: Message) -> T:
        try:
            document = obj.document
            return document.file_id
        except AttributeError as e:
            # A message without a document has no file_id to extract.
            raise ValueError(f'Error while cast: {e}')
class Integer(CastTelegramObj[int, TO]):
    """Cast callback data or message text to an int."""

    def _cast(self, obj: TO) -> T:
        if isinstance(obj, CallbackQuery):
            raw = obj.data
        elif isinstance(obj, Message):
            raw = obj.text
        else:
            return None
        return int(raw)
class Float(CastTelegramObj[float, TO]):
    """Cast callback data or message text to a float."""

    def _cast(self, obj: TO) -> T:
        if isinstance(obj, CallbackQuery):
            raw = obj.data
        elif isinstance(obj, Message):
            raw = obj.text
        else:
            return None
        return float(raw)
class Text(CastTelegramObj[str, TO]):
    """Cast callback data or message text to str, rejecting empty payloads."""

    def _cast(self, obj: TO) -> T:
        if isinstance(obj, CallbackQuery):
            if obj.data is None:
                raise ValueError('Cant convert to text `CallbackQuery` object without `data`.')
            return str(obj.data)
        if isinstance(obj, Message):
            if obj.text is None:
                raise ValueError('Cant convert to text `Message` object without `text`.')
            return str(obj.text)
        return None
| 22.434783 | 81 | 0.577519 |
f6e82b8eb584d06c809e2d842ca950c38feb1ad2 | 1,974 | py | Python | src/preprocessing/transform.py | css459/restaurant-closure-prediction | e93dcc6083a765a2d85f18f944ebaa2e96c7b831 | [
"Apache-2.0"
] | null | null | null | src/preprocessing/transform.py | css459/restaurant-closure-prediction | e93dcc6083a765a2d85f18f944ebaa2e96c7b831 | [
"Apache-2.0"
] | null | null | null | src/preprocessing/transform.py | css459/restaurant-closure-prediction | e93dcc6083a765a2d85f18f944ebaa2e96c7b831 | [
"Apache-2.0"
] | null | null | null | #
# Cole Smith
# Restaurant Closure Engine
# BDS - Undergraduate
# transform.py
#
# Utility functions for train / test splitting, re-balancing,
# and other numerical preparations for data matrices
#
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, StandardScaler
#
# Split
#
def split_train_test(df, y, split_size=0.2):
    """
    Performs preprocessing on the master dataset
    and splits it into training and test sets.

    :param df: The master dataset
    :param y: Column to predict on
    :param split_size: Percent of data to use for testing; when <= 0, no split
                       is performed and (X, Y, None, None) is returned
    :return: X_train, X_test, y_train, y_test (or X, Y, None, None when
             split_size <= 0)
    """
    # Fix: `df.drop(y, 1)` relied on the positional `axis` argument, which was
    # deprecated and removed in pandas 2.0; use the `columns` keyword instead.
    if split_size <= 0:
        return df.drop(columns=y), df[y], None, None

    y_set = df[y]
    x_set = df.drop(columns=y)

    # Fixed random_state keeps the split reproducible across runs.
    return train_test_split(x_set, y_set, train_size=1 - split_size, random_state=42)
#
# Resampling
#
# def selective_master_downsample(master):
# """
# Selectively downsample sparse data
# in master dataset
#
# :param master: The master dataset
# :return: `DataFrame`
# """
#
# return master.dropna()
#
# Value Scaling
#
def normalize_values(df, y):
    """
    Normalize (z-score) the feature columns of a DataFrame.

    :param df: DataFrame
    :param y: Name of Y col as string
    :return: (scaled X DataFrame, unscaled Y Series)
    """
    # Fix: `df.drop(y, 1)` used the positional `axis` argument (removed in
    # pandas 2.0) and dropped the column twice; drop once and reuse.
    x = df.drop(columns=y)
    return pd.DataFrame(StandardScaler().fit_transform(x), columns=x.columns), df[y]
def min_max_scale_values(df, y):
    """
    Min/Max scale the columns of a DataFrame.

    :param df: DataFrame
    :param y: Name of Y col as string; falsy (None/empty) scales all columns
    :return: (scaled X DataFrame, unscaled Y Series) when y is given,
             otherwise the fully scaled DataFrame
    """
    # Fix: `df.drop(y, 1)` used the positional `axis` argument (removed in
    # pandas 2.0) and dropped the column twice; drop once and reuse.
    if y:
        x = df.drop(columns=y)
        return pd.DataFrame(MinMaxScaler().fit_transform(x), columns=x.columns), df[y]
    return pd.DataFrame(MinMaxScaler().fit_transform(df), columns=df.columns)
| 23.223529 | 93 | 0.649443 |
3f7f1c74288a64ae5014c4234100e6151f56b5ea | 165 | py | Python | String.py | Hassan0072/CP_HS99 | 365ecd119e19fcf2c69ccaef42b278a1a9256741 | [
"MIT"
] | null | null | null | String.py | Hassan0072/CP_HS99 | 365ecd119e19fcf2c69ccaef42b278a1a9256741 | [
"MIT"
] | 3 | 2019-05-05T16:30:58.000Z | 2019-05-11T00:23:02.000Z | String.py | Hassan0072/CP_HS99 | 365ecd119e19fcf2c69ccaef42b278a1a9256741 | [
"MIT"
] | 1 | 2019-04-21T18:46:01.000Z | 2019-04-21T18:46:01.000Z | a=input("enter string :")
# Reverse the input with slicing instead of the manual accumulation loop,
# then report whether it reads the same forwards and backwards.
revstring = a[::-1]
if revstring == a:
    print("string is palindrome")
else:
    print("string is not palindrome")
7f4b89efceaec0491a47e2e63074fbacd50dd723 | 2,374 | py | Python | src/litegraph/graph.py | RemyLau/litegraph | eff1b8204ad1f9e1b7b6c1b9dced09c072333264 | [
"MIT"
] | null | null | null | src/litegraph/graph.py | RemyLau/litegraph | eff1b8204ad1f9e1b7b6c1b9dced09c072333264 | [
"MIT"
] | null | null | null | src/litegraph/graph.py | RemyLau/litegraph | eff1b8204ad1f9e1b7b6c1b9dced09c072333264 | [
"MIT"
] | null | null | null | class Graph:
"""A light weight (undirected and weighted) graph object."""
def __init__(self):
self.data = []
self.ids = []
self.idmap = {}
self._number_of_nodes = 0
self._number_of_edges = 0
@property
def nodes(self):
return self.ids.copy()
@property
def number_of_nodes(self):
return self._number_of_nodes
@property
def number_of_edges(self):
return self._number_of_edges
def _add_node(self, node):
self.idmap[node] = self.number_of_nodes
self.ids.append(node)
self.data.append({})
self._number_of_nodes += 1
def add_node(self, node):
if node in self.idmap:
import warnings
warnings.warn(
"{node!r} (index = {self.idmap[node]}) already exists",
RuntimeWarning
)
else:
self._add_node(node)
def get_node_idx(self, node):
if node not in self.idmap:
self._add_node(node)
return self.idmap[node]
def add_edge(self, node1, node2, weight):
idx1 = self.get_node_idx(node1)
idx2 = self.get_node_idx(node2)
self.data[idx1][idx2] = self.data[idx2][idx1] = weight
self._number_of_edges += 1
def get_connected_components(self):
"""Find connected components via BFS search.
Return:
Sorted list of connected components by size in ascending order
"""
unvisited = set(range(self.number_of_nodes))
components = []
while unvisited:
seed_node = next(iter(unvisited))
next_level_nodes = [seed_node]
component_membership = []
while next_level_nodes:
curr_level_nodes = next_level_nodes[:]
next_level_nodes = []
for node in curr_level_nodes:
if node in unvisited:
for nbr in self.data[node]:
if nbr in unvisited:
next_level_nodes.append(nbr)
component_membership.append(node)
unvisited.remove(node)
components.append(component_membership)
return sorted(components, key=len, reverse=True)
def subgraph(self):
raise NotImplementedError
| 28.261905 | 74 | 0.560657 |
12e135376f5b8bc297cfa66e44b749f8f2f639b9 | 741 | py | Python | ipython_save.py | franceme/Scripts | 9d0ffb6a5cc504b011d58f73ae7b5cc5c9602f7d | [
"Apache-2.0"
] | null | null | null | ipython_save.py | franceme/Scripts | 9d0ffb6a5cc504b011d58f73ae7b5cc5c9602f7d | [
"Apache-2.0"
] | null | null | null | ipython_save.py | franceme/Scripts | 9d0ffb6a5cc504b011d58f73ae7b5cc5c9602f7d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
import os,sys,shelve
def shelf_out(shelf_name):
    """Snapshot the module's global variables into a shelve file.

    :param shelf_name: base path handed to ``shelve.open``; the 'n' flag
        always creates a fresh, empty shelf.
    """
    with shelve.open(shelf_name, 'n') as shelf:
        # Bug fix: the original iterated ``dir()`` *inside* this function,
        # which lists the local scope (shelf, shelf_name, ...) rather than
        # the module globals it then indexed -- so nothing useful was saved.
        # Snapshot globals() so the shelf assignment cannot mutate it mid-loop.
        for key, value in list(globals().items()):
            try:
                shelf[key] = value
            except TypeError:
                print(f"Could not shelf {key} due to type error")
            except Exception as e:
                print(f"Could not shelf {key} due to exception {e}")
    print("Completed")
if __name__ == '__main__':
    # Strip this script's own name out of the (deduplicated) argv entries.
    script_name = str(__file__).replace('./', '').lower()
    cli_args = [arg for arg in set(map(lambda x: x.strip(), sys.argv)) if script_name not in arg.lower()]
    # First remaining argument names the shelf; otherwise use the default.
    shelf_target = cli_args[0] if cli_args else "output.shelf"
    shelf_out(shelf_target)
    #shelf_out('')
27cfd7338d5e0c5f8dda2379f0863bc37faaadc7 | 1,844 | py | Python | archive/Changesets/mbf-76446/Source/Tools/Python/BioIronPython/Util.py | jdm7dv/Microsoft-Biology-Foundation | 4873300d957cb46694392c480301b319b55f58b8 | [
"Apache-2.0"
] | null | null | null | archive/Changesets/mbf-76446/Source/Tools/Python/BioIronPython/Util.py | jdm7dv/Microsoft-Biology-Foundation | 4873300d957cb46694392c480301b319b55f58b8 | [
"Apache-2.0"
] | null | null | null | archive/Changesets/mbf-76446/Source/Tools/Python/BioIronPython/Util.py | jdm7dv/Microsoft-Biology-Foundation | 4873300d957cb46694392c480301b319b55f58b8 | [
"Apache-2.0"
] | null | null | null | # Copyright Microsoft Corporation. All rights reserved.
import clr
import os
import sys
import random
# Current working directory, used to derive the solution root below.
_cwd = os.getcwd()
# Path up to (and including) the last backslash -- assumes a Windows path.
_solution_path = _cwd[ : _cwd.rindex("\\") + 1]
def add_biodotnet_reference(dll_name):
"Adds a Bio dll reference to the clr so that its contents can be imported."
# An exception will be thrown if we're debugging in VS from the Python development dir, instead
# of the standard non-dev method of running from the bin\Debug dir or an installation dir. If
# we are debugging in VS, we just need to add bin\Debug to the path.
try:
clr.AddReferenceToFile(dll_name + ".dll")
except:
sys.path += [_solution_path + r"..\..\Build\Binaries\Debug"]
print _solution_path
clr.AddReferenceToFile(dll_name + ".dll")
# Load the core Bio assembly at import time, then expose its namespaces here.
add_biodotnet_reference("Bio")
from Bio import *
from Bio.Util import *
def split_sequence(seq, coverage, fragment_length):
    "Splits a sequence into overlapping fragments of the given length with the given coverage."
    num_fragments = seq.Count * coverage / fragment_length
    fragment_list = []
    for i in range(0, num_fragments):
        # Random start so fragments overlap across the whole sequence.
        start = random.randrange(seq.Count - fragment_length + 1)
        # Bug fix: the scratch list was created once *outside* the loop, so
        # every fragment also contained all previously collected items; it is
        # now reset per fragment.  Also, `.Add` is not a Python list method --
        # use `.append`.
        tmp_fragment = []
        for item in Helper.GetSequenceRange(seq, start, fragment_length):
            tmp_fragment.append(item)
        fragment = Sequence(seq.Alphabet, tmp_fragment)
        # repr(i) replaces the Python-2-only backtick syntax; same output.
        fragment.ID = seq.ID + " (Split " + repr(i) + ")"
        fragment_list.append(fragment)
    return fragment_list
def GetInputFileName(message):
    "Gets the file name from the command prompt"
    # Prompt repeatedly until the user types something non-blank.
    name = raw_input(message)
    while not name.lstrip():
        name = raw_input("Please enter a validFileName:")
    return name
dcf2215860fd3d56b90efe6cad4dc31f8bf87c62 | 1,051 | py | Python | Task1D.py | RichardJZhang2000/CUED-IA-Lent-Computing | ac048257230f57d90c0b12d13990d7ec985c48eb | [
"MIT"
] | null | null | null | Task1D.py | RichardJZhang2000/CUED-IA-Lent-Computing | ac048257230f57d90c0b12d13990d7ec985c48eb | [
"MIT"
] | null | null | null | Task1D.py | RichardJZhang2000/CUED-IA-Lent-Computing | ac048257230f57d90c0b12d13990d7ec985c48eb | [
"MIT"
] | null | null | null | from floodsystem.geo import rivers_with_station
from floodsystem.geo import stations_by_river
from floodsystem.stationdata import build_station_list
def run():
    """Requirements for Task 1D"""
    # Build the full station list once and reuse it for both queries.
    all_stations = build_station_list()

    # Rivers that have at least one monitoring station.
    river_names = rivers_with_station(all_stations)
    print("{} stations. First 10 - {}".format(len(river_names), sorted(river_names)[:10]))

    # Map each river to its stations and report three selected rivers.
    by_river = stations_by_river(all_stations)
    for river in ("River Aire", "River Cam", "River Thames"):
        names = sorted(station.name for station in by_river[river])
        print("{} has the following stations: {}".format(river, names))
if __name__ == "__main__":
    # Run the demo only when executed directly, not when imported.
    print("*** Task 1D: CUED Part IA Flood Warning System ***")
    run()
d15217759ad2fb207091d404257d09b97756c63d | 738 | py | Python | ndb_utils/exceptions.py | Othernet-Project/ndb-utils | 7804a5e305a4ed280742e22dad1dd10756cbe695 | [
"MIT"
] | 1 | 2018-12-30T18:49:02.000Z | 2018-12-30T18:49:02.000Z | ndb_utils/exceptions.py | Othernet-Project/ndb-utils | 7804a5e305a4ed280742e22dad1dd10756cbe695 | [
"MIT"
] | null | null | null | ndb_utils/exceptions.py | Othernet-Project/ndb-utils | 7804a5e305a4ed280742e22dad1dd10756cbe695 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals, print_function
# Names exported by `from <module> import *`.
__all__ = ['ModelError', 'BadKeyValueError', 'DuplicateEntityError',
           'ValidationError']
class ModelError(Exception):
    """Generic base class for errors raised by this utils module."""
    pass
class BadKeyValueError(ModelError):
    """Raised when a value needed to build a key is missing."""
    pass
class DuplicateEntityError(ModelError):
    """Raised by models implementing KeyMixin when an entity already exists."""
    pass
class ValidationError(Exception):
    """Raised when validation fails in the ValidatingMixin."""

    def __init__(self, message, errors, *args, **kwargs):
        # Forward the message (plus any extras) to Exception, and keep the
        # detailed per-field errors available on the instance.
        super(ValidationError, self).__init__(message, *args, **kwargs)
        self.errors = errors
| 25.448276 | 79 | 0.704607 |
d9b5690d0923006a89a99562d850273dad56052f | 747 | py | Python | tests/migrations/0008_customerregistration.py | Formulka/django-GDPR | 84d427d0856f31f4bff7305298b3292c71e62fec | [
"MIT"
] | 55 | 2018-05-24T18:33:43.000Z | 2022-03-08T13:42:49.000Z | tests/migrations/0008_customerregistration.py | Formulka/django-GDPR | 84d427d0856f31f4bff7305298b3292c71e62fec | [
"MIT"
] | 14 | 2018-04-17T19:42:23.000Z | 2022-02-10T10:21:08.000Z | tests/migrations/0008_customerregistration.py | Formulka/django-GDPR | 84d427d0856f31f4bff7305298b3292c71e62fec | [
"MIT"
] | 12 | 2018-04-16T11:41:39.000Z | 2021-11-09T12:35:38.000Z | # Generated by Django 3.1 on 2021-04-16 11:47
from django.db import migrations, models
import gdpr.mixins
class Migration(migrations.Migration):
    # Auto-generated schema migration adding the CustomerRegistration model.

    dependencies = [
        ('tests', '0007_childe_extraparentd_parentb_parentc_topparenta'),
    ]

    operations = [
        migrations.CreateModel(
            name='CustomerRegistration',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email_address', models.EmailField(blank=True, max_length=254, null=True)),
            ],
            options={
                'abstract': False,
            },
            # The anonymization mixin hooks the model into the GDPR machinery.
            bases=(gdpr.mixins.AnonymizationModelMixin, models.Model),
        ),
    ]
| 28.730769 | 114 | 0.601071 |
a254ca15cb6271de6f97e6d971d48275a8c8b2d9 | 329 | py | Python | rest_search/middleware.py | jlaine/django-rest-search | 5a5fad6204cc0b8e64b3c42c6cb172ecc80379c0 | [
"BSD-2-Clause"
] | 7 | 2016-12-06T10:09:18.000Z | 2021-09-02T01:43:18.000Z | rest_search/middleware.py | jlaine/django-rest-search | 5a5fad6204cc0b8e64b3c42c6cb172ecc80379c0 | [
"BSD-2-Clause"
] | 5 | 2020-10-05T08:26:18.000Z | 2021-12-14T14:24:33.000Z | rest_search/middleware.py | jlaine/django-rest-search | 5a5fad6204cc0b8e64b3c42c6cb172ecc80379c0 | [
"BSD-2-Clause"
] | 3 | 2017-05-02T13:51:26.000Z | 2021-09-01T15:03:56.000Z | # -*- coding: utf-8 -*-
from django.utils.deprecation import MiddlewareMixin
from rest_search import queue_flush
class FlushUpdatesMiddleware(MiddlewareMixin):
    """
    Middleware that flushes ElasticSearch updates.
    """

    def process_response(self, request, response):
        # Push any queued index updates once the view has produced its response.
        queue_flush()
        return response
| 20.5625 | 52 | 0.714286 |
214bafdc8a1e90516b5922ce3f53d01147f3df8f | 51,417 | py | Python | sdk/logic/azure-mgmt-logic/azure/mgmt/logic/aio/operations/_workflows_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/logic/azure-mgmt-logic/azure/mgmt/logic/aio/operations/_workflows_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/logic/azure-mgmt-logic/azure/mgmt/logic/aio/operations/_workflows_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
# Signature of the optional `cls` callback accepted by operations: it receives
# the pipeline response, the deserialized object and response headers, and may
# transform the value returned to the caller.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class WorkflowsOperations:
"""WorkflowsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.logic.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
    # Keep references to the shared pipeline client, configuration and
    # (de)serializers used by every operation method in this group.
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
    self._config = config
def list_by_subscription(
    self,
    top: Optional[int] = None,
    filter: Optional[str] = None,
    **kwargs
) -> AsyncIterable["_models.WorkflowListResult"]:
    """Gets a list of workflows by subscription.

    :param top: The number of items to be included in the result.
    :type top: int
    :param filter: The filter to apply on the operation. Options for filters include: State,
     Trigger, and ReferencedResourceId.
    :type filter: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either WorkflowListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.logic.models.WorkflowListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.WorkflowListResult"]
    # Map HTTP error statuses to the exception types callers should receive.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-05-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # First page: format the templated URL and attach query parameters.
            url = self.list_by_subscription.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            if top is not None:
                query_parameters['$top'] = self._serialize.query("top", top, 'int')
            if filter is not None:
                query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # Continuation page: the next_link URL already carries the query string.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and return (continuation token, async item list).
        deserialized = self._deserialize('WorkflowListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            # Deserialize the service error payload before raising.
            error = self._deserialize(_models.ErrorResponse, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Logic/workflows'}  # type: ignore
def list_by_resource_group(
    self,
    resource_group_name: str,
    top: Optional[int] = None,
    filter: Optional[str] = None,
    **kwargs
) -> AsyncIterable["_models.WorkflowListResult"]:
    """Gets a list of workflows by resource group.

    :param resource_group_name: The resource group name.
    :type resource_group_name: str
    :param top: The number of items to be included in the result.
    :type top: int
    :param filter: The filter to apply on the operation. Options for filters include: State,
     Trigger, and ReferencedResourceId.
    :type filter: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either WorkflowListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.logic.models.WorkflowListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.WorkflowListResult"]
    # Map HTTP error statuses to the exception types callers should receive.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-05-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # First page: format the templated URL and attach query parameters.
            url = self.list_by_resource_group.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            if top is not None:
                query_parameters['$top'] = self._serialize.query("top", top, 'int')
            if filter is not None:
                query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # Continuation page: the next_link URL already carries the query string.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and return (continuation token, async item list).
        deserialized = self._deserialize('WorkflowListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            # Deserialize the service error payload before raising.
            error = self._deserialize(_models.ErrorResponse, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows'}  # type: ignore
async def get(
    self,
    resource_group_name: str,
    workflow_name: str,
    **kwargs
) -> "_models.Workflow":
    """Gets a workflow.

    :param resource_group_name: The resource group name.
    :type resource_group_name: str
    :param workflow_name: The workflow name.
    :type workflow_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: Workflow, or the result of cls(response)
    :rtype: ~azure.mgmt.logic.models.Workflow
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Workflow"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-05-01"
    accept = "application/json"

    # Expand the ARM URL template into the concrete resource URL.
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'workflowName': self._serialize.url("workflow_name", workflow_name, 'str'),
    }
    url = self._client.format_url(self.get.metadata['url'], **path_format_arguments)  # type: ignore

    # Query string and request headers.
    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('Workflow', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}'}  # type: ignore
async def create_or_update(
    self,
    resource_group_name: str,
    workflow_name: str,
    workflow: "_models.Workflow",
    **kwargs
) -> "_models.Workflow":
    """Creates or updates a workflow.

    Issues a PUT against the workflow resource; 200 means an existing
    workflow was updated, 201 means a new one was created.

    :param resource_group_name: The resource group name.
    :type resource_group_name: str
    :param workflow_name: The workflow name.
    :type workflow_name: str
    :param workflow: The workflow.
    :type workflow: ~azure.mgmt.logic.models.Workflow
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: Workflow, or the result of cls(response)
    :rtype: ~azure.mgmt.logic.models.Workflow
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Workflow"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-05-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self.create_or_update.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'workflowName': self._serialize.url("workflow_name", workflow_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the workflow model as the PUT body.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(workflow, 'Workflow')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    # Both 200 (updated) and 201 (created) carry a Workflow body; a single
    # deserialization replaces the duplicated per-status-code branches.
    deserialized = self._deserialize('Workflow', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}'}  # type: ignore
async def update(
    self,
    resource_group_name: str,
    workflow_name: str,
    **kwargs
) -> "_models.Workflow":
    """Updates a workflow.

    :param resource_group_name: The resource group name.
    :type resource_group_name: str
    :param workflow_name: The workflow name.
    :type workflow_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: Workflow, or the result of cls(response)
    :rtype: ~azure.mgmt.logic.models.Workflow
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Workflow"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-05-01"
    accept = "application/json"

    # Fill the ARM URL template directly via keyword arguments.
    url = self._client.format_url(
        self.update.metadata['url'],  # type: ignore
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        workflowName=self._serialize.url("workflow_name", workflow_name, 'str'),
    )

    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.patch(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('Workflow', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}'}  # type: ignore
async def delete(
    self,
    resource_group_name: str,
    workflow_name: str,
    **kwargs
) -> None:
    """Deletes a workflow.

    :param resource_group_name: The resource group name.
    :type resource_group_name: str
    :param workflow_name: The workflow name.
    :type workflow_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-05-01"
    accept = "application/json"

    # Resolve the workflow resource URL.
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'workflowName': self._serialize.url("workflow_name", workflow_name, 'str'),
    }
    url = self._client.format_url(self.delete.metadata['url'], **path_format_arguments)  # type: ignore

    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200 (deleted) and 204 (no content) are both treated as success.
    if response.status_code not in (200, 204):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}'}  # type: ignore
async def disable(
    self,
    resource_group_name: str,
    workflow_name: str,
    **kwargs
) -> None:
    """Disables a workflow.

    :param resource_group_name: The resource group name.
    :type resource_group_name: str
    :param workflow_name: The workflow name.
    :type workflow_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-05-01"
    accept = "application/json"

    # Resolve the /disable action URL for this workflow.
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'workflowName': self._serialize.url("workflow_name", workflow_name, 'str'),
    }
    url = self._client.format_url(self.disable.metadata['url'], **path_format_arguments)  # type: ignore

    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    # ARM actions are invoked with an empty-body POST.
    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
disable.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/disable'}  # type: ignore
async def enable(
    self,
    resource_group_name: str,
    workflow_name: str,
    **kwargs
) -> None:
    """Enables a workflow.

    :param resource_group_name: The resource group name.
    :type resource_group_name: str
    :param workflow_name: The workflow name.
    :type workflow_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-05-01"
    accept = "application/json"

    # Resolve the /enable action URL for this workflow.
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'workflowName': self._serialize.url("workflow_name", workflow_name, 'str'),
    }
    url = self._client.format_url(self.enable.metadata['url'], **path_format_arguments)  # type: ignore

    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    # ARM actions are invoked with an empty-body POST.
    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
enable.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/enable'}  # type: ignore
async def generate_upgraded_definition(
    self,
    resource_group_name: str,
    workflow_name: str,
    parameters: "_models.GenerateUpgradedDefinitionParameters",
    **kwargs
) -> object:
    """Generates the upgraded definition for a workflow.

    :param resource_group_name: The resource group name.
    :type resource_group_name: str
    :param workflow_name: The workflow name.
    :type workflow_name: str
    :param parameters: Parameters for generating an upgraded definition.
    :type parameters: ~azure.mgmt.logic.models.GenerateUpgradedDefinitionParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: object, or the result of cls(response)
    :rtype: object
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[object]
    # Map well-known failure status codes to typed exceptions; callers may extend via error_map.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-05-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self.generate_upgraded_definition.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'workflowName': self._serialize.url("workflow_name", workflow_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the request parameters and POST to the generateUpgradedDefinition action.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'GenerateUpgradedDefinitionParameters')
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    # The upgraded definition comes back as untyped JSON ('object').
    deserialized = self._deserialize('object', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
generate_upgraded_definition.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/generateUpgradedDefinition'}  # type: ignore
async def list_callback_url(
    self,
    resource_group_name: str,
    workflow_name: str,
    list_callback_url: "_models.GetCallbackUrlParameters",
    **kwargs
) -> "_models.WorkflowTriggerCallbackUrl":
    """Get the workflow callback Url.

    :param resource_group_name: The resource group name.
    :type resource_group_name: str
    :param workflow_name: The workflow name.
    :type workflow_name: str
    :param list_callback_url: Which callback url to list.
    :type list_callback_url: ~azure.mgmt.logic.models.GetCallbackUrlParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: WorkflowTriggerCallbackUrl, or the result of cls(response)
    :rtype: ~azure.mgmt.logic.models.WorkflowTriggerCallbackUrl
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.WorkflowTriggerCallbackUrl"]
    # Map well-known failure status codes to typed exceptions; callers may extend via error_map.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-05-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self.list_callback_url.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'workflowName': self._serialize.url("workflow_name", workflow_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the selector (key type / "NotSpecified") and POST to listCallbackUrl.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(list_callback_url, 'GetCallbackUrlParameters')
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('WorkflowTriggerCallbackUrl', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
list_callback_url.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/listCallbackUrl'}  # type: ignore
async def list_swagger(
    self,
    resource_group_name: str,
    workflow_name: str,
    **kwargs
) -> object:
    """Gets an OpenAPI definition for the workflow.

    :param resource_group_name: The resource group name.
    :type resource_group_name: str
    :param workflow_name: The workflow name.
    :type workflow_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: object, or the result of cls(response)
    :rtype: object
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[object]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-05-01"
    accept = "application/json"

    # Resolve the /listSwagger action URL for this workflow.
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'workflowName': self._serialize.url("workflow_name", workflow_name, 'str'),
    }
    url = self._client.format_url(self.list_swagger.metadata['url'], **path_format_arguments)  # type: ignore

    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    # The action takes no body; an empty POST triggers it.
    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('object', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
list_swagger.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/listSwagger'}  # type: ignore
async def _move_initial(
    self,
    resource_group_name: str,
    workflow_name: str,
    move: "_models.WorkflowReference",
    **kwargs
) -> None:
    """Issue the initial POST of the long-running move operation.

    Private helper for :meth:`begin_move`; it sends the request and
    validates the immediate response, while polling to completion is
    handled by the poller returned from ``begin_move``.

    :param resource_group_name: The resource group name.
    :type resource_group_name: str
    :param workflow_name: The workflow name.
    :type workflow_name: str
    :param move: The workflow to move.
    :type move: ~azure.mgmt.logic.models.WorkflowReference
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-05-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._move_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'workflowName': self._serialize.url("workflow_name", workflow_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the target reference and POST to the /move action.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(move, 'WorkflowReference')
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200 = completed synchronously; 202 = accepted, poll for completion.
    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_move_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/move'}  # type: ignore
async def begin_move(
    self,
    resource_group_name: str,
    workflow_name: str,
    move: "_models.WorkflowReference",
    **kwargs
) -> AsyncLROPoller[None]:
    """Moves an existing workflow.

    :param resource_group_name: The resource group name.
    :type resource_group_name: str
    :param workflow_name: The workflow name.
    :type workflow_name: str
    :param move: The workflow to move.
    :type move: ~azure.mgmt.logic.models.WorkflowReference
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: send the initial move request. cls is set to an
        # identity lambda so the raw pipeline response is kept for the poller.
        raw_result = await self._move_initial(
            resource_group_name=resource_group_name,
            workflow_name=workflow_name,
            move=move,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # These kwargs were consumed by the initial call; the polling method
    # must not receive them again.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # The move operation returns no body; only invoke the custom
        # deserialization hook if the caller supplied one.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'workflowName': self._serialize.url("workflow_name", workflow_name, 'str'),
    }

    # Select the polling strategy: default ARM polling, no polling, or a
    # caller-provided AsyncPollingMethod.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its continuation token.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_move.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/move'}  # type: ignore
async def regenerate_access_key(
    self,
    resource_group_name: str,
    workflow_name: str,
    key_type: "_models.RegenerateActionParameter",
    **kwargs
) -> None:
    """Regenerates the callback URL access key for request triggers.

    :param resource_group_name: The resource group name.
    :type resource_group_name: str
    :param workflow_name: The workflow name.
    :type workflow_name: str
    :param key_type: The access key type.
    :type key_type: ~azure.mgmt.logic.models.RegenerateActionParameter
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Map well-known failure status codes to typed exceptions; callers may extend via error_map.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-05-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self.regenerate_access_key.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'workflowName': self._serialize.url("workflow_name", workflow_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the key-type selector and POST to the regenerateAccessKey action.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(key_type, 'RegenerateActionParameter')
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    # Success returns no body.
    if cls:
        return cls(pipeline_response, None, {})
regenerate_access_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/regenerateAccessKey'}  # type: ignore
async def validate_by_resource_group(
    self,
    resource_group_name: str,
    workflow_name: str,
    validate: "_models.Workflow",
    **kwargs
) -> None:
    """Validates the workflow.

    :param resource_group_name: The resource group name.
    :type resource_group_name: str
    :param workflow_name: The workflow name.
    :type workflow_name: str
    :param validate: The workflow.
    :type validate: ~azure.mgmt.logic.models.Workflow
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Map well-known failure status codes to typed exceptions; callers may extend via error_map.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-05-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self.validate_by_resource_group.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'workflowName': self._serialize.url("workflow_name", workflow_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the candidate workflow and POST to the /validate action.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(validate, 'Workflow')
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200 means the workflow validated successfully; validation failures
    # surface as error status codes and raise below.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
validate_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/validate'}  # type: ignore
    async def validate_by_location(
        self,
        resource_group_name: str,
        location: str,
        workflow_name: str,
        validate: "_models.Workflow",
        **kwargs
    ) -> None:
        """Validates the workflow definition.

        Issues a POST against the ARM ``.../locations/{location}/workflows/
        {workflowName}/validate`` endpoint; a 200 response means the
        definition is valid, anything else raises.

        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param location: The workflow location.
        :type location: str
        :param workflow_name: The workflow name.
        :type workflow_name: str
        :param validate: The workflow.
        :type validate: ~azure.mgmt.logic.models.Workflow
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # NOTE(review): appears to be AutoRest-generated SDK code; it follows
        # the standard pattern: build URL -> query params -> headers ->
        # serialized body -> POST -> map errors.  Kept byte-identical.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Map auth/404/409 status codes onto specific azure-core exceptions;
        # callers may extend the mapping via the ``error_map`` kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-05-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL from the template stored on this method's metadata.
        url = self.validate_by_location.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'location': self._serialize.url("location", location, 'str'),
            'workflowName': self._serialize.url("workflow_name", workflow_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the workflow payload and run the request pipeline.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(validate, 'Workflow')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            # Deserialize the ARM error payload for a rich exception model.
            error = self._deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})
    validate_by_location.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/locations/{location}/workflows/{workflowName}/validate'}  # type: ignore
| 48.053271 | 215 | 0.662835 |
37c015d0b3d069db8eedc86454693efb88453378 | 549 | py | Python | Problemset/two-sum-ii-input-array-is-sorted/two-sum-ii-input-array-is-sorted.py | Yuziquan/LeetCode | 303fc1c8af847f783c4020bd731b28b72ed92a35 | [
"MIT"
] | 1 | 2020-12-22T12:39:21.000Z | 2020-12-22T12:39:21.000Z | Problemset/two-sum-ii-input-array-is-sorted/two-sum-ii-input-array-is-sorted.py | Yuziquan/LeetCode | 303fc1c8af847f783c4020bd731b28b72ed92a35 | [
"MIT"
] | null | null | null | Problemset/two-sum-ii-input-array-is-sorted/two-sum-ii-input-array-is-sorted.py | Yuziquan/LeetCode | 303fc1c8af847f783c4020bd731b28b72ed92a35 | [
"MIT"
] | null | null | null |
# @Title: 两数之和 II - 输入有序数组 (Two Sum II - Input array is sorted)
# @Author: KivenC
# @Date: 2018-07-14 15:01:31
# @Runtime: 28 ms
# @Memory: N/A
class Solution(object):
    def twoSum(self, numbers, target):
        """Return the 1-based indices of the two entries summing to target.

        :type numbers: List[int]
        :type target: int
        :rtype: List[int]
        """
        # Map each value already visited to its 1-based position; when the
        # complement of the current value has been seen, we are done.
        seen = {}
        position = 0
        for value in numbers:
            position += 1
            partner = target - value
            if partner in seen:
                return [seen[partner], position]
            seen[value] = position
        return []
635e1302196fedad154a957c2e0de041045d7954 | 623 | py | Python | scripts/ensemble.py | koukyo1994/kaggle_toxic_comment | 4be3a38f5b0a300de1a5e50c23eab49c666ad3da | [
"Apache-2.0"
] | null | null | null | scripts/ensemble.py | koukyo1994/kaggle_toxic_comment | 4be3a38f5b0a300de1a5e50c23eab49c666ad3da | [
"Apache-2.0"
] | null | null | null | scripts/ensemble.py | koukyo1994/kaggle_toxic_comment | 4be3a38f5b0a300de1a5e50c23eab49c666ad3da | [
"Apache-2.0"
] | null | null | null | import pandas as pd
lgbm = pd.read_csv("../submission/LGBM.csv")
ridge = pd.read_csv("../submission/Ridge.csv")
fmftrl = pd.read_csv("../submission/FMFTRL.csv")
columns = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
lgbm_val = lgbm.loc[:, columns].values
ridge_val = ridge.loc[:, columns].values
fmftrl_val = fmftrl.loc[:, columns].values
total_val = 0.2*lgbm_val + 0.4*ridge_val + 0.4*fmftrl_val
submission = pd.DataFrame()
submission['id'] = lgbm['id']
for i,name in enumerate(columns):
submission[name] = total_val[:, i]
submission.to_csv("../submission/Ensemble.csv", index=False)
| 31.15 | 83 | 0.704655 |
059f03bfec97c585f6b075a31bf13bfd7ce95af0 | 569 | py | Python | article/migrations/0012_auto_20170808_0911.py | glon/django_test | 4f9d9b93886ca5f861edfcd29b06df1bcedce1ad | [
"Apache-2.0"
] | null | null | null | article/migrations/0012_auto_20170808_0911.py | glon/django_test | 4f9d9b93886ca5f861edfcd29b06df1bcedce1ad | [
"Apache-2.0"
] | null | null | null | article/migrations/0012_auto_20170808_0911.py | glon/django_test | 4f9d9b93886ca5f861edfcd29b06df1bcedce1ad | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-08-08 09:11
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('article', '0011_auto_20170808_0908'),
]
operations = [
migrations.AlterField(
model_name='articlepost',
name='created',
field=models.DateTimeField(default=datetime.datetime(2017, 8, 8, 9, 11, 0, 481739, tzinfo=utc)),
),
]
| 24.73913 | 108 | 0.650264 |
3b03e98e9c623336a38353686a4ee0524fa64207 | 2,124 | py | Python | histo/HistokatController/scripts/histokat/backend/types.py | muxiao1217/DeformableRegistration.jl | e3b2c7a7d5b15b8b34302ee62a4727fe5a63176f | [
"BSD-3-Clause"
] | 4 | 2017-10-11T15:07:22.000Z | 2019-11-15T06:26:07.000Z | histo/HistokatController/scripts/histokat/backend/types.py | muxiao1217/DeformableRegistration.jl | e3b2c7a7d5b15b8b34302ee62a4727fe5a63176f | [
"BSD-3-Clause"
] | 6 | 2017-10-06T20:06:58.000Z | 2017-10-06T20:07:00.000Z | histo/HistokatController/scripts/histokat/backend/types.py | muxiao1217/DeformableRegistration.jl | e3b2c7a7d5b15b8b34302ee62a4727fe5a63176f | [
"BSD-3-Clause"
] | 2 | 2017-10-06T19:54:33.000Z | 2019-07-29T07:57:57.000Z | import ctypes
class RETURN_CODE:
C_OK = 0
C_CONTROLLER_NOT_INITIALIZED = 1
C_NONEXISTING_ID = 2
C_CANNOT_CREATE_SESSION = 3
C_ERROR = 4
class c_image_info(ctypes.Structure):
_fields_ = \
[
('image_id', ctypes.c_char_p),
('image_url', ctypes.c_char_p)
]
class c_image_group(ctypes.Structure):
_fields_ = \
[
('group_id', ctypes.c_char_p),
('group_url', ctypes.c_char_p),
('num_image_infos', ctypes.c_int64),
('image_info_list', ctypes.POINTER(c_image_info))
]
class c_analysis_id(ctypes.Structure):
_fields_ = \
[
('name', ctypes.c_char_p),
('version', ctypes.c_int64)
]
class c_analysis_id_list(ctypes.Structure):
_fields_ = \
[
('num_ids', ctypes.c_int64),
('analysis_ids', ctypes.POINTER(c_analysis_id))
]
class c_analysis_mode(ctypes.Structure):
_fields_ = \
[
('major', ctypes.c_int64),
('minor', ctypes.c_int64)
]
class c_vector(ctypes.Structure):
    """Integer 3-D coordinate (x, y, z), 64-bit each."""
    _fields_ = [
        ("x", ctypes.c_int64),
        ("y", ctypes.c_int64),
        ("z", ctypes.c_int64),
    ]
class c_box(ctypes.Structure):
    """Axis-aligned box described by two corner vectors."""
    _fields_ = [
        ("v1", c_vector),
        ("v2", c_vector),
    ]
class c_value_range(ctypes.Structure):
    """Closed floating-point interval [min, max]."""
    _fields_ = [
        ("min", ctypes.c_double),
        ("max", ctypes.c_double),
    ]
class c_string_list(ctypes.Structure):
    """Counted array of C strings."""
    _fields_ = [
        ("num_strings", ctypes.c_int64),
        ("string_list", ctypes.POINTER(ctypes.c_char_p)),
    ]
class c_class_info(ctypes.Structure):
    """Classification class descriptor: name, display color and type code."""
    _fields_ = [
        ("name", ctypes.c_char_p),
        ("color", ctypes.c_int64),
        ("type", ctypes.c_int64),
    ]
class c_class_info_list(ctypes.Structure):
    """Counted array of :class:`c_class_info` entries."""
    _fields_ = [
        ("num_class_infos", ctypes.c_int64),
        ("class_info_list", ctypes.POINTER(c_class_info)),
    ]
9cf18fda4c6629c8a7c75ac7b738466dbfe0e39f | 22,605 | py | Python | teaser/logic/buildingobjects/thermalzone.py | Ja98/TEASER | 1bb782a01ce1b38c4abecb9c6ecc4d59f1ba21a3 | [
"MIT"
] | 1 | 2018-10-22T07:21:15.000Z | 2018-10-22T07:21:15.000Z | teaser/logic/buildingobjects/thermalzone.py | Ja98/TEASER | 1bb782a01ce1b38c4abecb9c6ecc4d59f1ba21a3 | [
"MIT"
] | null | null | null | teaser/logic/buildingobjects/thermalzone.py | Ja98/TEASER | 1bb782a01ce1b38c4abecb9c6ecc4d59f1ba21a3 | [
"MIT"
] | null | null | null | # created June 2015
# by TEASER4 Development Team
"""This module includes the ThermalZone class
"""
from __future__ import division
import random
import re
import warnings
from teaser.logic.buildingobjects.calculation.one_element import OneElement
from teaser.logic.buildingobjects.calculation.two_element import TwoElement
from teaser.logic.buildingobjects.calculation.three_element import ThreeElement
from teaser.logic.buildingobjects.calculation.four_element import FourElement
class ThermalZone(object):
"""Thermal zone class.
This class is used to manage information and parameter calculation for
thermal zones. Each thermal zone has one specific calculation method,
which is specific to the used model (model_attr). For new model
implementation this attribute can be assigned to new classes.
Parameters
----------
parent: Building()
The parent class of this object, the Building the zone belongs to.
Allows for better control of hierarchical structures. If not None it
adds this ThermalZone instance to Building.thermal_zones.
Default is None
Attributes
----------
internal_id : float
Random id for the distinction between different zones.
name : str
Individual name.
area : float [m2]
Thermal zone area.
volume : float [m3]
Thermal zone volume.
infiltration_rate : float [1/h]
Infiltration rate of zone. Default value aligned to
:cite:`DeutschesInstitutfurNormung.2007`
outer_walls : list
List of OuterWall instances.
doors : list
List of Door instances.
rooftops : list
List of Rooftop instances.
ground_floors : list
List of GroundFloor instances.
windows : list
List of Window instances.
inner_walls : list
List of InnerWall instances.
floors : list
List of Floor instances.
ceilings: list
List of Ceiling instances.
use_conditions : instance of UseConditions()
Instance of UseConditions with all relevant information for the usage
of the thermal zone
model_attr : instance of OneElement(), TwoElement(), ThreeElement() or
FourElement()
Instance of OneElement(), TwoElement(), ThreeElement() or
FourElement(), that holds all calculation functions and attributes
needed for the specific model.
typical_length : float [m]
normative typical length of the thermal zone
typical_width : float [m]
normative typical width of the thermal zone
t_inside : float [K]
Normative indoor temperature for static heat load calculation.
The input of t_inside is ALWAYS in Kelvin
t_outside : float [K]
Normative outdoor temperature for static heat load calculation.
The input of t_inside is ALWAYS in Kelvin
t_ground : float [K]
Temperature directly at the outer side of ground floors for static
heat load calculation.
The input of t_ground is ALWAYS in Kelvin
density_air : float [kg/m3]
average density of the air in the thermal zone
heat_capac_air : float [J/K]
average heat capacity of the air in the thermal zone
"""
    def __init__(self, parent=None):
        """Constructor for ThermalZone
        """

        # Setting the parent registers this zone in parent.thermal_zones
        # (see the ``parent`` property setter).
        self.parent = parent

        self.internal_id = random.random()  # random id to tell zones apart
        self.name = None
        self._area = None                # [m2] net leased area
        self._volume = None              # [m3] air volume
        self._infiltration_rate = 0.4    # [1/h] air change rate default
        # Building element collections, populated via add_element():
        self._outer_walls = []
        self._doors = []
        self._rooftops = []
        self._ground_floors = []
        self._windows = []
        self._inner_walls = []
        self._floors = []
        self._ceilings = []
        self._use_conditions = None      # UseConditions instance
        self.model_attr = None           # set by calc_zone_parameters()
        self.typical_length = None       # [m] copied from use_conditions
        self.typical_width = None        # [m] copied from use_conditions
        self._t_inside = 293.15          # [K] design indoor temperature
        self._t_outside = 261.15         # [K] design outdoor temperature
        self.density_air = 1.25          # [kg/m3] average air density
        self.heat_capac_air = 1002       # average heat capacity of zone air
        self.t_ground = 286.15           # [K] temperature below ground floor
def calc_zone_parameters(
self,
number_of_elements=2,
merge_windows=False,
t_bt=5):
"""RC-Calculation for the thermal zone
Based on the input parameters (used model) this function instantiates
the corresponding calculation Class (e.g. TwoElement) and calculates
the zone parameters. Currently the function is able to distinguishes
between the number of elements, we distinguish between:
- one element: all outer walls are aggregated into one element,
inner wall are neglected
- two elements: exterior and interior walls are aggregated
- three elements: like 2, but floor or roofs are aggregated
separately
- four elements: roofs and floors are aggregated separately
For all four options we can chose if the thermal conduction through
the window is considered in a separate resistance or not.
Parameters
----------
number_of_elements : int
defines the number of elements, that area aggregated, between 1
and 4, default is 2
merge_windows : bool
True for merging the windows into the outer walls, False for
separate resistance for window, default is False (Only
supported for IBPSA)
t_bt : float
Time constant according to VDI 6007 (default t_bt = 5)
"""
if number_of_elements == 1:
self.model_attr = OneElement(
thermal_zone=self,
merge_windows=merge_windows,
t_bt=t_bt)
self.model_attr.calc_attributes()
elif number_of_elements == 2:
self.model_attr = TwoElement(
thermal_zone=self,
merge_windows=merge_windows,
t_bt=t_bt)
self.model_attr.calc_attributes()
elif number_of_elements == 3:
self.model_attr = ThreeElement(
thermal_zone=self,
merge_windows=merge_windows,
t_bt=t_bt)
self.model_attr.calc_attributes()
elif number_of_elements == 4:
self.model_attr = FourElement(
thermal_zone=self,
merge_windows=merge_windows,
t_bt=t_bt)
self.model_attr.calc_attributes()
def find_walls(self, orientation, tilt):
"""Returns all outer walls with given orientation and tilt
This function returns a list of all OuterWall elements with the
same orientation and tilt.
Parameters
----------
orientation : float [degree]
Azimuth of the desired walls.
tilt : float [degree]
Tilt against the horizontal of the desired walls.
Returns
-------
elements : list
List of OuterWalls instances with desired orientation and tilt.
"""
elements = []
for i in self.outer_walls:
if i.orientation == orientation and i.tilt == tilt:
elements.append(i)
else:
pass
return elements
def find_doors(self, orientation, tilt):
"""Returns all outer walls with given orientation and tilt
This function returns a list of all Doors elements with the
same orientation and tilt.
Parameters
----------
orientation : float [degree]
Azimuth of the desired walls.
tilt : float [degree]
Tilt against the horizontal of the desired walls.
Returns
-------
elements : list
List of Doors instances with desired orientation and tilt.
"""
elements = []
for i in self.doors:
if i.orientation == orientation and i.tilt == tilt:
elements.append(i)
else:
pass
return elements
def find_rts(self, orientation, tilt):
"""Returns all rooftops with given orientation and tilt
This function returns a list of all Rooftop elements with the
same orientation and tilt.
Parameters
----------
orientation : float [degree]
Azimuth of the desired rooftops.
tilt : float [degree]
Tilt against the horizontal of the desired rooftops.
Returns
-------
elements : list
List of Rooftop instances with desired orientation and tilt.
"""
elements = []
for i in self.rooftops:
if i.orientation == orientation and i.tilt == tilt:
elements.append(i)
else:
pass
return elements
def find_gfs(self, orientation, tilt):
"""Returns all ground floors with given orientation and tilt
This function returns a list of all GroundFloor elements with the
same orientation and tilt.
Parameters
----------
orientation : float [degree]
Azimuth of the desired ground floors.
tilt : float [degree]
Tilt against the horizontal of the desired ground floors.
Returns
-------
elements : list
List of GroundFloor instances with desired orientation and tilt.
"""
elements = []
for i in self.ground_floors:
if i.orientation == orientation and i.tilt == tilt:
elements.append(i)
else:
pass
return elements
def find_wins(self, orientation, tilt):
"""Returns all windows with given orientation and tilt
This function returns a list of all Window elements with the
same orientation and tilt.
Parameters
----------
orientation : float [degree]
Azimuth of the desired windows.
tilt : float [degree]
Tilt against the horizontal of the desired windows.
Returns
-------
elements : list
List of Window instances with desired orientation and tilt.
"""
elements = []
for i in self.windows:
if i.orientation == orientation and i.tilt == tilt:
elements.append(i)
else:
pass
return elements
    def set_inner_wall_area(self):
        """Sets the inner wall area according to zone area

        Sets the inner wall area according to zone area size if type building
        approach is used. This function covers Floors, Ceilings and InnerWalls.
        """

        ass_error_1 = "You need to specify parent for thermal zone"

        assert self.parent is not None, ass_error_1

        # Floors/ceilings: every storey except one contributes a slab, so
        # the zone area is scaled by (n_floors - 1) / n_floors.
        for floor in self.floors:
            floor.area = (
                (self.parent.number_of_floors - 1) /
                self.parent.number_of_floors) * self.area

        for ceiling in self.ceilings:
            ceiling.area = (
                (self.parent.number_of_floors - 1) /
                self.parent.number_of_floors) * self.area

        # Inner walls: estimate the number of typical rooms in the zone and
        # give each room one long and two short wall faces of storey height.
        for wall in self.inner_walls:
            typical_area = self.typical_length * self.typical_width

            avg_room_nr = self.area / typical_area

            wall.area = (avg_room_nr * (self.typical_length *
                                        self.parent.height_of_floors +
                                        2 * self.typical_width *
                                        self.parent.height_of_floors))
def set_volume_zone(self):
"""Sets the zone volume according to area and height of floors
Sets the volume of a zone according area and height of floors
(building attribute).
"""
ass_error_1 = "you need to specify parent for thermal zone"
assert self.parent is not None, ass_error_1
self.volume = self.area * self.parent.height_of_floors
    def retrofit_zone(
            self,
            type_of_retrofit=None,
            window_type=None,
            material=None):
        """Retrofits all walls and windows in the zone.

        Function call for all elements facing the ambient or ground.
        Distinguishes if the parent building is a archetype of type 'iwu' or
        'tabula_de'. If TABULA is used, it will use the pre-defined wall
        constructions of TABULA.

        This function covers OuterWall, Rooftop, GroundFloor and Window.

        Parameters
        ----------
        type_of_retrofit : str
            The classification of retrofit, if the archetype building
            approach of TABULA is used.
        window_type : str
            Default: EnEv 2014
        material : str
            Default: EPS035
        """
        if type_of_retrofit is None:
            type_of_retrofit = 'retrofit'
        # TABULA archetypes are recognised by the parent's class name; their
        # elements switch to a pre-defined retrofit construction by string
        # substitution in ``construction_type``.
        if type(self.parent).__name__ in [
                "SingleFamilyHouse", "TerracedHouse", "MultiFamilyHouse",
                "ApartmentBlock"]:
            for wall_count in self.outer_walls \
                    + self.rooftops + self.ground_floors + self.doors + \
                    self.windows:
                if "adv_retrofit" in wall_count.construction_type:
                    # Already at the best available standard; nothing to do.
                    warnings.warn(
                        "already highest available standard"
                        + self.parent.name + wall_count.name)
                elif "standard" in wall_count.construction_type:
                    wall_count.load_type_element(
                        year=self.parent.year_of_construction,
                        construction=wall_count.construction_type.replace(
                            "standard", type_of_retrofit))
                else:
                    wall_count.load_type_element(
                        year=self.parent.year_of_construction,
                        construction=wall_count.construction_type.replace(
                            "retrofit", type_of_retrofit))
        else:
            # Non-TABULA (e.g. IWU) archetypes: add insulation material to
            # each opaque element and replace the windows.
            for wall_count in self.outer_walls:
                wall_count.retrofit_wall(
                    self.parent.year_of_retrofit,
                    material)
            for roof_count in self.rooftops:
                roof_count.retrofit_wall(
                    self.parent.year_of_retrofit,
                    material)
            for ground_count in self.ground_floors:
                ground_count.retrofit_wall(
                    self.parent.year_of_retrofit,
                    material)
            for win_count in self.windows:
                win_count.replace_window(
                    self.parent.year_of_retrofit,
                    window_type)
def delete(self):
"""Deletes the actual thermal zone safely.
This deletes the current thermal Zone and also refreshes the
thermal_zones list in the parent Building.
"""
for index, tz in enumerate(self.parent.thermal_zones):
if tz.internal_id == self.internal_id:
self.parent.net_leased_area -= self.area
self.parent.thermal_zones.pop(index)
break
def add_element(self, building_element):
"""Adds a building element to the corresponding list
This function adds a BuildingElement instance to the the list
depending on the type of the Building Element
Parameters
----------
building_element : BuildingElement()
inherited objects of BuildingElement() instance of TEASER
"""
ass_error_1 = ("building_element has to be an instance of OuterWall,"
" Rooftop, GroundFloor, Window, InnerWall, "
"Ceiling or Floor")
assert type(building_element).__name__ in (
"OuterWall", "Rooftop", "GroundFloor",
"InnerWall", "Ceiling", "Floor",
"Window"), ass_error_1
if type(building_element).__name__ == "OuterWall":
self._outer_walls.append(building_element)
elif type(building_element).__name__ == "GroundFloor":
self._ground_floors.append(building_element)
elif type(building_element).__name__ == "Rooftop":
self._rooftops.append(building_element)
elif type(building_element).__name__ == "InnerWall":
self._inner_walls.append(building_element)
elif type(building_element).__name__ == "Ceiling":
self._ceilings.append(building_element)
elif type(building_element).__name__ == "Floor":
self._floors.append(building_element)
elif type(building_element).__name__ == "Window":
self._windows.append(building_element)
    @property
    def parent(self):
        # Building instance this zone belongs to (None for stand-alone zones).
        return self.__parent

    @parent.setter
    def parent(self, value):

        from teaser.logic.buildingobjects.building import Building
        import inspect

        # NOTE(review): ``inspect.isclass(Building)`` only checks that the
        # imported ``Building`` symbol is a class (always true); it does NOT
        # validate ``value``.  An ``isinstance(value, Building)`` check was
        # probably intended.  Also, if value is None, ``__parent`` is never
        # initialised at all.  Left unchanged here.
        if value is not None:

            if inspect.isclass(Building):
                self.__parent = value
                # Register this zone on the parent building.
                self.__parent.thermal_zones.append(self)
@property
def name(self):
return self._name
@name.setter
def name(self, value):
if isinstance(value, str):
regex = re.compile('[^a-zA-z0-9]')
self._name = regex.sub('', value)
else:
try:
value = str(value)
regex = re.compile('[^a-zA-z0-9]')
self._name = regex.sub('', value)
except ValueError:
print("Can't convert name to string")
    # --- Building element collections ------------------------------------
    # NOTE(review): all of the following setters only RESET the list when
    # ``None`` is assigned; assigning an actual list is silently ignored.
    # Elements are added through ``add_element`` instead.

    @property
    def outer_walls(self):
        # list of OuterWall instances
        return self._outer_walls

    @outer_walls.setter
    def outer_walls(self, value):

        if value is None:
            self._outer_walls = []

    @property
    def doors(self):
        # list of Door instances
        return self._doors

    @doors.setter
    def doors(self, value):

        if value is None:
            self._doors = []

    @property
    def rooftops(self):
        # list of Rooftop instances
        return self._rooftops

    @rooftops.setter
    def rooftops(self, value):

        if value is None:
            self._rooftops = []

    @property
    def ground_floors(self):
        # list of GroundFloor instances
        return self._ground_floors

    @ground_floors.setter
    def ground_floors(self, value):

        if value is None:
            self._ground_floors = []

    @property
    def ceilings(self):
        # list of Ceiling instances
        return self._ceilings

    @ceilings.setter
    def ceilings(self, value):

        if value is None:
            self._ceilings = []

    @property
    def floors(self):
        # list of Floor instances
        return self._floors

    @floors.setter
    def floors(self, value):

        if value is None:
            self._floors = []

    @property
    def inner_walls(self):
        # list of InnerWall instances
        return self._inner_walls

    @inner_walls.setter
    def inner_walls(self, value):

        if value is None:
            self._inner_walls = []

    @property
    def windows(self):
        # list of Window instances
        return self._windows

    @windows.setter
    def windows(self, value):

        if value is None:
            self._windows = []
@property
def use_conditions(self):
return self._use_conditions
@use_conditions.setter
def use_conditions(self, value):
ass_error_1 = "Use condition has to be an instance of UseConditions()"
assert type(value).__name__ == "UseConditions" or \
type(value).__name__ == "BoundaryConditions", ass_error_1
if value is not None:
self._use_conditions = value
self.typical_length = value.typical_length
self.typical_width = value.typical_width
self._use_conditions = value
@property
def area(self):
return self._area
@area.setter
def area(self, value):
if isinstance(value, float):
pass
elif value is None:
pass
else:
try:
value = float(value)
except:
raise ValueError("Can't convert zone area to float")
if self.parent is not None:
if self._area is None:
if self.parent.net_leased_area is None:
self.parent.net_leased_area = 0.0
self._area = value
self.parent.net_leased_area += value
else:
self.parent.net_leased_area -= self._area
self.parent.net_leased_area += value
self._area = value
else:
self._area = value
@property
def volume(self):
return self._volume
@volume.setter
def volume(self, value):
if isinstance(value, float):
pass
elif value is None:
pass
else:
try:
value = float(value)
except:
raise ValueError("Can't convert zone volume to float")
if self.parent is not None:
if self._volume is None:
self._volume = value
self.parent.volume += value
else:
self.parent.volume -= self._volume
self.parent.volume += value
self._volume = value
else:
self._volume = value
@property
def infiltration_rate(self):
return self._infiltration_rate
@infiltration_rate.setter
def infiltration_rate(self, value):
if isinstance(value, float):
self._infiltration_rate = value
elif value is None:
self._infiltration_rate = value
else:
try:
value = float(value)
self._infiltration_rate = value
except:
raise ValueError("Can't convert infiltration rate to float")
@property
def t_inside(self):
return self._t_inside
@t_inside.setter
def t_inside(self, value):
if isinstance(value, float):
self._t_inside = value
elif value is None:
self._t_inside = value
else:
try:
value = float(value)
self._t_inside = value
except:
raise ValueError("Can't convert temperature to float")
@property
def t_outside(self):
return self._t_outside
@t_outside.setter
def t_outside(self, value):
if isinstance(value, float):
self._t_outside = value
elif value is None:
self._t_outside = value
else:
try:
value = float(value)
self._t_outside = value
except:
raise ValueError("Can't convert temperature to float")
| 32.200855 | 79 | 0.584959 |
8c20fdb1743b337d37275b1677881af08cec3823 | 1,147 | py | Python | testng1.py | fenglwh/Spider | 3499dd801928932a0008684ef623f1ac0bcccd74 | [
"MIT"
] | null | null | null | testng1.py | fenglwh/Spider | 3499dd801928932a0008684ef623f1ac0bcccd74 | [
"MIT"
] | null | null | null | testng1.py | fenglwh/Spider | 3499dd801928932a0008684ef623f1ac0bcccd74 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# coding:utf-8
__Author__ = 'Adair.l'
import requests
import bs4
import http.cookiejar as cookiejar

# Request headers mimicking a desktop Firefox browser; the Referer is sent
# because the image CDN appears to check it (hotlink protection).
headers = {
    'Host': 'manhua1032-101-69-161-98.cdndm5.com',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0',
    'Accept': '*/*',
    'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
    'Accept-Encoding': 'gzip, deflate',
    'Referer': 'http://cnc.dm5.com/m575919/',
    'Connection': 'keep-alive',
}
for key, value in headers.items():
    print(key, value)

session = requests.session()
session.cookies = cookiejar.LWPCookieJar(filename='cookies')
try:
    # Reuse previously saved cookies if a 'cookies' file exists.
    session.cookies.load(ignore_discard=True)
except OSError:
    # Fix: was a bare ``except:``; cookie-jar load failures raise OSError
    # (including http.cookiejar.LoadError and FileNotFoundError).
    print("Cookie 未能加载")

# Visit the chapter page first so the session collects its cookies.
r0 = session.get('http://cnc.dm5.com/m575919/')
print(r0)
r1 = session.get(
    'http://manhua1032-101-69-161-98.cdndm5.com/41/40586/575919/1_5252.jpg?cid=575919&key=e98e2016acb7167394dbc062a7161755&type=1',
    headers=headers)
print(r1)
print(session.cookies)
# Fix: close the output file deterministically (was a dangling
# ``open(...).write(...)`` relying on garbage collection).
with open('1.jpg', 'wb') as image_file:
    image_file.write(r1.content)
d035a7f7002e1cc03f14b793caf566d9f5f23ad7 | 337 | py | Python | cpovc_offline_mode/urls.py | Rebeccacheptoek/cpims-ovc-3.0 | 25d34dca2f93fcdb6fc934093b625604b46ddd8d | [
"Apache-2.0"
] | 3 | 2022-02-18T13:25:29.000Z | 2022-02-25T11:49:11.000Z | cpovc_offline_mode/urls.py | Rebeccacheptoek/cpims-ovc-3.0 | 25d34dca2f93fcdb6fc934093b625604b46ddd8d | [
"Apache-2.0"
] | null | null | null | cpovc_offline_mode/urls.py | Rebeccacheptoek/cpims-ovc-3.0 | 25d34dca2f93fcdb6fc934093b625604b46ddd8d | [
"Apache-2.0"
] | 22 | 2022-02-05T13:43:53.000Z | 2022-02-26T14:29:06.000Z | from django.conf.urls import patterns, url
urlpatterns = patterns(
'cpovc_offline_mode.views',
url(r'^templates/$', 'templates', name='templates'),
url(r'^data/$', 'fetch_data', name='fetch_data'),
url(r'^services/$', 'fetch_services', name='fetch_services'),
url(r'^submit/$', 'submit_form', name='submit_form'),
)
| 33.7 | 65 | 0.664688 |
5545b71022c0e8ea67bfeab58ff7fb581a40bb5a | 1,717 | py | Python | var/spack/repos/builtin/packages/libcanberra/package.py | ilagunap/spack | 510f869c3ae8ac2721debd29e98076212ee75852 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1 | 2020-03-09T14:32:26.000Z | 2020-03-09T14:32:26.000Z | var/spack/repos/builtin/packages/libcanberra/package.py | ilagunap/spack | 510f869c3ae8ac2721debd29e98076212ee75852 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 5 | 2021-07-26T12:12:00.000Z | 2022-03-01T12:16:03.000Z | var/spack/repos/builtin/packages/libcanberra/package.py | ilagunap/spack | 510f869c3ae8ac2721debd29e98076212ee75852 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libcanberra(AutotoolsPackage):
    """libcanberra is an implementation of the XDG Sound Theme and
    Name Specifications, for generating event sounds on free desktops,
    such as GNOME."""

    homepage = "https://0pointer.de/lennart/projects/libcanberra/"
    url = "http://0pointer.de/lennart/projects/libcanberra/libcanberra-0.30.tar.xz"

    version('0.30', sha256='c2b671e67e0c288a69fc33dc1b6f1b534d07882c2aceed37004bf48c601afa72')

    # TODO: Add variants and dependencies for the following audio support:
    # ALSA, OSS, PulseAudio, udev, GStreamer, null, GTK3+ , tdb

    variant('gtk', default=False, description='Enable optional GTK+ support')

    # The X11/GTK stack below is only required when the 'gtk' variant is on.
    depends_on('libxrender', when='+gtk')
    depends_on('libxext', when='+gtk')
    depends_on('libx11', when='+gtk')
    depends_on('libxinerama', when='+gtk')
    depends_on('libxrandr', when='+gtk')
    depends_on('libxcursor', when='+gtk')
    depends_on('libxcomposite', when='+gtk')
    depends_on('libxdamage', when='+gtk')
    depends_on('libxfixes', when='+gtk')
    depends_on('libxcb', when='+gtk')
    depends_on('libxau', when='+gtk')
    depends_on('gtkplus', when='+gtk')

    depends_on('libvorbis')
    depends_on('pkgconfig', type='build')

    def configure_args(self):
        """Return the arguments passed to ``./configure``.

        Always builds the static library; GTK support follows the
        ``gtk`` variant of the spec.
        """
        args = ['--enable-static']

        if '+gtk' in self.spec:
            args.append('--enable-gtk')
        else:
            args.append('--disable-gtk')

        return args
7e77c11b0ef714bffe00bb6d08adc03def5988ac | 189,740 | py | Python | numba/np/arrayobj.py | luk-f-a/numba | 3a682bd827e416335e3574bc7b10f0ec69adb701 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 | 2021-08-10T05:33:29.000Z | 2021-08-10T05:33:29.000Z | numba/np/arrayobj.py | luk-f-a/numba | 3a682bd827e416335e3574bc7b10f0ec69adb701 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | numba/np/arrayobj.py | luk-f-a/numba | 3a682bd827e416335e3574bc7b10f0ec69adb701 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 | 2021-01-31T18:58:54.000Z | 2021-01-31T18:58:54.000Z | """
Implementation of operations on Array objects and objects supporting
the buffer protocol.
"""
import functools
import math
import operator
from llvmlite import ir
import llvmlite.llvmpy.core as lc
from llvmlite.llvmpy.core import Constant
import numpy as np
from numba import pndindex, literal_unroll
from numba.core import types, utils, typing, errors, cgutils, extending
from numba.np.numpy_support import (as_dtype, carray, farray, is_contiguous,
is_fortran)
from numba.np.numpy_support import type_can_asarray, is_nonelike
from numba.core.imputils import (lower_builtin, lower_getattr,
lower_getattr_generic,
lower_setattr_generic,
lower_cast, lower_constant,
iternext_impl, impl_ret_borrowed,
impl_ret_new_ref, impl_ret_untracked,
RefType)
from numba.core.typing import signature
from numba.core.extending import (register_jitable, overload, overload_method,
intrinsic)
from numba.misc import quicksort, mergesort
from numba.cpython import slicing
from numba.cpython.unsafe.tuple import tuple_setitem, build_full_slice_tuple
def set_range_metadata(builder, load, lower_bound, upper_bound):
    """
    Attach LLVM "range" metadata to the *load* instruction, asserting its
    loaded value lies in the half-open interval [lower_bound, upper_bound).
    """
    bounds = [Constant.int(load.type, bound)
              for bound in (lower_bound, upper_bound)]
    load.set_metadata("range", builder.module.add_metadata(bounds))
def mark_positive(builder, load):
    """
    Annotate *load* with range metadata stating its value is non-negative,
    i.e. within [0, 2**(width-1) - 1] for the load's integer width.
    """
    max_signed = (1 << (load.type.width - 1)) - 1
    set_range_metadata(builder, load, 0, max_signed)
def make_array(array_type):
    """
    Return the Structure representation of the given *array_type*
    (an instance of types.ArrayCompatible).
    Note this does not call __array_wrap__ in case a new array structure
    is being created (rather than populated).
    """
    real_array_type = array_type.as_array
    base = cgutils.create_struct_proxy(real_array_type)
    ndim = real_array_type.ndim
    class ArrayStruct(base):
        def _make_refs(self, ref):
            # If the type registers a '__array__' implementation, the outer
            # structure is array_type while the inner one is its as_array
            # representation; otherwise defer to the base proxy behaviour.
            sig = signature(real_array_type, array_type)
            try:
                array_impl = self._context.get_function('__array__', sig)
            except NotImplementedError:
                return super(ArrayStruct, self)._make_refs(ref)
            # Return a wrapped structure and its unwrapped reference
            datamodel = self._context.data_model_manager[array_type]
            be_type = self._get_be_type(datamodel)
            if ref is None:
                outer_ref = cgutils.alloca_once(self._builder, be_type,
                                                zfill=True)
            else:
                outer_ref = ref
            # NOTE: __array__ is called with a pointer and expects a pointer
            # in return!
            ref = array_impl(self._builder, (outer_ref,))
            return outer_ref, ref
        @property
        def shape(self):
            """
            Override .shape to inform LLVM that its elements are all positive.
            """
            builder = self._builder
            if ndim == 0:
                return base.__getattr__(self, "shape")
            # Unfortunately, we can't use llvm.assume as its presence can
            # seriously pessimize performance,
            # *and* the range metadata currently isn't improving anything here,
            # see https://llvm.org/bugs/show_bug.cgi?id=23848 !
            ptr = self._get_ptr_by_name("shape")
            dims = []
            for i in range(ndim):
                dimptr = cgutils.gep_inbounds(builder, ptr, 0, i)
                load = builder.load(dimptr)
                dims.append(load)
                # Each dimension length is known to be >= 0
                mark_positive(builder, load)
            return cgutils.pack_array(builder, dims)
    return ArrayStruct
def get_itemsize(context, array_type):
    """
    Return the size in bytes of one element of *array_type* (an array or
    buffer type), computed from the ABI layout of its dtype.
    """
    return context.get_abi_sizeof(context.get_data_type(array_type.dtype))
def load_item(context, builder, arrayty, ptr):
    """
    Load and return the item stored at *ptr* inside an array of type
    *arrayty*.  Arrays not known to be aligned are read with alignment 1.
    """
    return context.unpack_value(builder, arrayty.dtype, ptr,
                                align=None if arrayty.aligned else 1)
def store_item(context, builder, arrayty, val, ptr):
    """
    Store *val* at *ptr* inside an array of type *arrayty*.  Arrays not
    known to be aligned are written with alignment 1.
    """
    return context.pack_value(builder, arrayty.dtype, val, ptr,
                              align=None if arrayty.aligned else 1)
def fix_integer_index(context, builder, idxty, idx, size):
    """
    Normalize the integer index *idx* of type *idxty* for a dimension of
    the given *size*: a signed index is cast to intp and wrapped around
    for negative values; an unsigned index is merely cast to uintp.
    """
    if not idxty.signed:
        return context.cast(builder, idx, idxty, types.uintp)
    fixed = context.cast(builder, idx, idxty, types.intp)
    return slicing.fix_index(builder, fixed, size)
def normalize_index(context, builder, idxty, idx):
    """
    Normalize an (index type, index value) pair: a 0-d integer array is
    unwrapped into its scalar element; anything else passes through.
    """
    if not (isinstance(idxty, types.Array) and idxty.ndim == 0):
        return idxty, idx
    assert isinstance(idxty.dtype, types.Integer)
    idx_struct = make_array(idxty)(context, builder, idx)
    scalar = load_item(context, builder, idxty, idx_struct.data)
    return idxty.dtype, scalar
def normalize_indices(context, builder, index_types, indices):
    """
    Apply normalize_index() element-wise over parallel sequences of index
    types and index values, returning the two normalized sequences.
    """
    if not len(indices):
        return index_types, indices
    normalized = [normalize_index(context, builder, ty, val)
                  for ty, val in zip(index_types, indices)]
    index_types, indices = zip(*normalized)
    return index_types, indices
def populate_array(array, data, shape, strides, itemsize, meminfo,
                   parent=None):
    """
    Helper function for populating array structures.
    This avoids forgetting to set fields.
    *shape* and *strides* can be Python tuples or LLVM arrays.
    Raises ValueError at codegen time if any required struct field
    would be left unset.
    """
    context = array._context
    builder = array._builder
    datamodel = array._datamodel
    required_fields = set(datamodel._fields)
    # A null meminfo means the data is not NRT-managed
    if meminfo is None:
        meminfo = Constant.null(context.get_value_type(
            datamodel.get_type('meminfo')))
    # Normalize Python-level tuples/ints into LLVM values
    intp_t = context.get_value_type(types.intp)
    if isinstance(shape, (tuple, list)):
        shape = cgutils.pack_array(builder, shape, intp_t)
    if isinstance(strides, (tuple, list)):
        strides = cgutils.pack_array(builder, strides, intp_t)
    if isinstance(itemsize, int):
        itemsize = intp_t(itemsize)
    attrs = dict(shape=shape,
                 strides=strides,
                 data=data,
                 itemsize=itemsize,
                 meminfo=meminfo,)
    # Set `parent` attribute
    if parent is None:
        attrs['parent'] = Constant.null(context.get_value_type(
            datamodel.get_type('parent')))
    else:
        attrs['parent'] = parent
    # Calc num of items from shape
    nitems = context.get_constant(types.intp, 1)
    unpacked_shape = cgutils.unpack_tuple(builder, shape, shape.type.count)
    # (note empty shape => 0d array therefore nitems = 1)
    for axlen in unpacked_shape:
        # 'nsw' marks the multiply as free of signed overflow
        nitems = builder.mul(nitems, axlen, flags=['nsw'])
    attrs['nitems'] = nitems
    # Make sure that we have all the fields
    got_fields = set(attrs.keys())
    if got_fields != required_fields:
        raise ValueError("missing {0}".format(required_fields - got_fields))
    # Set field value
    for k, v in attrs.items():
        setattr(array, k, v)
    return array
def update_array_info(aryty, array):
    """
    Update some auxiliary information in *array* after some of its fields
    were changed.  `itemsize` and `nitems` are updated.
    """
    context = array._context
    builder = array._builder
    # Calc num of items from shape
    nitems = context.get_constant(types.intp, 1)
    unpacked_shape = cgutils.unpack_tuple(builder, array.shape, aryty.ndim)
    for axlen in unpacked_shape:
        # 'nsw' marks the multiply as free of signed overflow
        nitems = builder.mul(nitems, axlen, flags=['nsw'])
    array.nitems = nitems
    # Itemsize is a compile-time constant derived from the dtype
    array.itemsize = context.get_constant(types.intp,
                                          get_itemsize(context, aryty))
@lower_builtin('getiter', types.Buffer)
def getiter_array(context, builder, sig, args):
    """
    Lower ``iter(buffer)``: build an iterator structure holding the array
    and a stack-allocated index initialized to zero.
    """
    [arrayty] = sig.args
    [array] = args
    iterobj = context.make_helper(builder, sig.return_type)
    # Current position lives in an alloca so iternext can mutate it
    zero = context.get_constant(types.intp, 0)
    indexptr = cgutils.alloca_once_value(builder, zero)
    iterobj.index = indexptr
    iterobj.array = array
    # Incref array: the iterator keeps the underlying array alive
    if context.enable_nrt:
        context.nrt.incref(builder, arrayty, array)
    res = iterobj._getvalue()
    # Note: a decref on the iterator will dereference all internal MemInfo*
    out = impl_ret_new_ref(context, builder, sig.return_type, res)
    return out
def _getitem_array_single_int(context, builder, return_type, aryty, ary, idx):
    """ Evaluate `ary[idx]`, where idx is a single int. """
    # optimized form of _getitem_array_generic
    shapes = cgutils.unpack_tuple(builder, ary.shape, count=aryty.ndim)
    strides = cgutils.unpack_tuple(builder, ary.strides, count=aryty.ndim)
    # Byte offset of the selected element along the first axis
    offset = builder.mul(strides[0], idx)
    dataptr = cgutils.pointer_add(builder, ary.data, offset)
    # The remaining dimensions describe the resulting sub-view
    view_shapes = shapes[1:]
    view_strides = strides[1:]
    if isinstance(return_type, types.Buffer):
        # Build array view
        retary = make_view(context, builder, aryty, ary, return_type,
                           dataptr, view_shapes, view_strides)
        return retary._getvalue()
    else:
        # Load scalar from 0-d result
        assert not view_shapes
        return load_item(context, builder, aryty, dataptr)
@lower_builtin('iternext', types.ArrayIterator)
@iternext_impl(RefType.BORROWED)
def iternext_array(context, builder, sig, args, result):
    """
    Lower one step of array iteration: yield ary[index] while the stored
    index is below the first-axis length, then advance the index.
    """
    [iterty] = sig.args
    [iter] = args
    arrayty = iterty.array_type
    iterobj = context.make_helper(builder, iterty, value=iter)
    ary = make_array(arrayty)(context, builder, value=iterobj.array)
    # Only the first-axis length bounds the iteration
    nitems, = cgutils.unpack_tuple(builder, ary.shape, count=1)
    index = builder.load(iterobj.index)
    is_valid = builder.icmp(lc.ICMP_SLT, index, nitems)
    result.set_valid(is_valid)
    with builder.if_then(is_valid):
        value = _getitem_array_single_int(
            context, builder, iterty.yield_type, arrayty, ary, index
        )
        result.yield_(value)
        # Store the incremented index for the next iteration
        nindex = cgutils.increment_index(builder, index)
        builder.store(nindex, iterobj.index)
# ------------------------------------------------------------------------------
# Basic indexing (with integers and slices only)
def basic_indexing(context, builder, aryty, ary, index_types, indices,
                   boundscheck=None):
    """
    Perform basic indexing on the given array.
    A (data pointer, shapes, strides) tuple is returned describing
    the corresponding view.
    Supported index types are integers, slices and a single ellipsis;
    any other type raises NotImplementedError (callers catch this to
    fall back on fancy indexing).
    """
    zero = context.get_constant(types.intp, 0)
    shapes = cgutils.unpack_tuple(builder, ary.shape, aryty.ndim)
    strides = cgutils.unpack_tuple(builder, ary.strides, aryty.ndim)
    # Per-dimension results; integer indices contribute no output dimension
    output_indices = []
    output_shapes = []
    output_strides = []
    ax = 0
    for indexval, idxty in zip(indices, index_types):
        if idxty is types.ellipsis:
            # Fill up missing dimensions at the middle
            n_missing = aryty.ndim - len(indices) + 1
            for i in range(n_missing):
                output_indices.append(zero)
                output_shapes.append(shapes[ax])
                output_strides.append(strides[ax])
                ax += 1
            continue
        # Regular index value
        if isinstance(idxty, types.SliceType):
            slice = context.make_helper(builder, idxty, value=indexval)
            slicing.guard_invalid_slice(context, builder, idxty, slice)
            slicing.fix_slice(builder, slice, shapes[ax])
            output_indices.append(slice.start)
            sh = slicing.get_slice_length(builder, slice)
            st = slicing.fix_stride(builder, slice, strides[ax])
            output_shapes.append(sh)
            output_strides.append(st)
        elif isinstance(idxty, types.Integer):
            ind = fix_integer_index(context, builder, idxty, indexval,
                                    shapes[ax])
            if boundscheck:
                cgutils.do_boundscheck(context, builder, ind, shapes[ax], ax)
            output_indices.append(ind)
        else:
            raise NotImplementedError("unexpected index type: %s" % (idxty,))
        ax += 1
    # Fill up missing dimensions at the end
    assert ax <= aryty.ndim
    while ax < aryty.ndim:
        output_shapes.append(shapes[ax])
        output_strides.append(strides[ax])
        ax += 1
    # No need to check wraparound, as negative indices were already
    # fixed in the loop above.
    dataptr = cgutils.get_item_pointer(context, builder, aryty, ary,
                                       output_indices,
                                       wraparound=False, boundscheck=False)
    return (dataptr, output_shapes, output_strides)
def make_view(context, builder, aryty, ary, return_type,
              data, shapes, strides):
    """
    Create an array view structure of type *return_type*, sharing *ary*'s
    itemsize, meminfo and parent, with the given data pointer, shape and
    strides.
    """
    view = make_array(return_type)(context, builder)
    populate_array(view,
                   data=data,
                   shape=shapes,
                   strides=strides,
                   itemsize=ary.itemsize,
                   meminfo=ary.meminfo,
                   parent=ary.parent)
    return view
def _getitem_array_generic(context, builder, return_type, aryty, ary,
                           index_types, indices):
    """
    Return the result of indexing *ary* with the given *indices*,
    returning either a scalar or a view.
    """
    # Resolve indices to a (data pointer, shapes, strides) description
    dataptr, view_shapes, view_strides = \
        basic_indexing(context, builder, aryty, ary, index_types, indices,
                       boundscheck=context.enable_boundscheck)
    if isinstance(return_type, types.Buffer):
        # Build array view
        retary = make_view(context, builder, aryty, ary, return_type,
                           dataptr, view_shapes, view_strides)
        return retary._getvalue()
    else:
        # Load scalar from 0-d result
        assert not view_shapes
        return load_item(context, builder, aryty, dataptr)
@lower_builtin(operator.getitem, types.Buffer, types.Integer)
@lower_builtin(operator.getitem, types.Buffer, types.SliceType)
def getitem_arraynd_intp(context, builder, sig, args):
    """
    Basic indexing with a single integer or slice: ``ary[i]`` / ``ary[a:b]``.
    """
    aryty, idxty = sig.args
    aryval, idxval = args
    assert aryty.ndim >= 1
    arystruct = make_array(aryty)(context, builder, aryval)
    result = _getitem_array_generic(context, builder, sig.return_type,
                                    aryty, arystruct, (idxty,), (idxval,))
    return impl_ret_borrowed(context, builder, sig.return_type, result)
@lower_builtin(operator.getitem, types.Buffer, types.BaseTuple)
def getitem_array_tuple(context, builder, sig, args):
    """
    Indexing with a tuple: dispatches to advanced (fancy) indexing when
    any tuple element is an array, otherwise to basic indexing.
    """
    aryty, tupty = sig.args
    aryval, tupval = args
    arystruct = make_array(aryty)(context, builder, aryval)
    raw_indices = cgutils.unpack_tuple(builder, tupval, count=len(tupty))
    index_types, indices = normalize_indices(context, builder,
                                             tupty.types, raw_indices)
    if any(isinstance(ty, types.Array) for ty in index_types):
        # At least one array index => advanced indexing
        return fancy_getitem(context, builder, sig, args,
                             aryty, arystruct, index_types, indices)
    result = _getitem_array_generic(context, builder, sig.return_type,
                                    aryty, arystruct, index_types, indices)
    return impl_ret_borrowed(context, builder, sig.return_type, result)
@lower_builtin(operator.setitem, types.Buffer, types.Any, types.Any)
def setitem_array(context, builder, sig, args):
    """
    array[a] = scalar_or_array
    array[a,..,b] = scalar_or_array
    """
    aryty, idxty, valty = sig.args
    ary, idx, val = args
    # Normalize a tuple index into parallel (types, values) sequences
    if isinstance(idxty, types.BaseTuple):
        index_types = idxty.types
        indices = cgutils.unpack_tuple(builder, idx, count=len(idxty))
    else:
        index_types = (idxty,)
        indices = (idx,)
    ary = make_array(aryty)(context, builder, ary)
    # First try basic indexing to see if a single array location is denoted.
    index_types, indices = normalize_indices(context, builder,
                                             index_types, indices)
    try:
        dataptr, shapes, strides = \
            basic_indexing(context, builder, aryty, ary, index_types, indices,
                           boundscheck=context.enable_boundscheck)
    except NotImplementedError:
        # Index types unsupported by basic indexing (e.g. array indices)
        use_fancy_indexing = True
    else:
        # Non-empty shapes mean the index denotes a view, not a single item
        use_fancy_indexing = bool(shapes)
    if use_fancy_indexing:
        # Index describes a non-trivial view => use generic slice assignment
        # (NOTE: this also handles scalar broadcasting)
        return fancy_setslice(context, builder, sig, args,
                              index_types, indices)
    # Store source value the given location
    val = context.cast(builder, val, valty, aryty.dtype)
    store_item(context, builder, aryty, val, dataptr)
@lower_builtin(len, types.Buffer)
def array_len(context, builder, sig, args):
    """
    Lower ``len(ary)``: return the extent of the first dimension.
    """
    (aryty,) = sig.args
    (aryval,) = args
    arystruct = make_array(aryty)(context, builder, aryval)
    dim0 = builder.extract_value(arystruct.shape, 0)
    return impl_ret_untracked(context, builder, sig.return_type, dim0)
@lower_builtin("array.item", types.Array)
def array_item(context, builder, sig, args):
    """
    Lower ``array.item()``: return the single element of a size-1 array,
    raising ValueError at runtime for any other size.
    """
    aryty, = sig.args
    ary, = args
    ary = make_array(aryty)(context, builder, ary)
    nitems = ary.nitems
    # Runtime guard: item() is only legal on arrays of exactly one element
    with builder.if_then(builder.icmp_signed('!=', nitems, nitems.type(1)),
                         likely=False):
        msg = "item(): can only convert an array of size 1 to a Python scalar"
        context.call_conv.return_user_exc(builder, ValueError, (msg,))
    return load_item(context, builder, aryty, ary.data)
@lower_builtin("array.itemset", types.Array, types.Any)
def array_itemset(context, builder, sig, args):
    """
    Lower ``array.itemset(val)``: write *val* into a size-1 array,
    raising ValueError at runtime for any other size.
    """
    aryty, valty = sig.args
    ary, val = args
    # Typing guarantees the value matches the array dtype
    assert valty == aryty.dtype
    ary = make_array(aryty)(context, builder, ary)
    nitems = ary.nitems
    # Runtime guard: itemset() is only legal on arrays of exactly one element
    with builder.if_then(builder.icmp_signed('!=', nitems, nitems.type(1)),
                         likely=False):
        msg = "itemset(): can only write to an array of size 1"
        context.call_conv.return_user_exc(builder, ValueError, (msg,))
    store_item(context, builder, aryty, val, ary.data)
    return context.get_dummy_value()
# ------------------------------------------------------------------------------
# Advanced / fancy indexing
class Indexer(object):
    """
    Generic indexer interface, for generating indices over a fancy indexed
    array on a single dimension.
    Concrete subclasses emit the LLVM IR that iterates one dimension of
    a fancy-indexing operation.
    """
    def prepare(self):
        """
        Prepare the indexer by initializing any required variables, basic
        blocks...
        """
        raise NotImplementedError
    def get_size(self):
        """
        Return this dimension's size as an integer.
        """
        raise NotImplementedError
    def get_shape(self):
        """
        Return this dimension's shape as a tuple.
        """
        raise NotImplementedError
    def get_index_bounds(self):
        """
        Return a half-open [lower, upper) range of indices this dimension
        is guaranteed not to step out of.
        """
        raise NotImplementedError
    def loop_head(self):
        """
        Start indexation loop.  Return a (index, count) tuple.
        *index* is an integer LLVM value representing the index over this
        dimension.
        *count* is either an integer LLVM value representing the current
        iteration count, or None if this dimension should be omitted from
        the indexation result.
        """
        raise NotImplementedError
    def loop_tail(self):
        """
        Finish indexation loop.
        """
        raise NotImplementedError
class EntireIndexer(Indexer):
    """
    Compute indices along an entire array dimension.
    """
    def __init__(self, context, builder, aryty, ary, dim):
        self.context = context
        self.builder = builder
        self.aryty = aryty
        self.ary = ary
        self.dim = dim
        self.ll_intp = self.context.get_value_type(types.intp)
    def prepare(self):
        # Allocate the loop counter and the start/end basic blocks of
        # the emitted loop.
        builder = self.builder
        self.size = builder.extract_value(self.ary.shape, self.dim)
        self.index = cgutils.alloca_once(builder, self.ll_intp)
        self.bb_start = builder.append_basic_block()
        self.bb_end = builder.append_basic_block()
    def get_size(self):
        return self.size
    def get_shape(self):
        return (self.size,)
    def get_index_bounds(self):
        # [0, size)
        return (self.ll_intp(0), self.size)
    def loop_head(self):
        builder = self.builder
        # Initialize loop variable
        self.builder.store(Constant.int(self.ll_intp, 0), self.index)
        builder.branch(self.bb_start)
        builder.position_at_end(self.bb_start)
        cur_index = builder.load(self.index)
        # Exit the loop once the whole dimension has been walked
        with builder.if_then(builder.icmp_signed('>=', cur_index, self.size),
                             likely=False):
            builder.branch(self.bb_end)
        return cur_index, cur_index
    def loop_tail(self):
        builder = self.builder
        # Increment and branch back to the loop head
        next_index = cgutils.increment_index(builder, builder.load(self.index))
        builder.store(next_index, self.index)
        builder.branch(self.bb_start)
        builder.position_at_end(self.bb_end)
class IntegerIndexer(Indexer):
    """
    Compute indices from a single integer.
    """
    def __init__(self, context, builder, idx):
        self.context = context
        self.builder = builder
        self.idx = idx
        self.ll_intp = self.context.get_value_type(types.intp)
    def prepare(self):
        # A constant index needs no loop state
        pass
    def get_size(self):
        return Constant.int(self.ll_intp, 1)
    def get_shape(self):
        # An integer index collapses its dimension
        return ()
    def get_index_bounds(self):
        # [idx, idx+1)
        return (self.idx, self.builder.add(self.idx, self.get_size()))
    def loop_head(self):
        # count is None: this dimension is omitted from the result shape
        return self.idx, None
    def loop_tail(self):
        pass
class IntegerArrayIndexer(Indexer):
    """
    Compute indices from an array of integer indices.
    """
    def __init__(self, context, builder, idxty, idxary, size):
        self.context = context
        self.builder = builder
        self.idxty = idxty
        self.idxary = idxary
        # Size of the indexed dimension, used to wrap negative indices
        self.size = size
        assert idxty.ndim == 1
        self.ll_intp = self.context.get_value_type(types.intp)
    def prepare(self):
        builder = self.builder
        self.idx_size = cgutils.unpack_tuple(builder, self.idxary.shape)[0]
        self.idx_index = cgutils.alloca_once(builder, self.ll_intp)
        self.bb_start = builder.append_basic_block()
        self.bb_end = builder.append_basic_block()
    def get_size(self):
        return self.idx_size
    def get_shape(self):
        return (self.idx_size,)
    def get_index_bounds(self):
        # Pessimal heuristic, as we don't want to scan for the min and max
        return (self.ll_intp(0), self.size)
    def loop_head(self):
        builder = self.builder
        # Initialize loop variable
        self.builder.store(Constant.int(self.ll_intp, 0), self.idx_index)
        builder.branch(self.bb_start)
        builder.position_at_end(self.bb_start)
        cur_index = builder.load(self.idx_index)
        # Exit once all entries of the index array have been consumed
        with builder.if_then(
                builder.icmp_signed('>=', cur_index, self.idx_size),
                likely=False
        ):
            builder.branch(self.bb_end)
        # Load the actual index from the array of indices
        index = _getitem_array_single_int(
            self.context, builder, self.idxty.dtype, self.idxty, self.idxary,
            cur_index
        )
        # Wrap negative indices against the dimension size
        index = fix_integer_index(self.context, builder,
                                  self.idxty.dtype, index, self.size)
        return index, cur_index
    def loop_tail(self):
        builder = self.builder
        next_index = cgutils.increment_index(builder,
                                             builder.load(self.idx_index))
        builder.store(next_index, self.idx_index)
        builder.branch(self.bb_start)
        builder.position_at_end(self.bb_end)
class BooleanArrayIndexer(Indexer):
    """
    Compute indices from an array of boolean predicates.
    """
    def __init__(self, context, builder, idxty, idxary):
        self.context = context
        self.builder = builder
        self.idxty = idxty
        self.idxary = idxary
        assert idxty.ndim == 1
        self.ll_intp = self.context.get_value_type(types.intp)
        self.zero = Constant.int(self.ll_intp, 0)
    def prepare(self):
        builder = self.builder
        self.size = cgutils.unpack_tuple(builder, self.idxary.shape)[0]
        self.idx_index = cgutils.alloca_once(builder, self.ll_intp)
        # Number of true predicates seen so far (the output position)
        self.count = cgutils.alloca_once(builder, self.ll_intp)
        self.bb_start = builder.append_basic_block()
        # bb_tail is the continue-target for entries whose predicate is false
        self.bb_tail = builder.append_basic_block()
        self.bb_end = builder.append_basic_block()
    def get_size(self):
        builder = self.builder
        count = cgutils.alloca_once_value(builder, self.zero)
        # Sum all true values
        with cgutils.for_range(builder, self.size) as loop:
            c = builder.load(count)
            pred = _getitem_array_single_int(
                self.context, builder, self.idxty.dtype,
                self.idxty, self.idxary, loop.index
            )
            c = builder.add(c, builder.zext(pred, c.type))
            builder.store(c, count)
        return builder.load(count)
    def get_shape(self):
        return (self.get_size(),)
    def get_index_bounds(self):
        # Pessimal heuristic, as we don't want to scan for the
        # first and last true items
        return (self.ll_intp(0), self.size)
    def loop_head(self):
        builder = self.builder
        # Initialize loop variable
        self.builder.store(self.zero, self.idx_index)
        self.builder.store(self.zero, self.count)
        builder.branch(self.bb_start)
        builder.position_at_end(self.bb_start)
        cur_index = builder.load(self.idx_index)
        cur_count = builder.load(self.count)
        with builder.if_then(builder.icmp_signed('>=', cur_index, self.size),
                             likely=False):
            builder.branch(self.bb_end)
        # Load the predicate and branch if false
        pred = _getitem_array_single_int(
            self.context, builder, self.idxty.dtype, self.idxty, self.idxary,
            cur_index
        )
        with builder.if_then(builder.not_(pred)):
            builder.branch(self.bb_tail)
        # Increment the count for next iteration
        next_count = cgutils.increment_index(builder, cur_count)
        builder.store(next_count, self.count)
        return cur_index, cur_count
    def loop_tail(self):
        builder = self.builder
        builder.branch(self.bb_tail)
        builder.position_at_end(self.bb_tail)
        next_index = cgutils.increment_index(builder,
                                             builder.load(self.idx_index))
        builder.store(next_index, self.idx_index)
        builder.branch(self.bb_start)
        builder.position_at_end(self.bb_end)
class SliceIndexer(Indexer):
    """
    Compute indices along a slice.
    """
    def __init__(self, context, builder, aryty, ary, dim, idxty, slice):
        self.context = context
        self.builder = builder
        self.aryty = aryty
        self.ary = ary
        self.dim = dim
        self.idxty = idxty
        self.slice = slice
        self.ll_intp = self.context.get_value_type(types.intp)
        self.zero = Constant.int(self.ll_intp, 0)
    def prepare(self):
        builder = self.builder
        # Fix slice for the dimension's size
        self.dim_size = builder.extract_value(self.ary.shape, self.dim)
        slicing.guard_invalid_slice(self.context, builder, self.idxty,
                                    self.slice)
        slicing.fix_slice(builder, self.slice, self.dim_size)
        # Precompute the step sign to pick the right loop-exit comparison
        self.is_step_negative = cgutils.is_neg_int(builder, self.slice.step)
        # Create loop entities
        self.index = cgutils.alloca_once(builder, self.ll_intp)
        self.count = cgutils.alloca_once(builder, self.ll_intp)
        self.bb_start = builder.append_basic_block()
        self.bb_end = builder.append_basic_block()
    def get_size(self):
        return slicing.get_slice_length(self.builder, self.slice)
    def get_shape(self):
        return (self.get_size(),)
    def get_index_bounds(self):
        lower, upper = slicing.get_slice_bounds(self.builder, self.slice)
        return lower, upper
    def loop_head(self):
        builder = self.builder
        # Initialize loop variable
        self.builder.store(self.slice.start, self.index)
        self.builder.store(self.zero, self.count)
        builder.branch(self.bb_start)
        builder.position_at_end(self.bb_start)
        cur_index = builder.load(self.index)
        cur_count = builder.load(self.count)
        # The termination test depends on the iteration direction
        is_finished = builder.select(self.is_step_negative,
                                     builder.icmp_signed('<=', cur_index,
                                                         self.slice.stop),
                                     builder.icmp_signed('>=', cur_index,
                                                         self.slice.stop))
        with builder.if_then(is_finished, likely=False):
            builder.branch(self.bb_end)
        return cur_index, cur_count
    def loop_tail(self):
        builder = self.builder
        # Advance by the slice step ('nsw' => no signed overflow)
        next_index = builder.add(builder.load(self.index), self.slice.step,
                                 flags=['nsw'])
        builder.store(next_index, self.index)
        next_count = cgutils.increment_index(builder, builder.load(self.count))
        builder.store(next_count, self.count)
        builder.branch(self.bb_start)
        builder.position_at_end(self.bb_end)
class FancyIndexer(object):
    """
    Perform fancy indexing on the given array.
    Builds one Indexer per array dimension from the given index types
    and drives their nested loops.
    """
    def __init__(self, context, builder, aryty, ary, index_types, indices):
        self.context = context
        self.builder = builder
        self.aryty = aryty
        self.shapes = cgutils.unpack_tuple(builder, ary.shape, aryty.ndim)
        self.strides = cgutils.unpack_tuple(builder, ary.strides, aryty.ndim)
        self.ll_intp = self.context.get_value_type(types.intp)
        indexers = []
        ax = 0
        for indexval, idxty in zip(indices, index_types):
            if idxty is types.ellipsis:
                # Fill up missing dimensions at the middle
                n_missing = aryty.ndim - len(indices) + 1
                for i in range(n_missing):
                    indexer = EntireIndexer(context, builder, aryty, ary, ax)
                    indexers.append(indexer)
                    ax += 1
                continue
            # Regular index value
            if isinstance(idxty, types.SliceType):
                slice = context.make_helper(builder, idxty, indexval)
                indexer = SliceIndexer(context, builder, aryty, ary, ax,
                                       idxty, slice)
                indexers.append(indexer)
            elif isinstance(idxty, types.Integer):
                ind = fix_integer_index(context, builder, idxty, indexval,
                                        self.shapes[ax])
                indexer = IntegerIndexer(context, builder, ind)
                indexers.append(indexer)
            elif isinstance(idxty, types.Array):
                idxary = make_array(idxty)(context, builder, indexval)
                if isinstance(idxty.dtype, types.Integer):
                    indexer = IntegerArrayIndexer(context, builder,
                                                  idxty, idxary,
                                                  self.shapes[ax])
                elif isinstance(idxty.dtype, types.Boolean):
                    indexer = BooleanArrayIndexer(context, builder,
                                                  idxty, idxary)
                else:
                    assert 0
                indexers.append(indexer)
            else:
                raise AssertionError("unexpected index type: %s" % (idxty,))
            ax += 1
        # Fill up missing dimensions at the end
        assert ax <= aryty.ndim, (ax, aryty.ndim)
        while ax < aryty.ndim:
            indexer = EntireIndexer(context, builder, aryty, ary, ax)
            indexers.append(indexer)
            ax += 1
        assert len(indexers) == aryty.ndim, (len(indexers), aryty.ndim)
        self.indexers = indexers
    def prepare(self):
        for i in self.indexers:
            i.prepare()
        # Compute the resulting shape
        self.indexers_shape = sum([i.get_shape() for i in self.indexers], ())
    def get_shape(self):
        """
        Get the resulting data shape as Python tuple.
        """
        return self.indexers_shape
    def get_offset_bounds(self, strides, itemsize):
        """
        Get a half-open [lower, upper) range of byte offsets spanned by
        the indexer with the given strides and itemsize.  The indexer is
        guaranteed to not go past those bounds.
        """
        assert len(strides) == self.aryty.ndim
        builder = self.builder
        is_empty = cgutils.false_bit
        zero = self.ll_intp(0)
        one = self.ll_intp(1)
        lower = zero
        upper = zero
        for indexer, shape, stride in zip(self.indexers, self.indexers_shape,
                                          strides):
            is_empty = builder.or_(is_empty,
                                   builder.icmp_unsigned('==', shape, zero))
            # Compute [lower, upper) indices on this dimension
            lower_index, upper_index = indexer.get_index_bounds()
            lower_offset = builder.mul(stride, lower_index)
            upper_offset = builder.mul(stride, builder.sub(upper_index, one))
            # Adjust total interval
            is_downwards = builder.icmp_signed('<', stride, zero)
            lower = builder.add(lower,
                                builder.select(is_downwards,
                                               upper_offset,
                                               lower_offset))
            upper = builder.add(upper,
                                builder.select(is_downwards,
                                               lower_offset,
                                               upper_offset))
        # Make interval half-open
        upper = builder.add(upper, itemsize)
        # Adjust for empty shape
        lower = builder.select(is_empty, zero, lower)
        upper = builder.select(is_empty, zero, upper)
        return lower, upper
    def begin_loops(self):
        # Open the nested loops, outermost first
        indices, counts = zip(*(i.loop_head() for i in self.indexers))
        return indices, counts
    def end_loops(self):
        # Close the loops in reverse (innermost first)
        for i in reversed(self.indexers):
            i.loop_tail()
def fancy_getitem(context, builder, sig, args,
                  aryty, ary, index_types, indices):
    """
    Lower advanced (fancy) indexing on *ary*: copy the selected elements
    into a newly allocated C-contiguous result array and return it.
    """
    shapes = cgutils.unpack_tuple(builder, ary.shape)
    strides = cgutils.unpack_tuple(builder, ary.strides)
    data = ary.data
    indexer = FancyIndexer(context, builder, aryty, ary,
                           index_types, indices)
    indexer.prepare()
    # Construct output array
    out_ty = sig.return_type
    out_shapes = indexer.get_shape()
    out = _empty_nd_impl(context, builder, out_ty, out_shapes)
    out_data = out.data
    # Flat write position into the (C-contiguous) output
    out_idx = cgutils.alloca_once_value(builder,
                                        context.get_constant(types.intp, 0))
    # Loop on source and copy to destination
    indices, _ = indexer.begin_loops()
    # No need to check for wraparound, as the indexers all ensure
    # a positive index is returned.
    ptr = cgutils.get_item_pointer2(context, builder, data, shapes, strides,
                                    aryty.layout, indices, wraparound=False,
                                    boundscheck=context.enable_boundscheck)
    val = load_item(context, builder, aryty, ptr)
    # Since the destination is C-contiguous, no need for multi-dimensional
    # indexing.
    cur = builder.load(out_idx)
    ptr = builder.gep(out_data, [cur])
    store_item(context, builder, out_ty, val, ptr)
    next_idx = cgutils.increment_index(builder, cur)
    builder.store(next_idx, out_idx)
    indexer.end_loops()
    return impl_ret_new_ref(context, builder, out_ty, out._getvalue())
@lower_builtin(operator.getitem, types.Buffer, types.Array)
def fancy_getitem_array(context, builder, sig, args):
    """
    Indexing with an array index: a 0-d index degenerates to basic
    integer indexing, otherwise advanced (fancy) indexing is performed.
    """
    aryty, idxty = sig.args
    aryval, idxval = args
    arystruct = make_array(aryty)(context, builder, aryval)
    if idxty.ndim != 0:
        # Advanced indexing
        return fancy_getitem(context, builder, sig, args,
                             aryty, arystruct, (idxty,), (idxval,))
    # A 0-d array index behaves like a plain integer index
    idxty, idxval = normalize_index(context, builder, idxty, idxval)
    result = _getitem_array_generic(context, builder, sig.return_type,
                                    aryty, arystruct, (idxty,), (idxval,))
    return impl_ret_borrowed(context, builder, sig.return_type, result)
def offset_bounds_from_strides(context, builder, arrty, arr, shapes, strides):
    """
    Compute a half-open range [lower, upper) of byte offsets from the
    array's data pointer, that bound the in-memory extent of the array.
    This mimicks offset_bounds_from_strides() from
    numpy/core/src/private/mem_overlap.c
    """
    itemsize = arr.itemsize
    zero = itemsize.type(0)
    one = zero.type(1)
    if arrty.layout in 'CF':
        # Array is contiguous: contents are laid out sequentially
        # starting from arr.data and upwards
        lower = zero
        upper = builder.mul(itemsize, arr.nitems)
    else:
        # Non-contiguous array: need to examine strides
        lower = zero
        upper = zero
        for i in range(arrty.ndim):
            # Compute the largest byte offset on this dimension
            #   max_axis_offset = strides[i] * (shapes[i] - 1)
            # (shapes[i] == 0 is catered for by the empty array case below)
            max_axis_offset = builder.mul(strides[i],
                                          builder.sub(shapes[i], one))
            is_upwards = builder.icmp_signed('>=', max_axis_offset, zero)
            # Expand either upwards or downwards depending on stride
            upper = builder.select(is_upwards,
                                   builder.add(upper, max_axis_offset), upper)
            lower = builder.select(is_upwards,
                                   lower, builder.add(lower, max_axis_offset))
        # Return a half-open range
        upper = builder.add(upper, itemsize)
        # Adjust for empty arrays
        is_empty = builder.icmp_signed('==', arr.nitems, zero)
        upper = builder.select(is_empty, zero, upper)
        lower = builder.select(is_empty, zero, lower)
    return lower, upper
def compute_memory_extents(context, builder, lower, upper, data):
    """
    Translate [lower, upper) byte offsets relative to the base pointer
    *data* into absolute (start, end) bounds as pointer-sized integers.
    """
    base = builder.ptrtoint(data, lower.type)
    start = builder.add(base, lower)
    end = builder.add(base, upper)
    return start, end
def get_array_memory_extents(context, builder, arrty, arr, shapes, strides,
                             data):
    """
    Compute a half-open range [start, end) of pointer-sized integers
    which fully contain the array data.
    """
    # Relative byte offsets first, then anchor them at the data pointer
    lower, upper = offset_bounds_from_strides(context, builder, arrty, arr,
                                              shapes, strides)
    return compute_memory_extents(context, builder, lower, upper, data)
def extents_may_overlap(context, builder, a_start, a_end, b_start, b_end):
    """
    Return whether the memory extents [a_start, a_end) and [b_start, b_end)
    may overlap.
    """
    # Comparisons are unsigned, since we are really comparing pointers
    a_starts_before_b_ends = builder.icmp_unsigned('<', a_start, b_end)
    b_starts_before_a_ends = builder.icmp_unsigned('<', b_start, a_end)
    return builder.and_(a_starts_before_b_ends, b_starts_before_a_ends)
def maybe_copy_source(context, builder, use_copy,
                      srcty, src, src_shapes, src_strides, src_data):
    """
    Prepare runtime accessors for the source of a slice assignment.

    If *use_copy* (a runtime i1 value) is true, the source array is first
    copied into a freshly NRT-allocated C-contiguous scratchpad so that
    reads are unaffected by writes into an overlapping destination.

    Returns a pair (src_getitem, src_cleanup):
    * src_getitem(source_indices): load one source item, reading from the
      scratchpad or the original data depending on *use_copy*.
    * src_cleanup(): free the scratchpad, if one was allocated.
    """
    ptrty = src_data.type
    copy_layout = 'C'
    # Stack slot holding the data pointer to read from
    # (original data, or scratchpad when a copy was made)
    copy_data = cgutils.alloca_once_value(builder, src_data)
    copy_shapes = src_shapes
    copy_strides = None # unneeded for contiguous arrays
    with builder.if_then(use_copy, likely=False):
        # Allocate temporary scratchpad
        # XXX: should we use a stack-allocated array for very small
        # data sizes?
        allocsize = builder.mul(src.itemsize, src.nitems)
        data = context.nrt.allocate(builder, allocsize)
        voidptrty = data.type
        data = builder.bitcast(data, ptrty)
        builder.store(data, copy_data)
        # Copy source data into scratchpad
        intp_t = context.get_value_type(types.intp)
        with cgutils.loop_nest(builder, src_shapes, intp_t) as indices:
            src_ptr = cgutils.get_item_pointer2(context, builder, src_data,
                                                src_shapes, src_strides,
                                                srcty.layout, indices)
            dest_ptr = cgutils.get_item_pointer2(context, builder, data,
                                                 copy_shapes, copy_strides,
                                                 copy_layout, indices)
            builder.store(builder.load(src_ptr), dest_ptr)
    def src_getitem(source_indices):
        assert len(source_indices) == srcty.ndim
        src_ptr = cgutils.alloca_once(builder, ptrty)
        # Choose at runtime between scratchpad and original array
        with builder.if_else(use_copy, likely=False) as (if_copy, otherwise):
            with if_copy:
                builder.store(
                    cgutils.get_item_pointer2(context, builder,
                                              builder.load(copy_data),
                                              copy_shapes, copy_strides,
                                              copy_layout, source_indices,
                                              wraparound=False),
                    src_ptr)
            with otherwise:
                builder.store(
                    cgutils.get_item_pointer2(context, builder, src_data,
                                              src_shapes, src_strides,
                                              srcty.layout, source_indices,
                                              wraparound=False),
                    src_ptr)
        return load_item(context, builder, srcty, builder.load(src_ptr))
    def src_cleanup():
        # Deallocate memory
        with builder.if_then(use_copy, likely=False):
            data = builder.load(copy_data)
            data = builder.bitcast(data, voidptrty)
            context.nrt.free(builder, data)
    return src_getitem, src_cleanup
def _bc_adjust_dimension(context, builder, shapes, strides, target_shape):
    """
    Preprocess dimension for broadcasting.
    Returns (shapes, strides) such that the ndim match *target_shape*.
    When expanding to higher ndim, the returning shapes and strides are
    prepended with ones and zeros, respectively.
    When truncating to lower ndim, the shapes are checked (in runtime).
    All extra dimension must have size of 1.

    Note: *shapes*, *strides* and *target_shape* are Python lists of LLVM
    values; only their lengths are known at compile time.
    """
    zero = context.get_constant(types.uintp, 0)
    one = context.get_constant(types.uintp, 1)
    # Adjust for broadcasting to higher dimension
    if len(target_shape) > len(shapes):
        nd_diff = len(target_shape) - len(shapes)
        # Fill missing shapes with one, strides with zeros
        # (a zero stride makes every index along the new dim alias
        # the same underlying element)
        shapes = [one] * nd_diff + shapes
        strides = [zero] * nd_diff + strides
    # Adjust for broadcasting to lower dimension
    elif len(target_shape) < len(shapes):
        # Accepted if all extra dims has shape 1
        nd_diff = len(shapes) - len(target_shape)
        dim_is_one = [builder.icmp_unsigned('==', sh, one)
                      for sh in shapes[:nd_diff]]
        accepted = functools.reduce(builder.and_, dim_is_one,
                                    cgutils.true_bit)
        # Check error
        with builder.if_then(builder.not_(accepted), likely=False):
            msg = "cannot broadcast source array for assignment"
            context.call_conv.return_user_exc(builder, ValueError, (msg,))
        # Truncate extra shapes, strides
        shapes = shapes[nd_diff:]
        strides = strides[nd_diff:]
    return shapes, strides
def _bc_adjust_shape_strides(context, builder, shapes, strides, target_shape):
    """
    Broadcast shapes and strides to target_shape given that their ndim already
    matches. For each location where the shape is 1 and does not match the
    dim for target, it is set to the value at the target and the stride is
    set to zero.
    """
    zero = context.get_constant(types.uintp, 0)
    one = context.get_constant(types.uintp, 1)
    # A dimension is broadcast when it differs from the target AND is 1
    # (fix: removed dead `bc_shapes = []` / `bc_strides = []` initializers
    # that were immediately overwritten below)
    mismatch = [builder.icmp_signed('!=', tar, old)
                for tar, old in zip(target_shape, shapes)]
    src_is_one = [builder.icmp_signed('==', old, one) for old in shapes]
    preds = [builder.and_(x, y) for x, y in zip(mismatch, src_is_one)]
    # Broadcast dims take the target extent with a zero stride, so every
    # index along them maps to the same source element
    bc_shapes = [builder.select(p, tar, old)
                 for p, tar, old in zip(preds, target_shape, shapes)]
    bc_strides = [builder.select(p, zero, old)
                  for p, old in zip(preds, strides)]
    return bc_shapes, bc_strides
def _broadcast_to_shape(context, builder, arrtype, arr, target_shape):
    """
    Broadcast the given array to the target_shape.
    Returns (array_type, array)

    The result is a view sharing *arr*'s data; broadcast dimensions get
    stride zero.
    """
    # Compute broadcasted shape and strides
    shapes = cgutils.unpack_tuple(builder, arr.shape)
    strides = cgutils.unpack_tuple(builder, arr.strides)
    shapes, strides = _bc_adjust_dimension(context, builder, shapes, strides,
                                           target_shape)
    shapes, strides = _bc_adjust_shape_strides(context, builder, shapes,
                                               strides, target_shape)
    # Use 'A' layout since broadcasting generally breaks contiguity
    new_arrtype = arrtype.copy(ndim=len(target_shape), layout='A')
    # Create new view
    new_arr = make_array(new_arrtype)(context, builder)
    repl = dict(shape=cgutils.pack_array(builder, shapes),
                strides=cgutils.pack_array(builder, strides))
    cgutils.copy_struct(new_arr, arr, repl)
    return new_arrtype, new_arr
def fancy_setslice(context, builder, sig, args, index_types, indices):
    """
    Implement slice assignment for arrays. This implementation works for
    basic as well as fancy indexing, since there's no functional difference
    between the two for indexed assignment.

    The source may be an array (broadcast to the indexed shape), a
    sequence (1-D indexing only), or a scalar (broadcast to every
    destination element).
    """
    aryty, _, srcty = sig.args
    ary, _, src = args
    ary = make_array(aryty)(context, builder, ary)
    dest_shapes = cgutils.unpack_tuple(builder, ary.shape)
    dest_strides = cgutils.unpack_tuple(builder, ary.strides)
    dest_data = ary.data
    indexer = FancyIndexer(context, builder, aryty, ary,
                           index_types, indices)
    indexer.prepare()
    if isinstance(srcty, types.Buffer):
        # Source is an array
        src_dtype = srcty.dtype
        index_shape = indexer.get_shape()
        src = make_array(srcty)(context, builder, src)
        # Broadcast source array to shape
        srcty, src = _broadcast_to_shape(context, builder, srcty, src,
                                         index_shape)
        src_shapes = cgutils.unpack_tuple(builder, src.shape)
        src_strides = cgutils.unpack_tuple(builder, src.strides)
        src_data = src.data
        # Check shapes are equal
        shape_error = cgutils.false_bit
        assert len(index_shape) == len(src_shapes)
        for u, v in zip(src_shapes, index_shape):
            shape_error = builder.or_(shape_error,
                                      builder.icmp_signed('!=', u, v))
        with builder.if_then(shape_error, likely=False):
            msg = "cannot assign slice from input of different size"
            context.call_conv.return_user_exc(builder, ValueError, (msg,))
        # Check for array overlap: if source and destination memory ranges
        # may intersect, maybe_copy_source() will first copy the source to
        # a scratchpad so the assignment behaves like NumPy's.
        src_start, src_end = get_array_memory_extents(context, builder, srcty,
                                                      src, src_shapes,
                                                      src_strides, src_data)
        dest_lower, dest_upper = indexer.get_offset_bounds(dest_strides,
                                                           ary.itemsize)
        dest_start, dest_end = compute_memory_extents(context, builder,
                                                      dest_lower, dest_upper,
                                                      dest_data)
        use_copy = extents_may_overlap(context, builder, src_start, src_end,
                                       dest_start, dest_end)
        src_getitem, src_cleanup = maybe_copy_source(context, builder, use_copy,
                                                     srcty, src, src_shapes,
                                                     src_strides, src_data)
    elif isinstance(srcty, types.Sequence):
        src_dtype = srcty.dtype
        # Check shape is equal to sequence length
        index_shape = indexer.get_shape()
        assert len(index_shape) == 1
        len_impl = context.get_function(len, signature(types.intp, srcty))
        seq_len = len_impl(builder, (src,))
        shape_error = builder.icmp_signed('!=', index_shape[0], seq_len)
        with builder.if_then(shape_error, likely=False):
            msg = "cannot assign slice from input of different size"
            context.call_conv.return_user_exc(builder, ValueError, (msg,))
        def src_getitem(source_indices):
            # Read one element through the sequence's getitem
            idx, = source_indices
            getitem_impl = context.get_function(
                operator.getitem,
                signature(src_dtype, srcty, types.intp),
            )
            return getitem_impl(builder, (src, idx))
        def src_cleanup():
            pass
    else:
        # Source is a scalar (broadcast or not, depending on destination
        # shape).
        src_dtype = srcty
        def src_getitem(source_indices):
            return src
        def src_cleanup():
            pass
    # Loop on destination and copy from source to destination
    dest_indices, counts = indexer.begin_loops()
    # Source is iterated in natural order
    source_indices = tuple(c for c in counts if c is not None)
    val = src_getitem(source_indices)
    # Cast to the destination dtype (cross-dtype slice assignment is allowed)
    val = context.cast(builder, val, src_dtype, aryty.dtype)
    # No need to check for wraparound, as the indexers all ensure
    # a positive index is returned.
    dest_ptr = cgutils.get_item_pointer2(context, builder, dest_data,
                                         dest_shapes, dest_strides,
                                         aryty.layout, dest_indices,
                                         wraparound=False)
    store_item(context, builder, aryty, val, dest_ptr)
    indexer.end_loops()
    src_cleanup()
    return context.get_dummy_value()
# ------------------------------------------------------------------------------
# Shape / layout altering
def vararg_to_tuple(context, builder, sig, args):
    """
    Rewrite an (array, *dims) call as an (array, dims_tuple) call.
    Returns (new_sig, new_args) suitable for the tuple-based lowering.
    """
    aryty, dimtys = sig.args[0], sig.args[1:]
    ary, dims = args[0], args[1:]
    # Coerce every dimension value to intp
    coerced = [context.cast(builder, val, ty, types.intp)
               for ty, val in zip(dimtys, dims)]
    # Pack the dimensions into a homogeneous tuple value
    shape = cgutils.pack_array(builder, coerced, coerced[0].type)
    shapety = types.UniTuple(dtype=types.intp, count=len(coerced))
    new_sig = typing.signature(sig.return_type, aryty, shapety)
    return new_sig, (ary, shape)
@lower_builtin('array.transpose', types.Array)
def array_transpose(context, builder, sig, args):
    # Without an axes argument, transpose is identical to the .T attribute
    aryty = sig.args[0]
    return array_T(context, builder, aryty, args[0])
def permute_arrays(axis, shape, strides):
    """
    Permute *shape* and *strides* in place according to *axis*.
    Raises ValueError for repeated or out-of-range axis entries.
    (Compiled internally by array_transpose_tuple.)
    """
    if len(set(axis)) != len(axis):
        raise ValueError("repeated axis in transpose")
    ndim = len(shape)
    for a in axis:
        if a >= ndim or abs(a) > ndim:
            raise ValueError("axis is out of bounds for array of "
                             "given dimension")
    shape[:] = shape[axis]
    strides[:] = strides[axis]
# Transposing an array involves permuting the shape and strides of the array
# based on the given axes.
@lower_builtin('array.transpose', types.Array, types.BaseTuple)
def array_transpose_tuple(context, builder, sig, args):
    """
    array.transpose(axes_tuple): permute shape and strides according to
    *axes*, returning a view on the same data (no copy).
    """
    aryty = sig.args[0]
    ary = make_array(aryty)(context, builder, args[0])
    axisty, axis = sig.args[1], args[1]
    num_axis, dtype = axisty.count, axisty.dtype
    ll_intp = context.get_value_type(types.intp)
    ll_ary_size = lc.Type.array(ll_intp, num_axis)
    # Allocate memory for axes, shapes, and strides arrays.
    arys = [axis, ary.shape, ary.strides]
    ll_arys = [cgutils.alloca_once(builder, ll_ary_size) for _ in arys]
    # Store axes, shapes, and strides arrays to the allocated memory.
    for src, dst in zip(arys, ll_arys):
        builder.store(src, dst)
    np_ary_ty = types.Array(dtype=dtype, ndim=1, layout='C')
    np_itemsize = context.get_constant(types.intp,
                                       context.get_abi_sizeof(ll_intp))
    # Form NumPy arrays for axes, shapes, and strides arrays.
    np_arys = [make_array(np_ary_ty)(context, builder) for _ in arys]
    # Roughly, `np_ary = np.array(ll_ary)` for each of axes, shapes, and strides
    for np_ary, ll_ary in zip(np_arys, ll_arys):
        populate_array(np_ary,
                       data=builder.bitcast(ll_ary, ll_intp.as_pointer()),
                       shape=[context.get_constant(types.intp, num_axis)],
                       strides=[np_itemsize],
                       itemsize=np_itemsize,
                       meminfo=None)
    # Pass NumPy arrays formed above to permute_arrays function that permutes
    # shapes and strides based on axis contents (also validates the axes).
    context.compile_internal(builder, permute_arrays,
                             typing.signature(types.void,
                                              np_ary_ty, np_ary_ty, np_ary_ty),
                             [a._getvalue() for a in np_arys])
    # Make a new array based on permuted shape and strides and return it.
    ret = make_array(sig.return_type)(context, builder)
    populate_array(ret,
                   data=ary.data,
                   shape=builder.load(ll_arys[1]),
                   strides=builder.load(ll_arys[2]),
                   itemsize=ary.itemsize,
                   meminfo=ary.meminfo,
                   parent=ary.parent)
    res = ret._getvalue()
    return impl_ret_borrowed(context, builder, sig.return_type, res)
@lower_builtin('array.transpose', types.Array, types.VarArg(types.Any))
def array_transpose_vararg(context, builder, sig, args):
    # Normalize the variadic form to the tuple form, then dispatch to it
    tup_sig, tup_args = vararg_to_tuple(context, builder, sig, args)
    return array_transpose_tuple(context, builder, tup_sig, tup_args)
@overload(np.transpose)
def numpy_transpose(a, axes=None):
    """np.transpose(a, axes=None) -> a.transpose(...)"""
    if isinstance(a, types.BaseTuple):
        raise errors.UnsupportedError("np.transpose does not accept tuples")
    if axes is None:
        # No axes given: defer to the parameterless method form
        def np_transpose_impl(a, axes=None):
            return a.transpose()
        return np_transpose_impl
    def np_transpose_impl(a, axes=None):
        return a.transpose(axes)
    return np_transpose_impl
@lower_getattr(types.Array, 'T')
def array_T(context, builder, typ, value):
    """
    Implement the ``.T`` attribute: a view with reversed shape and strides.
    0-d and 1-d arrays are returned unchanged.
    """
    if typ.ndim <= 1:
        res = value
    else:
        ary = make_array(typ)(context, builder, value)
        ret = make_array(typ)(context, builder)
        shapes = cgutils.unpack_tuple(builder, ary.shape, typ.ndim)
        strides = cgutils.unpack_tuple(builder, ary.strides, typ.ndim)
        # Same data, reversed shape/strides => transposed view, no copy
        populate_array(ret,
                       data=ary.data,
                       shape=cgutils.pack_array(builder, shapes[::-1]),
                       strides=cgutils.pack_array(builder, strides[::-1]),
                       itemsize=ary.itemsize,
                       meminfo=ary.meminfo,
                       parent=ary.parent)
        res = ret._getvalue()
    return impl_ret_borrowed(context, builder, typ, res)
def _attempt_nocopy_reshape(context, builder, aryty, ary,
                            newnd, newshape, newstrides):
    """
    Call into Numba_attempt_nocopy_reshape() for the given array type
    and instance, and the specified new shape.
    Return value is non-zero if successful, and the array pointed to
    by *newstrides* will be filled up with the computed results.

    *newshape* and *newstrides* are pointers to stack-allocated
    [newnd x intp] arrays.
    """
    ll_intp = context.get_value_type(types.intp)
    ll_intp_star = ll_intp.as_pointer()
    ll_intc = context.get_value_type(types.intc)
    # Signature of the C helper in the Numba runtime
    fnty = lc.Type.function(ll_intc, [
        # nd, *dims, *strides
        ll_intp, ll_intp_star, ll_intp_star,
        # newnd, *newdims, *newstrides
        ll_intp, ll_intp_star, ll_intp_star,
        # itemsize, is_f_order
        ll_intp, ll_intc])
    fn = builder.module.get_or_insert_function(
        fnty, name="numba_attempt_nocopy_reshape")
    nd = ll_intp(aryty.ndim)
    # Decay the [N x intp] struct members to plain intp* arguments
    shape = cgutils.gep_inbounds(builder, ary._get_ptr_by_name('shape'), 0, 0)
    strides = cgutils.gep_inbounds(builder, ary._get_ptr_by_name('strides'),
                                   0, 0)
    newnd = ll_intp(newnd)
    newshape = cgutils.gep_inbounds(builder, newshape, 0, 0)
    newstrides = cgutils.gep_inbounds(builder, newstrides, 0, 0)
    # Only C-order reshape is requested here
    is_f_order = ll_intc(0)
    res = builder.call(fn, [nd, shape, strides,
                            newnd, newshape, newstrides,
                            ary.itemsize, is_f_order])
    return res
def normalize_reshape_value(origsize, shape):
    """
    Validate *shape* against the original element count *origsize* and
    resolve at most one negative (inferred) dimension, mutating *shape*
    in place. (Compiled internally by array_reshape.)
    """
    neg_count = 0
    neg_ax = 0
    known_size = 1
    for ax in range(len(shape)):
        dim = shape[ax]
        if dim < 0:
            neg_count += 1
            neg_ax = ax
        else:
            known_size *= dim
    if neg_count > 1:
        raise ValueError("multiple negative shape values")
    if neg_count == 0:
        if origsize != known_size:
            raise ValueError("total size of new array must be unchanged")
    else:
        # Exactly one negative entry: infer it from the remaining size
        if known_size == 0:
            inferred = 0
            ok = origsize == 0
        else:
            inferred = origsize // known_size
            ok = origsize % known_size == 0
        if not ok:
            raise ValueError("total size of new array must be unchanged")
        shape[neg_ax] = inferred
@lower_builtin('array.reshape', types.Array, types.BaseTuple)
def array_reshape(context, builder, sig, args):
    """
    array.reshape(shape_tuple): return a no-copy reshaped view.
    Supports one -1 (inferred) dimension; raises NotImplementedError at
    runtime when the reshape would require a copy.
    """
    aryty = sig.args[0]
    retty = sig.return_type
    shapety = sig.args[1]
    shape = args[1]
    ll_intp = context.get_value_type(types.intp)
    ll_shape = lc.Type.array(ll_intp, shapety.count)
    ary = make_array(aryty)(context, builder, args[0])
    # We will change the target shape in this slot
    # (see normalize_reshape_value() below)
    newshape = cgutils.alloca_once(builder, ll_shape)
    builder.store(shape, newshape)
    # Create a shape array pointing to the value of newshape.
    # (roughly, `shape_ary = np.array(ary.shape)`)
    shape_ary_ty = types.Array(dtype=shapety.dtype, ndim=1, layout='C')
    shape_ary = make_array(shape_ary_ty)(context, builder)
    shape_itemsize = context.get_constant(types.intp,
                                          context.get_abi_sizeof(ll_intp))
    populate_array(shape_ary,
                   data=builder.bitcast(newshape, ll_intp.as_pointer()),
                   shape=[context.get_constant(types.intp, shapety.count)],
                   strides=[shape_itemsize],
                   itemsize=shape_itemsize,
                   meminfo=None)
    # Compute the original array size
    size = ary.nitems
    # Call our normalizer which will fix the shape array in case of negative
    # shape value
    context.compile_internal(builder, normalize_reshape_value,
                             typing.signature(types.void,
                                              types.uintp, shape_ary_ty),
                             [size, shape_ary._getvalue()])
    # Perform reshape (nocopy)
    newnd = shapety.count
    newstrides = cgutils.alloca_once(builder, ll_shape)
    ok = _attempt_nocopy_reshape(context, builder, aryty, ary, newnd,
                                 newshape, newstrides)
    fail = builder.icmp_unsigned('==', ok, ok.type(0))
    with builder.if_then(fail):
        msg = "incompatible shape for array"
        context.call_conv.return_user_exc(builder, NotImplementedError, (msg,))
    # Build the result view on the same data with the computed strides
    ret = make_array(retty)(context, builder)
    populate_array(ret,
                   data=ary.data,
                   shape=builder.load(newshape),
                   strides=builder.load(newstrides),
                   itemsize=ary.itemsize,
                   meminfo=ary.meminfo,
                   parent=ary.parent)
    res = ret._getvalue()
    return impl_ret_borrowed(context, builder, sig.return_type, res)
@lower_builtin('array.reshape', types.Array, types.VarArg(types.Any))
def array_reshape_vararg(context, builder, sig, args):
    # Collapse the variadic dimensions into a tuple, then reuse the
    # tuple-based implementation.
    tup_sig, tup_args = vararg_to_tuple(context, builder, sig, args)
    return array_reshape(context, builder, tup_sig, tup_args)
@overload(np.reshape)
def np_reshape(a, shape):
    """np.reshape(a, shape) simply delegates to the bound method."""
    def np_reshape_impl(a, shape):
        return a.reshape(shape)
    return np_reshape_impl
@overload(np.append)
def np_append(arr, values, axis=None):
    """
    np.append(arr, values, axis=None): concatenate *values* onto *arr*.
    """
    if not type_can_asarray(arr):
        raise errors.TypingError('The first argument "arr" must be array-like')
    if not type_can_asarray(values):
        raise errors.TypingError('The second argument "values" must be '
                                 'array-like')
    if is_nonelike(axis):
        # No axis: both inputs are flattened before concatenation,
        # matching NumPy's behaviour
        def impl(arr, values, axis=None):
            arr = np.ravel(np.asarray(arr))
            values = np.ravel(np.asarray(values))
            return np.concatenate((arr, values))
    else:
        if not isinstance(axis, types.Integer):
            raise errors.TypingError('The third argument "axis" must be an '
                                     'integer')
        def impl(arr, values, axis=None):
            return np.concatenate((arr, values), axis=axis)
    return impl
@lower_builtin('array.ravel', types.Array)
def array_ravel(context, builder, sig, args):
    """array.ravel(): only the default order='C' form is supported."""
    def ravel_view(ary):
        # C layout: reshape yields a no-copy view
        return ary.reshape(ary.size)
    def ravel_copy(ary):
        # Other layouts: flatten() makes a C-ordered copy
        return ary.flatten()
    imp = ravel_view if sig.args[0].layout == 'C' else ravel_copy
    res = context.compile_internal(builder, imp, sig, args)
    return impl_ret_new_ref(context, builder, sig.return_type, res)
@lower_builtin(np.ravel, types.Array)
def np_ravel(context, builder, sig, args):
    # np.ravel(a) is just the bound-method form a.ravel()
    def np_ravel_impl(a):
        return a.ravel()
    return context.compile_internal(builder, np_ravel_impl, sig, args)
@lower_builtin('array.flatten', types.Array)
def array_flatten(context, builder, sig, args):
    """array.flatten(): copy then reshape — only C layout is produced."""
    def imp(ary):
        return ary.copy().reshape(ary.size)
    compiled = context.compile_internal(builder, imp, sig, args)
    return impl_ret_new_ref(context, builder, sig.return_type, compiled)
def _change_dtype(context, builder, oldty, newty, ary):
    """
    Attempt to fix up *ary* for switching from *oldty* to *newty*.
    See Numpy's array_descr_set()
    (np/core/src/multiarray/getset.c).
    Attempt to fix the array's shape and strides for a new dtype.
    False is returned on failure, True on success.

    The shape/strides stored inside the array struct *ary* are mutated
    in place by a small jitted helper.
    """
    assert oldty.ndim == newty.ndim
    assert oldty.layout == newty.layout
    # Layout characters as integer codes, usable inside the jitted helper
    new_layout = ord(newty.layout)
    any_layout = ord('A')
    c_layout = ord('C')
    f_layout = ord('F')
    int8 = types.int8
    def imp(nd, dims, strides, old_itemsize, new_itemsize, layout):
        # Attempt to update the layout due to limitation of the numba
        # type system.
        if layout == any_layout:
            # Test rightmost stride to be contiguous
            if strides[-1] == old_itemsize:
                # Process this as if it is C contiguous
                layout = int8(c_layout)
            # Test leftmost stride to be F contiguous
            elif strides[0] == old_itemsize:
                # Process this as if it is F contiguous
                layout = int8(f_layout)
        # Changing itemsize needs a contiguous dimension to absorb it
        if old_itemsize != new_itemsize and (layout == any_layout or nd == 0):
            return False
        if layout == c_layout:
            i = nd - 1
        else:
            i = 0
        if new_itemsize < old_itemsize:
            # If it is compatible, increase the size of the dimension
            # at the end (or at the front if F-contiguous)
            if (old_itemsize % new_itemsize) != 0:
                return False
            newdim = old_itemsize // new_itemsize
            dims[i] *= newdim
            strides[i] = new_itemsize
        elif new_itemsize > old_itemsize:
            # Determine if last (or first if F-contiguous) dimension
            # is compatible
            bytelength = dims[i] * old_itemsize
            if (bytelength % new_itemsize) != 0:
                return False
            dims[i] = bytelength // new_itemsize
            strides[i] = new_itemsize
        else:
            # Same item size: nothing to do (this also works for
            # non-contiguous arrays).
            pass
        return True
    old_itemsize = context.get_constant(types.intp,
                                        get_itemsize(context, oldty))
    new_itemsize = context.get_constant(types.intp,
                                        get_itemsize(context, newty))
    nd = context.get_constant(types.intp, newty.ndim)
    # Expose the struct's shape/strides storage as 1-D intp arrays so the
    # jitted helper above can mutate them in place
    shape_data = cgutils.gep_inbounds(builder, ary._get_ptr_by_name('shape'),
                                      0, 0)
    strides_data = cgutils.gep_inbounds(builder,
                                        ary._get_ptr_by_name('strides'), 0, 0)
    shape_strides_array_type = types.Array(dtype=types.intp, ndim=1, layout='C')
    arycls = context.make_array(shape_strides_array_type)
    shape_constant = cgutils.pack_array(builder,
                                        [context.get_constant(types.intp,
                                                              newty.ndim)])
    sizeof_intp = context.get_abi_sizeof(context.get_data_type(types.intp))
    sizeof_intp = context.get_constant(types.intp, sizeof_intp)
    strides_constant = cgutils.pack_array(builder, [sizeof_intp])
    shape_ary = arycls(context, builder)
    populate_array(shape_ary,
                   data=shape_data,
                   shape=shape_constant,
                   strides=strides_constant,
                   itemsize=sizeof_intp,
                   meminfo=None)
    strides_ary = arycls(context, builder)
    populate_array(strides_ary,
                   data=strides_data,
                   shape=shape_constant,
                   strides=strides_constant,
                   itemsize=sizeof_intp,
                   meminfo=None)
    shape = shape_ary._getvalue()
    strides = strides_ary._getvalue()
    args = [nd, shape, strides, old_itemsize, new_itemsize,
            context.get_constant(types.int8, new_layout)]
    sig = signature(types.boolean,
                    types.intp, # nd
                    shape_strides_array_type, # dims
                    shape_strides_array_type, # strides
                    types.intp, # old_itemsize
                    types.intp, # new_itemsize
                    types.int8, # layout
                    )
    res = context.compile_internal(builder, imp, sig, args)
    # Refresh derived fields (e.g. nitems) after mutating shape/strides
    update_array_info(newty, ary)
    res = impl_ret_borrowed(context, builder, sig.return_type, res)
    return res
@overload(np.shape)
def np_shape(a):
    """np.shape(a): the shape of *a* viewed as an array."""
    if not type_can_asarray(a):
        raise errors.TypingError("The argument to np.shape must be array-like")
    def impl(a):
        return np.asarray(a).shape
    return impl
# ------------------------------------------------------------------------------
@overload(np.unique)
def np_unique(a):
    """np.unique(a): sorted unique values (flattened)."""
    def np_unique_impl(a):
        srt = np.sort(a.ravel())
        head = list(srt[:1])
        # Keep each element that differs from its predecessor
        tail = [x for i, x in enumerate(srt[1:]) if srt[i] != x]
        return np.array(head + tail)
    return np_unique_impl
@overload(np.repeat)
def np_repeat(a, repeats):
    """
    np.repeat(a, repeats) with *repeats* either a scalar integer or an
    array-like of integers (one count per flattened element).
    """
    # Implementation for repeats being a scalar is a module global function
    # (see below) because it might be called from the implementation below.
    def np_repeat_impl_repeats_array_like(a, repeats):
        # implementation if repeats is an array like
        repeats_array = np.asarray(repeats, dtype=np.int64)
        # if it is a singleton array, invoke the scalar implementation
        if repeats_array.shape[0] == 1:
            return np_repeat_impl_repeats_scaler(a, repeats_array[0])
        if np.any(repeats_array < 0):
            raise ValueError("negative dimensions are not allowed")
        asa = np.asarray(a)
        aravel = asa.ravel()
        n = aravel.shape[0]
        # Require one repeat count per flattened element
        if aravel.shape != repeats_array.shape:
            raise ValueError(
                "operands could not be broadcast together")
        to_return = np.empty(np.sum(repeats_array), dtype=asa.dtype)
        pos = 0
        for i in range(n):
            to_return[pos : pos + repeats_array[i]] = aravel[i]
            pos += repeats_array[i]
        return to_return
    # type checking
    if isinstance(a, (types.Array,
                      types.List,
                      types.BaseTuple,
                      types.Number,
                      types.Boolean,
                      )
                  ):
        if isinstance(repeats, types.Integer):
            return np_repeat_impl_repeats_scaler
        elif isinstance(repeats, (types.Array, types.List)):
            if isinstance(repeats.dtype, types.Integer):
                return np_repeat_impl_repeats_array_like
    raise errors.TypingError(
        "The repeats argument must be an integer "
        "or an array-like of integer dtype")
@register_jitable
def np_repeat_impl_repeats_scaler(a, repeats):
    # Scalar-`repeats` implementation of np.repeat, shared at module level
    # so np_repeat's array-like path can call it.
    # (Note: "scaler" is a historical misspelling kept for compatibility.)
    if repeats < 0:
        raise ValueError("negative dimensions are not allowed")
    asa = np.asarray(a)
    aravel = asa.ravel()
    n = aravel.shape[0]
    if repeats == 0:
        # Zero repeats: empty result of the same dtype
        return np.empty(0, dtype=asa.dtype)
    elif repeats == 1:
        # One repeat: a flattened copy
        return np.copy(aravel)
    else:
        # Each element occupies a contiguous run of length `repeats`
        to_return = np.empty(n * repeats, dtype=asa.dtype)
        for i in range(n):
            to_return[i * repeats : (i + 1) * repeats] = aravel[i]
        return to_return
@extending.overload_method(types.Array, 'repeat')
def array_repeat(a, repeats):
    # The bound method simply forwards to the np.repeat overload
    def array_repeat_impl(a, repeats):
        return np.repeat(a, repeats)
    return array_repeat_impl
@lower_builtin('array.view', types.Array, types.DTypeSpec)
def array_view(context, builder, sig, args):
    """
    array.view(dtype): reinterpret the array's data as another dtype,
    fixing up shape/strides via _change_dtype(); raises ValueError at
    runtime if the itemsizes are incompatible.
    """
    aryty = sig.args[0]
    retty = sig.return_type
    ary = make_array(aryty)(context, builder, args[0])
    ret = make_array(retty)(context, builder)
    # Copy all fields, casting the "data" pointer appropriately
    fields = set(ret._datamodel._fields)
    for k in sorted(fields):
        val = getattr(ary, k)
        if k == 'data':
            ptrty = ret.data.type
            ret.data = builder.bitcast(val, ptrty)
        else:
            setattr(ret, k, val)
    ok = _change_dtype(context, builder, aryty, retty, ret)
    fail = builder.icmp_unsigned('==', ok, lc.Constant.int(ok.type, 0))
    with builder.if_then(fail):
        msg = "new type not compatible with array"
        context.call_conv.return_user_exc(builder, ValueError, (msg,))
    res = ret._getvalue()
    return impl_ret_borrowed(context, builder, sig.return_type, res)
# ------------------------------------------------------------------------------
# Array attributes
@lower_getattr(types.Array, "dtype")
def array_dtype(context, builder, typ, value):
    # The dtype is fully determined by the type; a dummy value suffices
    return impl_ret_untracked(context, builder, typ,
                              context.get_dummy_value())
@lower_getattr(types.Array, "shape")
@lower_getattr(types.MemoryView, "shape")
def array_shape(context, builder, typ, value):
    # Read the shape tuple straight out of the array struct
    struct = make_array(typ)(context, builder, value)
    return impl_ret_untracked(context, builder, typ, struct.shape)
@lower_getattr(types.Array, "strides")
@lower_getattr(types.MemoryView, "strides")
def array_strides(context, builder, typ, value):
    # Read the strides tuple straight out of the array struct
    struct = make_array(typ)(context, builder, value)
    return impl_ret_untracked(context, builder, typ, struct.strides)
@lower_getattr(types.Array, "ndim")
@lower_getattr(types.MemoryView, "ndim")
def array_ndim(context, builder, typ, value):
    # ndim is a compile-time constant of the array type
    ndim = context.get_constant(types.intp, typ.ndim)
    return impl_ret_untracked(context, builder, typ, ndim)
@lower_getattr(types.Array, "size")
def array_size(context, builder, typ, value):
    # size is the cached element count stored in the struct
    struct = make_array(typ)(context, builder, value)
    return impl_ret_untracked(context, builder, typ, struct.nitems)
@lower_getattr(types.Array, "itemsize")
@lower_getattr(types.MemoryView, "itemsize")
def array_itemsize(context, builder, typ, value):
    # itemsize is stored directly in the struct
    struct = make_array(typ)(context, builder, value)
    return impl_ret_untracked(context, builder, typ, struct.itemsize)
@lower_getattr(types.MemoryView, "nbytes")
def array_nbytes(context, builder, typ, value):
    """nbytes = size * itemsize"""
    struct = make_array(typ)(context, builder, value)
    nbytes = builder.mul(struct.nitems, struct.itemsize)
    return impl_ret_untracked(context, builder, typ, nbytes)
@lower_getattr(types.MemoryView, "contiguous")
def array_contiguous(context, builder, typ, value):
    # Derived statically from the type's layout
    flag = context.get_constant(types.boolean, typ.is_contig)
    return impl_ret_untracked(context, builder, typ, flag)
@lower_getattr(types.MemoryView, "c_contiguous")
def array_c_contiguous(context, builder, typ, value):
    # Derived statically from the type's layout
    flag = context.get_constant(types.boolean, typ.is_c_contig)
    return impl_ret_untracked(context, builder, typ, flag)
@lower_getattr(types.MemoryView, "f_contiguous")
def array_f_contiguous(context, builder, typ, value):
    # Derived statically from the type's layout
    flag = context.get_constant(types.boolean, typ.is_f_contig)
    return impl_ret_untracked(context, builder, typ, flag)
@lower_getattr(types.MemoryView, "readonly")
def array_readonly(context, builder, typ, value):
    # readonly is the negation of the type's mutability flag
    flag = context.get_constant(types.boolean, not typ.mutable)
    return impl_ret_untracked(context, builder, typ, flag)
# array.ctypes
@lower_getattr(types.Array, "ctypes")
def array_ctypes(context, builder, typ, value):
    """Build an ArrayCTypes structure sharing the array's data pointer."""
    struct = make_array(typ)(context, builder, value)
    act = types.ArrayCTypes(typ)
    ctinfo = context.make_helper(builder, act)
    # Share data and meminfo so the buffer stays alive with the result
    ctinfo.data = struct.data
    ctinfo.meminfo = struct.meminfo
    return impl_ret_borrowed(context, builder, act, ctinfo._getvalue())
@lower_getattr(types.ArrayCTypes, "data")
def array_ctypes_data(context, builder, typ, value):
    ctinfo = context.make_helper(builder, typ, value=value)
    # Expose the data pointer as a pointer-sized integer
    as_int = builder.ptrtoint(ctinfo.data,
                              context.get_value_type(types.intp))
    return impl_ret_untracked(context, builder, typ, as_int)
@lower_cast(types.ArrayCTypes, types.CPointer)
@lower_cast(types.ArrayCTypes, types.voidptr)
def array_ctypes_to_pointer(context, builder, fromty, toty, val):
    ctinfo = context.make_helper(builder, fromty, value=val)
    # Retype the raw data pointer to the requested pointer type
    casted = builder.bitcast(ctinfo.data, context.get_value_type(toty))
    return impl_ret_untracked(context, builder, toty, casted)
def _call_contiguous_check(checker, context, builder, aryty, ary):
    """Helper to invoke the contiguous checker function on an array

    Args
    ----
    checker :
        the ``is_contiguous`` or ``is_fortran`` predicate; a jitted
        function taking (shape, strides, itemsize) and returning a bool.
        (Docstring previously cited ``numba.numpy_supports`` — that module
        path looks stale; TODO confirm the current home of these helpers.)
    context : target context
    builder : llvm ir builder
    aryty : numba type
    ary : llvm value
    """
    ary = make_array(aryty)(context, builder, value=ary)
    tup_intp = types.UniTuple(types.intp, aryty.ndim)
    # Item size is an ABI constant of the dtype
    itemsize = context.get_abi_sizeof(context.get_value_type(aryty.dtype))
    check_sig = signature(types.bool_, tup_intp, tup_intp, types.intp)
    check_args = [ary.shape, ary.strides,
                  context.get_constant(types.intp, itemsize)]
    is_contig = context.compile_internal(builder, checker, check_sig,
                                         check_args)
    return is_contig
# array.flags
@lower_getattr(types.Array, "flags")
def array_flags(context, builder, typ, value):
    """Build an ArrayFlags helper referencing the parent array."""
    flagsobj = context.make_helper(builder, types.ArrayFlags(typ))
    # The flags object keeps the parent array alive
    flagsobj.parent = value
    res = flagsobj._getvalue()
    context.nrt.incref(builder, typ, value)
    return impl_ret_new_ref(context, builder, typ, res)
@lower_getattr(types.ArrayFlags, "contiguous")
@lower_getattr(types.ArrayFlags, "c_contiguous")
def array_flags_c_contiguous(context, builder, typ, value):
    if typ.array_type.layout == 'C':
        # Known contiguous from the static layout alone
        res = context.get_constant(types.boolean, True)
    else:
        # Any other layout can still be contiguous; check strides at runtime
        flagsobj = context.make_helper(builder, typ, value=value)
        res = _call_contiguous_check(is_contiguous, context, builder,
                                     typ.array_type, flagsobj.parent)
    return impl_ret_untracked(context, builder, typ, res)
@lower_getattr(types.ArrayFlags, "f_contiguous")
def array_flags_f_contiguous(context, builder, typ, value):
    if typ.array_type.layout == 'F':
        layout = typ.array_type.layout
        # 1-D arrays are F-contiguous whenever they are C-contiguous
        val = layout == 'F' if typ.array_type.ndim > 1 else layout in 'CF'
        res = context.get_constant(types.boolean, val)
    else:
        # Any other layout can still be contiguous; check strides at runtime
        flagsobj = context.make_helper(builder, typ, value=value)
        res = _call_contiguous_check(is_fortran, context, builder,
                                     typ.array_type, flagsobj.parent)
    return impl_ret_untracked(context, builder, typ, res)
# ------------------------------------------------------------------------------
# .real / .imag
@lower_getattr(types.Array, "real")
def array_real_part(context, builder, typ, value):
    """
    Implement the ``.real`` attribute: a float view for complex arrays,
    the identity for other numeric arrays.
    """
    if typ.dtype in types.complex_domain:
        return array_complex_attr(context, builder, typ, value, attr='real')
    elif typ.dtype in types.number_domain:
        # as an identity function
        return impl_ret_borrowed(context, builder, typ, value)
    else:
        # Fix: was `type.dtype` (the builtin `type`), which raised
        # AttributeError instead of the intended NotImplementedError.
        raise NotImplementedError('unsupported .real for {}'.format(typ.dtype))
@lower_getattr(types.Array, "imag")
def array_imag_part(context, builder, typ, value):
    """
    Implement the ``.imag`` attribute: a float view for complex arrays,
    a read-only zeros array for other numeric arrays.
    """
    if typ.dtype in types.complex_domain:
        return array_complex_attr(context, builder, typ, value, attr='imag')
    elif typ.dtype in types.number_domain:
        # return a readonly zero array
        sig = signature(typ.copy(readonly=True), typ)
        return numpy_zeros_like_nd(context, builder, sig, [value])
    else:
        # Fix: was `type.dtype` (the builtin `type`), which raised
        # AttributeError instead of the intended NotImplementedError.
        raise NotImplementedError('unsupported .imag for {}'.format(typ.dtype))
@overload_method(types.Array, 'conj')
@overload_method(types.Array, 'conjugate')
def array_conj(arr):
    # Both method spellings delegate to np.conj
    def impl(arr):
        return np.conj(arr)
    return impl
def array_complex_attr(context, builder, typ, value, attr):
    """
    Given a complex array, it's memory layout is:
        R C R C R C
        ^   ^   ^
    (`R` indicates a float for the real part;
     `C` indicates a float for the imaginary part;
     the `^` indicates the start of each element)
    To get the real part, we can simply change the dtype and itemsize to that
    of the underlying float type.  The new layout is:
        R x R x R x
        ^   ^   ^
    (`x` indicates unused)
    A load operation will use the dtype to determine the number of bytes to
    load.
    To get the imaginary part, we shift the pointer by 1 float offset and
    change the dtype and itemsize.  The new layout is:
        x C x C x C
          ^   ^   ^
    """
    if attr not in ['real', 'imag'] or typ.dtype not in types.complex_domain:
        raise NotImplementedError("cannot get attribute `{}`".format(attr))
    arrayty = make_array(typ)
    array = arrayty(context, builder, value)
    # sizeof underlying float type
    flty = typ.dtype.underlying_float
    sizeof_flty = context.get_abi_sizeof(context.get_data_type(flty))
    itemsize = array.itemsize.type(sizeof_flty)
    # cast data pointer to float type
    llfltptrty = context.get_value_type(flty).as_pointer()
    dataptr = builder.bitcast(array.data, llfltptrty)
    # add offset (one float) to point at the imaginary part
    if attr == 'imag':
        dataptr = builder.gep(dataptr, [ir.IntType(32)(1)])
    # make result: an 'A'-layout view sharing the original's memory,
    # with only data pointer and itemsize changed
    resultty = typ.copy(dtype=flty, layout='A')
    result = make_array(resultty)(context, builder)
    repl = dict(data=dataptr, itemsize=itemsize)
    cgutils.copy_struct(result, array, repl)
    return impl_ret_borrowed(context, builder, resultty, result._getvalue())
# ------------------------------------------------------------------------------
# DType attribute
def dtype_type(context, builder, dtypety, dtypeval):
    """Implement the ``.type`` / ``.kind`` attributes of dtype values."""
    # Just return a dummy opaque value
    return context.get_dummy_value()
# Register the same dummy implementation for both attributes.
lower_getattr(types.DType, 'type')(dtype_type)
lower_getattr(types.DType, 'kind')(dtype_type)
# ------------------------------------------------------------------------------
# Structured / record lookup
@lower_getattr_generic(types.Array)
def array_record_getattr(context, builder, typ, value, attr):
    """
    Generic getattr() implementation for record arrays: fetch the given
    record member, i.e. a subarray.
    """
    arrayty = make_array(typ)
    array = arrayty(context, builder, value)
    rectype = typ.dtype
    if not isinstance(rectype, types.Record):
        raise NotImplementedError("attribute %r of %s not defined"
                                  % (attr, typ))
    dtype = rectype.typeof(attr)
    offset = rectype.offset(attr)
    # Result is an 'A'-layout view: same shape/strides as the parent,
    # data pointer shifted by the member's byte offset within the record.
    resty = typ.copy(dtype=dtype, layout='A')
    raryty = make_array(resty)
    rary = raryty(context, builder)
    constoffset = context.get_constant(types.intp, offset)
    newdataptr = cgutils.pointer_add(
        builder, array.data, constoffset, return_type=rary.data.type,
    )
    datasize = context.get_abi_sizeof(context.get_data_type(dtype))
    # Share the parent's meminfo so the view keeps the data alive.
    populate_array(rary,
                   data=newdataptr,
                   shape=array.shape,
                   strides=array.strides,
                   itemsize=context.get_constant(types.intp, datasize),
                   meminfo=array.meminfo,
                   parent=array.parent)
    res = rary._getvalue()
    return impl_ret_borrowed(context, builder, resty, res)
@lower_builtin('static_getitem', types.Array, types.StringLiteral)
def array_record_getitem(context, builder, sig, args):
    """
    arr["field"]: redirect to the record-member getattr implementation.
    """
    value, field = args
    if not isinstance(field, str):
        # Not a field name: fall back to the normal getitem lowering
        raise NotImplementedError
    return array_record_getattr(context, builder, sig.args[0], value, field)
@lower_getattr_generic(types.Record)
def record_getattr(context, builder, typ, value, attr):
    """
    Generic getattr() implementation for records: fetch the given
    record member, i.e. a scalar.
    """
    context.sentry_record_alignment(typ, attr)
    offset = typ.offset(attr)
    elemty = typ.typeof(attr)
    if isinstance(elemty, types.NestedArray):
        # Only a nested array's *data* is stored in a structured array,
        # so we create an array structure to point to that data.
        aryty = make_array(elemty)
        ary = aryty(context, builder)
        dtype = elemty.dtype
        # Shape and strides are compile-time constants of the nested
        # array type.
        newshape = [context.get_constant(types.intp, s) for s in
                    elemty.shape]
        newstrides = [context.get_constant(types.intp, s) for s in
                      elemty.strides]
        newdata = cgutils.get_record_member(builder, value, offset,
                                            context.get_data_type(dtype))
        # meminfo/parent are None: the view does not own its data.
        populate_array(
            ary,
            data=newdata,
            shape=cgutils.pack_array(builder, newshape),
            strides=cgutils.pack_array(builder, newstrides),
            itemsize=context.get_constant(types.intp, elemty.size),
            meminfo=None,
            parent=None,
        )
        res = ary._getvalue()
        return impl_ret_borrowed(context, builder, typ, res)
    else:
        # Scalar member: load it straight out of the record's bytes.
        dptr = cgutils.get_record_member(builder, value, offset,
                                         context.get_data_type(elemty))
        # Unaligned load when the record type is packed.
        align = None if typ.aligned else 1
        res = context.unpack_value(builder, elemty, dptr, align)
        return impl_ret_borrowed(context, builder, typ, res)
@lower_setattr_generic(types.Record)
def record_setattr(context, builder, sig, args, attr):
    """
    Generic setattr() implementation for records: set the given
    record member, i.e. a scalar.
    """
    typ, valty = sig.args
    target, val = args
    context.sentry_record_alignment(typ, attr)
    offset = typ.offset(attr)
    elemty = typ.typeof(attr)
    dptr = cgutils.get_record_member(builder, target, offset,
                                     context.get_data_type(elemty))
    # Coerce the incoming value to the member's type before storing.
    val = context.cast(builder, val, valty, elemty)
    # Unaligned store when the record type is packed.
    align = None if typ.aligned else 1
    context.pack_value(builder, elemty, val, dptr, align=align)
@lower_builtin('static_getitem', types.Record, types.StringLiteral)
def record_getitem(context, builder, sig, args):
    """
    Record.__getitem__ redirects to getattr()
    """
    recty = sig.args[0]
    rec, field = args
    getattr_impl = context.get_getattr(recty, field)
    return getattr_impl(context, builder, recty, rec, field)
@lower_builtin('static_setitem', types.Record, types.StringLiteral, types.Any)
def record_setitem(context, builder, sig, args):
    """
    Record.__setitem__ redirects to setattr()
    """
    recty, _, valty = sig.args
    rec, field, val = args
    setattr_sig = signature(sig.return_type, recty, valty)
    setattr_impl = context.get_setattr(field, setattr_sig)
    assert setattr_impl is not None
    return setattr_impl(builder, (rec, val))
# ------------------------------------------------------------------------------
# Constant arrays and records
@lower_constant(types.Array)
def constant_array(context, builder, ty, pyval):
    """
    Create a constant array (mechanism is target-dependent).
    """
    # Delegate to the target context, which decides how constant storage
    # is materialized for this target.
    return context.make_constant_array(builder, ty, pyval)
@lower_constant(types.Record)
def constant_record(context, builder, ty, pyval):
    """
    Create a record constant as a stack-allocated array of bytes.

    The record's raw bytes are copied into an LLVM i8 array constant and
    spilled to an alloca so the result is addressable.
    """
    lty = ir.ArrayType(ir.IntType(8), pyval.nbytes)
    # np.ndarray.tostring() was deprecated in NumPy 1.19 and removed in
    # NumPy 2.0; tobytes() is the exact, supported equivalent.
    val = lty(bytearray(pyval.tobytes()))
    return cgutils.alloca_once_value(builder, val)
@lower_constant(types.Bytes)
def constant_bytes(context, builder, ty, pyval):
    """
    Create a constant array from bytes (mechanism is target-dependent).
    """
    # Copy the bytes into a uint8 ndarray and lower it like any other
    # constant array.
    as_u8 = np.array(bytearray(pyval), dtype=np.uint8)
    return context.make_constant_array(builder, ty, as_u8)
# ------------------------------------------------------------------------------
# Comparisons
@lower_builtin(operator.is_, types.Array, types.Array)
def array_is(context, builder, sig, args):
    """
    Implement `a is b` for arrays: same type, shape, strides and data
    pointer.
    """
    lhs_ty, rhs_ty = sig.args
    if lhs_ty != rhs_ty:
        # Differently-typed arrays can never be the same object
        return cgutils.false_bit
    def is_impl(a, b):
        if a.shape != b.shape:
            return False
        if a.strides != b.strides:
            return False
        return a.ctypes.data == b.ctypes.data
    return context.compile_internal(builder, is_impl, sig, args)
# ------------------------------------------------------------------------------
# builtin `np.flat` implementation
def make_array_flat_cls(flatiterty):
    """
    Return the Structure representation of the given *flatiterty* (an
    instance of types.NumpyFlatType).
    """
    # Shares machinery with ndenumerate(); only the yielded value differs.
    return _make_flattening_iter_cls(flatiterty, 'flat')
def make_array_ndenumerate_cls(nditerty):
    """
    Return the Structure representation of the given *nditerty* (an
    instance of types.NumpyNdEnumerateType).
    """
    # Shares machinery with .flat; only the yielded value differs.
    return _make_flattening_iter_cls(nditerty, 'ndenumerate')
def _increment_indices(context, builder, ndim, shape, indices, end_flag=None,
                       loop_continue=None, loop_break=None):
    """
    Emit odometer-style advancement of the *indices* array over *shape*:
    the innermost (last) dimension is bumped first; on overflow it is reset
    to zero and the next outer dimension is bumped, and so on.  If every
    dimension overflows, *end_flag* (if given) is set true.
    *loop_continue* / *loop_break* are optional callbacks invoked with the
    dimension number when iteration continues inside / leaves that
    dimension.
    """
    zero = context.get_constant(types.intp, 0)
    bbend = builder.append_basic_block('end_increment')
    if end_flag is not None:
        builder.store(cgutils.false_byte, end_flag)
    for dim in reversed(range(ndim)):
        idxptr = cgutils.gep_inbounds(builder, indices, dim)
        idx = cgutils.increment_index(builder, builder.load(idxptr))
        count = shape[dim]
        in_bounds = builder.icmp_signed('<', idx, count)
        with cgutils.if_likely(builder, in_bounds):
            # New index is still in bounds
            builder.store(idx, idxptr)
            if loop_continue is not None:
                loop_continue(dim)
            builder.branch(bbend)
        # Index out of bounds => reset it and proceed it to outer index
        builder.store(zero, idxptr)
        if loop_break is not None:
            loop_break(dim)
    # All dimensions overflowed: iteration is exhausted.
    if end_flag is not None:
        builder.store(cgutils.true_byte, end_flag)
    builder.branch(bbend)
    builder.position_at_end(bbend)
def _increment_indices_array(context, builder, arrty, arr, indices,
                             end_flag=None):
    """Advance *indices* one step over *arr*'s shape
    (see _increment_indices)."""
    dim_sizes = cgutils.unpack_tuple(builder, arr.shape, arrty.ndim)
    _increment_indices(context, builder, arrty.ndim, dim_sizes, indices,
                       end_flag)
def make_nditer_cls(nditerty):
    """
    Return the Structure representation of the given *nditerty* (an
    instance of types.NumpyNdIterType).
    """
    ndim = nditerty.ndim
    layout = nditerty.layout
    narrays = len(nditerty.arrays)
    # One stored index per dimension when shaped indexing is needed,
    # otherwise a single flattened index suffices.
    nshapes = ndim if nditerty.need_shaped_indexing else 1
    class BaseSubIter(object):
        """
        Base class for sub-iterators of a nditer() instance.
        """
        def __init__(self, nditer, member_name, start_dim, end_dim):
            self.nditer = nditer
            self.member_name = member_name
            self.start_dim = start_dim
            self.end_dim = end_dim
            self.ndim = end_dim - start_dim
        def set_member_ptr(self, ptr):
            setattr(self.nditer, self.member_name, ptr)
        @utils.cached_property
        def member_ptr(self):
            return getattr(self.nditer, self.member_name)
        def init_specific(self, context, builder):
            pass
        def loop_continue(self, context, builder, logical_dim):
            pass
        def loop_break(self, context, builder, logical_dim):
            pass
    class FlatSubIter(BaseSubIter):
        """
        Sub-iterator walking a contiguous array in physical order, with
        support for broadcasting (the index is reset on the outer dimension).
        """
        def init_specific(self, context, builder):
            zero = context.get_constant(types.intp, 0)
            self.set_member_ptr(cgutils.alloca_once_value(builder, zero))
        def compute_pointer(self, context, builder, indices, arrty, arr):
            index = builder.load(self.member_ptr)
            return builder.gep(arr.data, [index])
        def loop_continue(self, context, builder, logical_dim):
            if logical_dim == self.ndim - 1:
                # Only increment index inside innermost logical dimension
                index = builder.load(self.member_ptr)
                index = cgutils.increment_index(builder, index)
                builder.store(index, self.member_ptr)
        def loop_break(self, context, builder, logical_dim):
            if logical_dim == 0:
                # At the exit of outermost logical dimension, reset index
                zero = context.get_constant(types.intp, 0)
                builder.store(zero, self.member_ptr)
            elif logical_dim == self.ndim - 1:
                # Inside innermost logical dimension, increment index
                index = builder.load(self.member_ptr)
                index = cgutils.increment_index(builder, index)
                builder.store(index, self.member_ptr)
    class TrivialFlatSubIter(BaseSubIter):
        """
        Sub-iterator walking a contiguous array in physical order,
        *without* support for broadcasting.
        """
        def init_specific(self, context, builder):
            assert not nditerty.need_shaped_indexing
        def compute_pointer(self, context, builder, indices, arrty, arr):
            assert len(indices) <= 1, len(indices)
            return builder.gep(arr.data, indices)
    class IndexedSubIter(BaseSubIter):
        """
        Sub-iterator walking an array in logical order.
        """
        def compute_pointer(self, context, builder, indices, arrty, arr):
            assert len(indices) == self.ndim
            return cgutils.get_item_pointer(context, builder, arrty, arr,
                                            indices, wraparound=False)
    class ZeroDimSubIter(BaseSubIter):
        """
        Sub-iterator "walking" a 0-d array.
        """
        def compute_pointer(self, context, builder, indices, arrty, arr):
            return arr.data
    class ScalarSubIter(BaseSubIter):
        """
        Sub-iterator "walking" a scalar value.
        """
        def compute_pointer(self, context, builder, indices, arrty, arr):
            return arr
    class NdIter(cgutils.create_struct_proxy(nditerty)):
        """
        .nditer() implementation.
        Note: 'F' layout means the shape is iterated in reverse logical order,
        so indices and shapes arrays have to be reversed as well.
        """
        @utils.cached_property
        def subiters(self):
            # Instantiate one sub-iterator per indexer descriptor of the
            # nditer type.
            l = []
            factories = {'flat': FlatSubIter if nditerty.need_shaped_indexing
                         else TrivialFlatSubIter,
                         'indexed': IndexedSubIter,
                         '0d': ZeroDimSubIter,
                         'scalar': ScalarSubIter,
                         }
            for i, sub in enumerate(nditerty.indexers):
                kind, start_dim, end_dim, _ = sub
                member_name = 'index%d' % i
                factory = factories[kind]
                l.append(factory(self, member_name, start_dim, end_dim))
            return l
        def init_specific(self, context, builder, arrtys, arrays):
            """
            Initialize the nditer() instance for the specific array inputs.
            """
            zero = context.get_constant(types.intp, 0)
            # Store inputs
            self.arrays = context.make_tuple(builder, types.Tuple(arrtys),
                                             arrays)
            # Create slots for scalars
            for i, ty in enumerate(arrtys):
                if not isinstance(ty, types.Array):
                    member_name = 'scalar%d' % i
                    # XXX as_data()?
                    slot = cgutils.alloca_once_value(builder, arrays[i])
                    setattr(self, member_name, slot)
            arrays = self._arrays_or_scalars(context, builder, arrtys, arrays)
            # Extract iterator shape (the shape of the most-dimensional input)
            main_shape_ty = types.UniTuple(types.intp, ndim)
            main_shape = None
            main_nitems = None
            for i, arrty in enumerate(arrtys):
                if isinstance(arrty, types.Array) and arrty.ndim == ndim:
                    main_shape = arrays[i].shape
                    main_nitems = arrays[i].nitems
                    break
            else:
                # Only scalar inputs => synthesize a dummy shape
                assert ndim == 0
                main_shape = context.make_tuple(builder, main_shape_ty, ())
                main_nitems = context.get_constant(types.intp, 1)
            # Validate shapes of array inputs
            def check_shape(shape, main_shape):
                n = len(shape)
                for i in range(n):
                    if shape[i] != main_shape[len(main_shape) - n + i]:
                        raise ValueError("nditer(): operands could not be "
                                         "broadcast together")
            for arrty, arr in zip(arrtys, arrays):
                if isinstance(arrty, types.Array) and arrty.ndim > 0:
                    sig = signature(types.none,
                                    types.UniTuple(types.intp, arrty.ndim),
                                    main_shape_ty)
                    context.compile_internal(builder, check_shape,
                                             sig, (arr.shape, main_shape))
            # Compute shape and size
            shapes = cgutils.unpack_tuple(builder, main_shape)
            if layout == 'F':
                shapes = shapes[::-1]
            # If shape is empty, mark iterator exhausted
            shape_is_empty = builder.icmp_signed('==', main_nitems, zero)
            exhausted = builder.select(shape_is_empty, cgutils.true_byte,
                                       cgutils.false_byte)
            if not nditerty.need_shaped_indexing:
                # Flatten shape to make iteration faster on small innermost
                # dimensions (e.g. a (100000, 3) shape)
                shapes = (main_nitems,)
            assert len(shapes) == nshapes
            # Zero-initialize the per-dimension index slots.
            indices = cgutils.alloca_once(builder, zero.type, size=nshapes)
            for dim in range(nshapes):
                idxptr = cgutils.gep_inbounds(builder, indices, dim)
                builder.store(zero, idxptr)
            self.indices = indices
            self.shape = cgutils.pack_array(builder, shapes, zero.type)
            self.exhausted = cgutils.alloca_once_value(builder, exhausted)
            # Initialize subiterators
            for subiter in self.subiters:
                subiter.init_specific(context, builder)
        def iternext_specific(self, context, builder, result):
            """
            Compute next iteration of the nditer() instance.
            """
            bbend = builder.append_basic_block('end')
            # Branch early if exhausted
            exhausted = cgutils.as_bool_bit(builder,
                                            builder.load(self.exhausted))
            with cgutils.if_unlikely(builder, exhausted):
                result.set_valid(False)
                builder.branch(bbend)
            arrtys = nditerty.arrays
            arrays = cgutils.unpack_tuple(builder, self.arrays)
            arrays = self._arrays_or_scalars(context, builder, arrtys, arrays)
            indices = self.indices
            # Compute iterated results
            result.set_valid(True)
            views = self._make_views(context, builder, indices, arrtys, arrays)
            views = [v._getvalue() for v in views]
            # Single input yields the view directly; multiple inputs yield
            # a tuple of views.
            if len(views) == 1:
                result.yield_(views[0])
            else:
                result.yield_(context.make_tuple(builder, nditerty.yield_type,
                                                 views))
            shape = cgutils.unpack_tuple(builder, self.shape)
            _increment_indices(context, builder, len(shape), shape,
                               indices, self.exhausted,
                               functools.partial(self._loop_continue,
                                                 context,
                                                 builder),
                               functools.partial(self._loop_break,
                                                 context,
                                                 builder),
                               )
            builder.branch(bbend)
            builder.position_at_end(bbend)
        def _loop_continue(self, context, builder, dim):
            # Forward the event to every sub-iterator covering *dim*.
            for sub in self.subiters:
                if sub.start_dim <= dim < sub.end_dim:
                    sub.loop_continue(context, builder, dim - sub.start_dim)
        def _loop_break(self, context, builder, dim):
            # Forward the event to every sub-iterator covering *dim*.
            for sub in self.subiters:
                if sub.start_dim <= dim < sub.end_dim:
                    sub.loop_break(context, builder, dim - sub.start_dim)
        def _make_views(self, context, builder, indices, arrtys, arrays):
            """
            Compute the views to be yielded.
            """
            views = [None] * narrays
            indexers = nditerty.indexers
            subiters = self.subiters
            rettys = nditerty.yield_type
            if isinstance(rettys, types.BaseTuple):
                rettys = list(rettys)
            else:
                rettys = [rettys]
            indices = [builder.load(cgutils.gep_inbounds(builder, indices, i))
                       for i in range(nshapes)]
            for sub, subiter in zip(indexers, subiters):
                _, _, _, array_indices = sub
                sub_indices = indices[subiter.start_dim:subiter.end_dim]
                if layout == 'F':
                    sub_indices = sub_indices[::-1]
                for i in array_indices:
                    assert views[i] is None
                    views[i] = self._make_view(context, builder, sub_indices,
                                               rettys[i],
                                               arrtys[i], arrays[i], subiter)
            assert all(v for v in views)
            return views
        def _make_view(self, context, builder, indices, retty, arrty, arr,
                       subiter):
            """
            Compute a 0d view for a given input array.
            """
            assert isinstance(retty, types.Array) and retty.ndim == 0
            ptr = subiter.compute_pointer(context, builder, indices, arrty, arr)
            view = context.make_array(retty)(context, builder)
            itemsize = get_itemsize(context, retty)
            shape = context.make_tuple(builder, types.UniTuple(types.intp, 0),
                                       ())
            strides = context.make_tuple(builder, types.UniTuple(types.intp, 0),
                                         ())
            # HACK: meminfo=None avoids expensive refcounting operations
            # on ephemeral views
            populate_array(view, ptr, shape, strides, itemsize, meminfo=None)
            return view
        def _arrays_or_scalars(self, context, builder, arrtys, arrays):
            # Return a list of either array structures or pointers to
            # scalar slots
            l = []
            for i, (arrty, arr) in enumerate(zip(arrtys, arrays)):
                if isinstance(arrty, types.Array):
                    l.append(context.make_array(arrty)(context,
                                                       builder,
                                                       value=arr))
                else:
                    l.append(getattr(self, "scalar%d" % i))
            return l
    return NdIter
def make_ndindex_cls(nditerty):
    """
    Return the Structure representation of the given *nditerty* (an
    instance of types.NumpyNdIndexType).
    """
    ndim = nditerty.ndim
    class NdIndexIter(cgutils.create_struct_proxy(nditerty)):
        """
        .ndindex() implementation.
        """
        def init_specific(self, context, builder, shapes):
            # Allocate and zero-initialize the per-dimension index slots.
            zero = context.get_constant(types.intp, 0)
            indices = cgutils.alloca_once(builder, zero.type,
                                          size=context.get_constant(types.intp,
                                                                    ndim))
            exhausted = cgutils.alloca_once_value(builder, cgutils.false_byte)
            for dim in range(ndim):
                idxptr = cgutils.gep_inbounds(builder, indices, dim)
                builder.store(zero, idxptr)
                # 0-sized dimensions really indicate an empty array,
                # but we have to catch that condition early to avoid
                # a bug inside the iteration logic.
                dim_size = shapes[dim]
                dim_is_empty = builder.icmp(lc.ICMP_EQ, dim_size, zero)
                with cgutils.if_unlikely(builder, dim_is_empty):
                    builder.store(cgutils.true_byte, exhausted)
            self.indices = indices
            self.exhausted = exhausted
            self.shape = cgutils.pack_array(builder, shapes, zero.type)
        def iternext_specific(self, context, builder, result):
            zero = context.get_constant(types.intp, 0)
            bbend = builder.append_basic_block('end')
            # Branch early if exhausted
            exhausted = cgutils.as_bool_bit(builder,
                                            builder.load(self.exhausted))
            with cgutils.if_unlikely(builder, exhausted):
                result.set_valid(False)
                builder.branch(bbend)
            # Yield the current index tuple, then advance odometer-style.
            indices = [builder.load(cgutils.gep_inbounds(builder,
                                                         self.indices,
                                                         dim))
                       for dim in range(ndim)]
            for load in indices:
                # Indices are always non-negative; help later optimizations.
                mark_positive(builder, load)
            result.yield_(cgutils.pack_array(builder, indices, zero.type))
            result.set_valid(True)
            shape = cgutils.unpack_tuple(builder, self.shape, ndim)
            _increment_indices(context, builder, ndim, shape,
                               self.indices, self.exhausted)
            builder.branch(bbend)
            builder.position_at_end(bbend)
    return NdIndexIter
def _make_flattening_iter_cls(flatiterty, kind):
    """
    Build the struct-proxy class implementing .flat (kind == 'flat') or
    np.ndenumerate (kind == 'ndenumerate') over *flatiterty*'s array type.
    C-contiguous arrays get a fast single-index walker; other layouts get
    a generic pointer-tracking walker.
    """
    assert kind in ('flat', 'ndenumerate')
    array_type = flatiterty.array_type
    if array_type.layout == 'C':
        class CContiguousFlatIter(cgutils.create_struct_proxy(flatiterty)):
            """
            .flat() / .ndenumerate() implementation for C-contiguous arrays.
            """
            def init_specific(self, context, builder, arrty, arr):
                zero = context.get_constant(types.intp, 0)
                self.index = cgutils.alloca_once_value(builder, zero)
                # We can't trust strides[-1] to always contain the right
                # step value, see
                # http://docs.scipy.org/doc/numpy-dev/release.html#npy-relaxed-strides-checking # noqa: E501
                self.stride = arr.itemsize
                if kind == 'ndenumerate':
                    # Zero-initialize the indices array.
                    indices = cgutils.alloca_once(
                        builder, zero.type,
                        size=context.get_constant(types.intp, arrty.ndim))
                    for dim in range(arrty.ndim):
                        idxptr = cgutils.gep_inbounds(builder, indices, dim)
                        builder.store(zero, idxptr)
                    self.indices = indices
            # NOTE: Using gep() instead of explicit pointer addition helps
            # LLVM vectorize the loop (since the stride is known and
            # constant). This is not possible in the non-contiguous case,
            # where the strides are unknown at compile-time.
            def iternext_specific(self, context, builder, arrty, arr, result):
                ndim = arrty.ndim
                nitems = arr.nitems
                index = builder.load(self.index)
                # Valid while the flat index is below the total item count.
                is_valid = builder.icmp(lc.ICMP_SLT, index, nitems)
                result.set_valid(is_valid)
                with cgutils.if_likely(builder, is_valid):
                    ptr = builder.gep(arr.data, [index])
                    value = load_item(context, builder, arrty, ptr)
                    if kind == 'flat':
                        result.yield_(value)
                    else:
                        # ndenumerate(): fetch and increment indices
                        indices = self.indices
                        idxvals = [builder.load(cgutils.gep_inbounds(builder,
                                                                     indices,
                                                                     dim))
                                   for dim in range(ndim)]
                        idxtuple = cgutils.pack_array(builder, idxvals)
                        result.yield_(
                            cgutils.make_anonymous_struct(builder,
                                                          [idxtuple, value]))
                        _increment_indices_array(context, builder, arrty,
                                                 arr, indices)
                    index = cgutils.increment_index(builder, index)
                    builder.store(index, self.index)
            def getitem(self, context, builder, arrty, arr, index):
                # Flat index maps directly to physical order.
                ptr = builder.gep(arr.data, [index])
                return load_item(context, builder, arrty, ptr)
            def setitem(self, context, builder, arrty, arr, index, value):
                # Flat index maps directly to physical order.
                ptr = builder.gep(arr.data, [index])
                store_item(context, builder, arrty, value, ptr)
        return CContiguousFlatIter
    else:
        class FlatIter(cgutils.create_struct_proxy(flatiterty)):
            """
            Generic .flat() / .ndenumerate() implementation for
            non-contiguous arrays.
            It keeps track of pointers along each dimension in order to
            minimize computations.
            """
            def init_specific(self, context, builder, arrty, arr):
                zero = context.get_constant(types.intp, 0)
                data = arr.data
                ndim = arrty.ndim
                shapes = cgutils.unpack_tuple(builder, arr.shape, ndim)
                indices = cgutils.alloca_once(
                    builder, zero.type, size=context.get_constant(types.intp,
                                                                  arrty.ndim))
                pointers = cgutils.alloca_once(
                    builder, data.type, size=context.get_constant(types.intp,
                                                                  arrty.ndim))
                exhausted = cgutils.alloca_once_value(builder,
                                                      cgutils.false_byte)
                # Initialize indices and pointers with their start values.
                for dim in range(ndim):
                    idxptr = cgutils.gep_inbounds(builder, indices, dim)
                    ptrptr = cgutils.gep_inbounds(builder, pointers, dim)
                    builder.store(data, ptrptr)
                    builder.store(zero, idxptr)
                    # 0-sized dimensions really indicate an empty array,
                    # but we have to catch that condition early to avoid
                    # a bug inside the iteration logic (see issue #846).
                    dim_size = shapes[dim]
                    dim_is_empty = builder.icmp(lc.ICMP_EQ, dim_size, zero)
                    with cgutils.if_unlikely(builder, dim_is_empty):
                        builder.store(cgutils.true_byte, exhausted)
                self.indices = indices
                self.pointers = pointers
                self.exhausted = exhausted
            def iternext_specific(self, context, builder, arrty, arr, result):
                ndim = arrty.ndim
                shapes = cgutils.unpack_tuple(builder, arr.shape, ndim)
                strides = cgutils.unpack_tuple(builder, arr.strides, ndim)
                indices = self.indices
                pointers = self.pointers
                zero = context.get_constant(types.intp, 0)
                bbend = builder.append_basic_block('end')
                # Catch already computed iterator exhaustion
                is_exhausted = cgutils.as_bool_bit(
                    builder, builder.load(self.exhausted))
                with cgutils.if_unlikely(builder, is_exhausted):
                    result.set_valid(False)
                    builder.branch(bbend)
                result.set_valid(True)
                # Current pointer inside last dimension
                last_ptr = cgutils.gep_inbounds(builder, pointers, ndim - 1)
                ptr = builder.load(last_ptr)
                value = load_item(context, builder, arrty, ptr)
                if kind == 'flat':
                    result.yield_(value)
                else:
                    # ndenumerate() => yield (indices, value)
                    idxvals = [builder.load(cgutils.gep_inbounds(builder,
                                                                 indices,
                                                                 dim))
                               for dim in range(ndim)]
                    idxtuple = cgutils.pack_array(builder, idxvals)
                    result.yield_(
                        cgutils.make_anonymous_struct(builder,
                                                      [idxtuple, value]))
                # Update indices and pointers by walking from inner
                # dimension to outer.
                for dim in reversed(range(ndim)):
                    idxptr = cgutils.gep_inbounds(builder, indices, dim)
                    idx = cgutils.increment_index(builder,
                                                  builder.load(idxptr))
                    count = shapes[dim]
                    stride = strides[dim]
                    in_bounds = builder.icmp(lc.ICMP_SLT, idx, count)
                    with cgutils.if_likely(builder, in_bounds):
                        # Index is valid => pointer can simply be incremented.
                        builder.store(idx, idxptr)
                        ptrptr = cgutils.gep_inbounds(builder, pointers, dim)
                        ptr = builder.load(ptrptr)
                        ptr = cgutils.pointer_add(builder, ptr, stride)
                        builder.store(ptr, ptrptr)
                        # Reset pointers in inner dimensions
                        for inner_dim in range(dim + 1, ndim):
                            ptrptr = cgutils.gep_inbounds(builder,
                                                          pointers,
                                                          inner_dim)
                            builder.store(ptr, ptrptr)
                        builder.branch(bbend)
                    # Reset index and continue with next dimension
                    builder.store(zero, idxptr)
                # End of array
                builder.store(cgutils.true_byte, self.exhausted)
                builder.branch(bbend)
                builder.position_at_end(bbend)
            def _ptr_for_index(self, context, builder, arrty, arr, index):
                ndim = arrty.ndim
                shapes = cgutils.unpack_tuple(builder, arr.shape, count=ndim)
                strides = cgutils.unpack_tuple(builder, arr.strides, count=ndim)
                # First convert the flattened index into a regular n-dim index
                indices = []
                for dim in reversed(range(ndim)):
                    indices.append(builder.urem(index, shapes[dim]))
                    index = builder.udiv(index, shapes[dim])
                indices.reverse()
                ptr = cgutils.get_item_pointer2(context, builder, arr.data,
                                                shapes, strides, arrty.layout,
                                                indices)
                return ptr
            def getitem(self, context, builder, arrty, arr, index):
                ptr = self._ptr_for_index(context, builder, arrty, arr, index)
                return load_item(context, builder, arrty, ptr)
            def setitem(self, context, builder, arrty, arr, index, value):
                ptr = self._ptr_for_index(context, builder, arrty, arr, index)
                store_item(context, builder, arrty, value, ptr)
        return FlatIter
@lower_getattr(types.Array, "flat")
def make_array_flatiter(context, builder, arrty, arr):
    """
    Implement the ``.flat`` attribute: build and initialize a flat
    iterator over *arr*.
    """
    flatitercls = make_array_flat_cls(types.NumpyFlatType(arrty))
    flatiter = flatitercls(context, builder)
    flatiter.array = arr
    arrcls = context.make_array(arrty)
    # Re-load the array as a proxy aliasing the copy stored inside the
    # iterator, so iterator state and array stay in sync.
    arr = arrcls(context, builder, ref=flatiter._get_ptr_by_name('array'))
    flatiter.init_specific(context, builder, arrty, arr)
    res = flatiter._getvalue()
    return impl_ret_borrowed(context, builder, types.NumpyFlatType(arrty), res)
@lower_builtin('iternext', types.NumpyFlatType)
@iternext_impl(RefType.BORROWED)
def iternext_numpy_flatiter(context, builder, sig, args, result):
    """Advance a .flat iterator by one element."""
    flatiterty, = sig.args
    flatiter_val, = args
    flatiter = make_array_flat_cls(flatiterty)(context, builder,
                                               value=flatiter_val)
    arrty = flatiterty.array_type
    arr = context.make_array(arrty)(context, builder, value=flatiter.array)
    flatiter.iternext_specific(context, builder, arrty, arr, result)
@lower_builtin(operator.getitem, types.NumpyFlatType, types.Integer)
def iternext_numpy_getitem(context, builder, sig, args):
    """flat[i]: load the i-th element in C order."""
    flatiterty = sig.args[0]
    flatiter_val, index = args
    flatiter = make_array_flat_cls(flatiterty)(context, builder,
                                               value=flatiter_val)
    arrty = flatiterty.array_type
    arr = context.make_array(arrty)(context, builder, value=flatiter.array)
    res = flatiter.getitem(context, builder, arrty, arr, index)
    return impl_ret_borrowed(context, builder, sig.return_type, res)
@lower_builtin(operator.setitem, types.NumpyFlatType, types.Integer,
               types.Any)
def iternext_numpy_getitem_any(context, builder, sig, args):
    """flat[i] = value: store at the i-th element in C order."""
    flatiterty = sig.args[0]
    flatiter_val, index, value = args
    flatiter = make_array_flat_cls(flatiterty)(context, builder,
                                               value=flatiter_val)
    arrty = flatiterty.array_type
    arr = context.make_array(arrty)(context, builder, value=flatiter.array)
    flatiter.setitem(context, builder, arrty, arr, index, value)
    # setitem has no meaningful return value
    return context.get_dummy_value()
@lower_builtin(len, types.NumpyFlatType)
def iternext_numpy_getitem_flat(context, builder, sig, args):
    """len(flat): total number of items of the underlying array."""
    flatiterty = sig.args[0]
    flatiter = make_array_flat_cls(flatiterty)(context, builder,
                                               value=args[0])
    arr = context.make_array(flatiterty.array_type)(context, builder,
                                                    value=flatiter.array)
    return arr.nitems
@lower_builtin(np.ndenumerate, types.Array)
def make_array_ndenumerate(context, builder, sig, args):
    """np.ndenumerate(arr): build and initialize the iterator structure."""
    arrty = sig.args[0]
    nditercls = make_array_ndenumerate_cls(types.NumpyNdEnumerateType(arrty))
    nditer = nditercls(context, builder)
    # Store the array in the iterator, then reload it as a proxy aliasing
    # that stored copy so both stay in sync.
    nditer.array = args[0]
    arr = context.make_array(arrty)(context, builder,
                                    ref=nditer._get_ptr_by_name('array'))
    nditer.init_specific(context, builder, arrty, arr)
    return impl_ret_borrowed(context, builder, sig.return_type,
                             nditer._getvalue())
@lower_builtin('iternext', types.NumpyNdEnumerateType)
@iternext_impl(RefType.BORROWED)
def iternext_numpy_nditer(context, builder, sig, args, result):
    """Advance an ndenumerate iterator by one element."""
    nditerty, = sig.args
    nditer = make_array_ndenumerate_cls(nditerty)(context, builder,
                                                  value=args[0])
    arrty = nditerty.array_type
    arr = context.make_array(arrty)(context, builder, value=nditer.array)
    nditer.iternext_specific(context, builder, arrty, arr, result)
@lower_builtin(pndindex, types.VarArg(types.Integer))
@lower_builtin(np.ndindex, types.VarArg(types.Integer))
def make_array_ndindex(context, builder, sig, args):
    """ndindex(*shape)"""
    # Normalize every dimension size argument to an intp
    shape = [context.cast(builder, arg, argty, types.intp)
             for argty, arg in zip(sig.args, args)]
    nditercls = make_ndindex_cls(types.NumpyNdIndexType(len(shape)))
    nditer = nditercls(context, builder)
    nditer.init_specific(context, builder, shape)
    return impl_ret_borrowed(context, builder, sig.return_type,
                             nditer._getvalue())
@lower_builtin(pndindex, types.BaseTuple)
@lower_builtin(np.ndindex, types.BaseTuple)
def make_array_ndindex_tuple(context, builder, sig, args):
    """ndindex(shape)"""
    ndim = sig.return_type.ndim
    if ndim == 0:
        shape = []
    else:
        # Unpack the shape tuple, normalizing each size to an intp
        idxty = sig.args[0].dtype
        raw_sizes = cgutils.unpack_tuple(builder, args[0], ndim)
        shape = [context.cast(builder, idx, idxty, types.intp)
                 for idx in raw_sizes]
    nditercls = make_ndindex_cls(types.NumpyNdIndexType(len(shape)))
    nditer = nditercls(context, builder)
    nditer.init_specific(context, builder, shape)
    return impl_ret_borrowed(context, builder, sig.return_type,
                             nditer._getvalue())
@lower_builtin('iternext', types.NumpyNdIndexType)
@iternext_impl(RefType.BORROWED)
def iternext_numpy_ndindex(context, builder, sig, args, result):
    """Advance an ndindex iterator by one index tuple."""
    nditerty, = sig.args
    nditer = make_ndindex_cls(nditerty)(context, builder, value=args[0])
    nditer.iternext_specific(context, builder, result)
@lower_builtin(np.nditer, types.Any)
def make_array_nditer(context, builder, sig, args):
    """
    nditer(...)
    """
    nditerty = sig.return_type
    # Accept either a tuple of operands or a single operand
    if isinstance(sig.args[0], types.BaseTuple):
        inputs = cgutils.unpack_tuple(builder, args[0])
    else:
        inputs = [args[0]]
    nditer = make_nditer_cls(nditerty)(context, builder)
    nditer.init_specific(context, builder, nditerty.arrays, inputs)
    return impl_ret_borrowed(context, builder, nditerty, nditer._getvalue())
@lower_builtin('iternext', types.NumpyNdIterType)
@iternext_impl(RefType.BORROWED)
def iternext_numpy_nditer2(context, builder, sig, args, result):
    """Advance an np.nditer iterator, yielding 0-d views."""
    nditerty, = sig.args
    nditer = make_nditer_cls(nditerty)(context, builder, value=args[0])
    nditer.iternext_specific(context, builder, result)
# ------------------------------------------------------------------------------
# Numpy array constructors
def _empty_nd_impl(context, builder, arrtype, shapes):
    """Utility function used for allocating a new array during LLVM code
    generation (lowering). Given a target context, builder, array
    type, and a tuple or list of lowered dimension sizes, returns a
    LLVM value pointing at a Numba runtime allocated array.

    Raises (at runtime, via the call convention) ValueError when the
    total allocation size overflows intp, mirroring NumPy's behavior.
    """
    arycls = make_array(arrtype)
    ary = arycls(context, builder)
    datatype = context.get_data_type(arrtype.dtype)
    itemsize = context.get_constant(types.intp, get_itemsize(context, arrtype))
    # compute array length
    # Accumulate the element count with overflow-checked multiplies;
    # the 1-bit `overflow` flag is OR-ed across every step.
    arrlen = context.get_constant(types.intp, 1)
    overflow = ir.Constant(ir.IntType(1), 0)
    for s in shapes:
        arrlen_mult = builder.smul_with_overflow(arrlen, s)
        arrlen = builder.extract_value(arrlen_mult, 0)
        overflow = builder.or_(
            overflow, builder.extract_value(arrlen_mult, 1)
        )
    if arrtype.ndim == 0:
        strides = ()
    elif arrtype.layout == 'C':
        # C order: strides grow from the last dimension backwards.
        strides = [itemsize]
        for dimension_size in reversed(shapes[1:]):
            strides.append(builder.mul(strides[-1], dimension_size))
        strides = tuple(reversed(strides))
    elif arrtype.layout == 'F':
        # Fortran order: strides grow from the first dimension forwards.
        strides = [itemsize]
        for dimension_size in shapes[:-1]:
            strides.append(builder.mul(strides[-1], dimension_size))
        strides = tuple(strides)
    else:
        raise NotImplementedError(
            "Don't know how to allocate array with layout '{0}'.".format(
                arrtype.layout))
    # Check overflow, numpy also does this after checking order
    allocsize_mult = builder.smul_with_overflow(arrlen, itemsize)
    allocsize = builder.extract_value(allocsize_mult, 0)
    overflow = builder.or_(overflow, builder.extract_value(allocsize_mult, 1))
    with builder.if_then(overflow, likely=False):
        # Raise same error as numpy, see:
        # https://github.com/numpy/numpy/blob/2a488fe76a0f732dc418d03b452caace161673da/numpy/core/src/multiarray/ctors.c#L1095-L1101 # noqa: E501
        context.call_conv.return_user_exc(
            builder, ValueError,
            ("array is too big; `arr.size * arr.dtype.itemsize` is larger than"
             " the maximum possible size.",)
        )
    align = context.get_preferred_array_alignment(arrtype.dtype)
    meminfo = context.nrt.meminfo_alloc_aligned(builder, size=allocsize,
                                                align=align)
    data = context.nrt.meminfo_data(builder, meminfo)
    intp_t = context.get_value_type(types.intp)
    shape_array = cgutils.pack_array(builder, shapes, ty=intp_t)
    strides_array = cgutils.pack_array(builder, strides, ty=intp_t)
    populate_array(ary,
                   data=builder.bitcast(data, datatype.as_pointer()),
                   shape=shape_array,
                   strides=strides_array,
                   itemsize=itemsize,
                   meminfo=meminfo)
    return ary
def _zero_fill_array(context, builder, ary):
    """
    Zero-fill an array. The array must be contiguous.
    (A single memset over nitems * itemsize bytes — only valid when the
    data region has no gaps.)
    """
    cgutils.memset(builder, ary.data, builder.mul(ary.itemsize, ary.nitems), 0)
def _parse_shape(context, builder, ty, val):
    """
    Parse the shape argument to an array constructor.

    Accepts a single integer or a tuple of integers; returns a list of
    intp dimension sizes.  Emits runtime checks that each size fits in
    intp and is non-negative.
    """
    def safecast_intp(context, builder, src_t, src):
        """Cast src to intp only if value can be maintained"""
        intp_t = context.get_value_type(types.intp)
        intp_width = intp_t.width
        intp_ir = ir.IntType(intp_width)
        maxval = ir.Constant(intp_ir, ((1 << intp_width - 1) - 1))
        if src_t.width < intp_width:
            # Narrower source: sign-extension is always safe.
            res = builder.sext(src, intp_ir)
        elif src_t.width >= intp_width:
            # Same or wider source: reject values above intp max at runtime.
            is_larger = builder.icmp_signed(">", src, maxval)
            with builder.if_then(is_larger, likely=False):
                context.call_conv.return_user_exc(
                    builder, ValueError,
                    ("Cannot safely convert value to intp",)
                )
            if src_t.width > intp_width:
                res = builder.trunc(src, intp_ir)
            else:
                res = src
        return res
    if isinstance(ty, types.Integer):
        # Scalar shape: a 1-d array of that length.
        ndim = 1
        passed_shapes = [context.cast(builder, val, ty, types.intp)]
    else:
        assert isinstance(ty, types.BaseTuple)
        ndim = ty.count
        passed_shapes = cgutils.unpack_tuple(builder, val, count=ndim)
    shapes = []
    for s in passed_shapes:
        shapes.append(safecast_intp(context, builder, s.type, s))
    zero = context.get_constant_generic(builder, types.intp, 0)
    # Runtime check: every dimension must be >= 0 (same error as NumPy).
    for dim in range(ndim):
        is_neg = builder.icmp_signed('<', shapes[dim], zero)
        with cgutils.if_unlikely(builder, is_neg):
            context.call_conv.return_user_exc(
                builder, ValueError, ("negative dimensions not allowed",)
            )
    return shapes
def _parse_empty_args(context, builder, sig, args):
    """
    Parse the arguments of a np.empty(), np.zeros() or np.ones() call.

    Returns the result array type together with the list of lowered
    (intp) dimension sizes parsed from the shape argument.
    """
    shape_ty, shape_val = sig.args[0], args[0]
    return sig.return_type, _parse_shape(context, builder, shape_ty, shape_val)
def _parse_empty_like_args(context, builder, sig, args):
    """
    Parse the arguments of a np.empty_like(), np.zeros_like() or
    np.ones_like() call.

    Returns the result array type and the model array's shape values
    (an empty tuple when the model is a scalar).
    """
    model_ty = sig.args[0]
    if not isinstance(model_ty, types.Array):
        # Scalar model: the result is 0-d, there is no shape to unpack.
        return sig.return_type, ()
    model = make_array(model_ty)(context, builder, value=args[0])
    dims = cgutils.unpack_tuple(builder, model.shape, count=model_ty.ndim)
    return sig.return_type, dims
@lower_builtin(np.empty, types.Any)
@lower_builtin(np.empty, types.Any, types.Any)
def numpy_empty_nd(context, builder, sig, args):
    """Lower np.empty(shape[, dtype]): allocate without initializing."""
    arrtype, shapes = _parse_empty_args(context, builder, sig, args)
    ary = _empty_nd_impl(context, builder, arrtype, shapes)
    return impl_ret_new_ref(context, builder, sig.return_type, ary._getvalue())
@lower_builtin(np.empty_like, types.Any)
@lower_builtin(np.empty_like, types.Any, types.DTypeSpec)
@lower_builtin(np.empty_like, types.Any, types.StringLiteral)
def numpy_empty_like_nd(context, builder, sig, args):
    """Lower np.empty_like(arr[, dtype]): new uninitialized array with
    the model's shape."""
    arrtype, shapes = _parse_empty_like_args(context, builder, sig, args)
    ary = _empty_nd_impl(context, builder, arrtype, shapes)
    return impl_ret_new_ref(context, builder, sig.return_type, ary._getvalue())
@lower_builtin(np.zeros, types.Any)
@lower_builtin(np.zeros, types.Any, types.Any)
def numpy_zeros_nd(context, builder, sig, args):
    """Lower np.zeros(shape[, dtype]): allocate then memset to zero."""
    arrtype, shapes = _parse_empty_args(context, builder, sig, args)
    ary = _empty_nd_impl(context, builder, arrtype, shapes)
    _zero_fill_array(context, builder, ary)
    return impl_ret_new_ref(context, builder, sig.return_type, ary._getvalue())
@lower_builtin(np.zeros_like, types.Any)
@lower_builtin(np.zeros_like, types.Any, types.DTypeSpec)
@lower_builtin(np.zeros_like, types.Any, types.StringLiteral)
def numpy_zeros_like_nd(context, builder, sig, args):
    """Lower np.zeros_like(arr[, dtype]): shape from model, zero-filled."""
    arrtype, shapes = _parse_empty_like_args(context, builder, sig, args)
    ary = _empty_nd_impl(context, builder, arrtype, shapes)
    _zero_fill_array(context, builder, ary)
    return impl_ret_new_ref(context, builder, sig.return_type, ary._getvalue())
@lower_builtin(np.full, types.Any, types.Any)
def numpy_full_nd(context, builder, sig, args):
    """Lower np.full(shape, value) by compiling a pure-Python kernel
    (empty + fill loop) with Numba itself."""
    def full(shape, value):
        arr = np.empty(shape, type(value))
        for idx in np.ndindex(arr.shape):
            arr[idx] = value
        return arr
    res = context.compile_internal(builder, full, sig, args)
    return impl_ret_new_ref(context, builder, sig.return_type, res)
@lower_builtin(np.full, types.Any, types.Any, types.DTypeSpec)
@lower_builtin(np.full, types.Any, types.Any, types.StringLiteral)
def numpy_full_dtype_nd(context, builder, sig, args):
    """Lower np.full(shape, value, dtype) via a compiled fill kernel."""
    def full(shape, value, dtype):
        arr = np.empty(shape, dtype)
        for idx in np.ndindex(arr.shape):
            arr[idx] = value
        return arr
    res = context.compile_internal(builder, full, sig, args)
    return impl_ret_new_ref(context, builder, sig.return_type, res)
@lower_builtin(np.full_like, types.Any, types.Any)
def numpy_full_like_nd(context, builder, sig, args):
    """Lower np.full_like(arr, value) via a compiled fill kernel."""
    def full_like(arr, value):
        arr = np.empty_like(arr)
        for idx in np.ndindex(arr.shape):
            arr[idx] = value
        return arr
    res = context.compile_internal(builder, full_like, sig, args)
    return impl_ret_new_ref(context, builder, sig.return_type, res)
@lower_builtin(np.full_like, types.Any, types.Any, types.DTypeSpec)
@lower_builtin(np.full_like, types.Any, types.Any, types.StringLiteral)
def numpy_full_like_nd_type_spec(context, builder, sig, args):
    """Lower np.full_like(arr, value, dtype) via a compiled fill kernel."""
    def full_like(arr, value, dtype):
        arr = np.empty_like(arr, dtype)
        for idx in np.ndindex(arr.shape):
            arr[idx] = value
        return arr
    res = context.compile_internal(builder, full_like, sig, args)
    return impl_ret_new_ref(context, builder, sig.return_type, res)
@lower_builtin(np.ones, types.Any)
def numpy_ones_nd(context, builder, sig, args):
    """Lower np.ones(shape) via a compiled empty-then-fill kernel."""
    def ones(shape):
        arr = np.empty(shape)
        for idx in np.ndindex(arr.shape):
            arr[idx] = 1
        return arr
    valty = sig.return_type.dtype
    # NOTE(review): the locals mapping names a variable 'c' that does not
    # appear in `ones`; it looks like a leftover from an older kernel that
    # bound the fill value to a local `c` — confirm whether it can be
    # dropped.
    res = context.compile_internal(builder, ones, sig, args,
                                   locals={'c': valty})
    return impl_ret_new_ref(context, builder, sig.return_type, res)
@lower_builtin(np.ones, types.Any, types.DTypeSpec)
@lower_builtin(np.ones, types.Any, types.StringLiteral)
def numpy_ones_dtype_nd(context, builder, sig, args):
    """Lower np.ones(shape, dtype) via a compiled fill kernel."""
    def ones(shape, dtype):
        arr = np.empty(shape, dtype)
        for idx in np.ndindex(arr.shape):
            arr[idx] = 1
        return arr
    res = context.compile_internal(builder, ones, sig, args)
    return impl_ret_new_ref(context, builder, sig.return_type, res)
@lower_builtin(np.ones_like, types.Any)
def numpy_ones_like_nd(context, builder, sig, args):
    """Lower np.ones_like(arr) via a compiled fill kernel."""
    def ones_like(arr):
        arr = np.empty_like(arr)
        for idx in np.ndindex(arr.shape):
            arr[idx] = 1
        return arr
    res = context.compile_internal(builder, ones_like, sig, args)
    return impl_ret_new_ref(context, builder, sig.return_type, res)
@lower_builtin(np.ones_like, types.Any, types.DTypeSpec)
@lower_builtin(np.ones_like, types.Any, types.StringLiteral)
def numpy_ones_like_dtype_nd(context, builder, sig, args):
    """Lower np.ones_like(arr, dtype) via a compiled fill kernel."""
    def ones_like(arr, dtype):
        arr = np.empty_like(arr, dtype)
        for idx in np.ndindex(arr.shape):
            arr[idx] = 1
        return arr
    res = context.compile_internal(builder, ones_like, sig, args)
    return impl_ret_new_ref(context, builder, sig.return_type, res)
@lower_builtin(np.identity, types.Integer)
def numpy_identity(context, builder, sig, args):
    """Lower np.identity(n): n-by-n zeros with ones on the diagonal."""
    def identity(n):
        arr = np.zeros((n, n))
        for i in range(n):
            arr[i, i] = 1
        return arr
    res = context.compile_internal(builder, identity, sig, args)
    return impl_ret_new_ref(context, builder, sig.return_type, res)
@lower_builtin(np.identity, types.Integer, types.DTypeSpec)
@lower_builtin(np.identity, types.Integer, types.StringLiteral)
def numpy_identity_type_spec(context, builder, sig, args):
    """Lower np.identity(n, dtype)."""
    def identity(n, dtype):
        arr = np.zeros((n, n), dtype)
        for i in range(n):
            arr[i, i] = 1
        return arr
    res = context.compile_internal(builder, identity, sig, args)
    return impl_ret_new_ref(context, builder, sig.return_type, res)
def _eye_none_handler(N, M):
    """Stub resolved by the overload below: returns M, or N when M is None."""
    pass
@extending.overload(_eye_none_handler)
def _eye_none_handler_impl(N, M):
    """Typing-time dispatch for _eye_none_handler: when *M* is typed as
    None, fall back to *N*; otherwise pass *M* through unchanged."""
    if isinstance(M, types.NoneType):
        def impl(N, M):
            return N
        return impl
    def impl(N, M):
        return M
    return impl
@extending.overload(np.eye)
def numpy_eye(N, M=None, k=0, dtype=float):
    """Overload of np.eye(N, M=None, k=0, dtype=float).

    Resolves the result dtype at typing time (defaulting to float64),
    then fills the k-th diagonal of an N x M zero matrix with ones.
    """
    # `dtype` may arrive as a Python value (default), a Numba DTypeSpec,
    # a number instance, or None — normalize all of them to a np.dtype.
    if dtype is None or isinstance(dtype, types.NoneType):
        dt = np.dtype(float)
    elif isinstance(dtype, (types.DTypeSpec, types.Number)):
        # dtype or instance of dtype
        dt = as_dtype(getattr(dtype, 'dtype', dtype))
    else:
        dt = np.dtype(dtype)
    def impl(N, M=None, k=0, dtype=float):
        _M = _eye_none_handler(N, M)
        arr = np.zeros((N, _M), dt)
        if k >= 0:
            # Diagonal on or above the main one.
            d = min(N, _M - k)
            for i in range(d):
                arr[i, i + k] = 1
        else:
            # Diagonal below the main one.
            d = min(N + k, _M)
            for i in range(d):
                arr[i - k, i] = 1
        return arr
    return impl
@lower_builtin(np.diag, types.Array)
def numpy_diag(context, builder, sig, args):
    """Lower np.diag(v): delegate to the k-aware implementation with k=0."""
    def diag_impl(val):
        return np.diag(val, k=0)
    return context.compile_internal(builder, diag_impl, sig, args)
@lower_builtin(np.diag, types.Array, types.Integer)
def numpy_diag_kwarg(context, builder, sig, args):
    """Lower np.diag(v, k).

    For a 1-d input, builds a 2-d matrix with *v* on the k-th diagonal;
    for a 2-d input, extracts the k-th diagonal as a 1-d array.  Any
    other rank raises ValueError at compile time.
    """
    arg = sig.args[0]
    if arg.ndim == 1:
        # vector context
        def diag_impl(arr, k=0):
            s = arr.shape
            n = s[0] + abs(k)
            ret = np.zeros((n, n), arr.dtype)
            if k >= 0:
                for i in range(n - k):
                    ret[i, k + i] = arr[i]
            else:
                for i in range(n + k):
                    ret[i - k, i] = arr[i]
            return ret
    elif arg.ndim == 2:
        # matrix context
        def diag_impl(arr, k=0):
            # Will return arr.diagonal(v, k) when axis args are supported
            rows, cols = arr.shape
            if k < 0:
                rows = rows + k
            if k > 0:
                cols = cols - k
            # Clamp to zero so an out-of-range k yields an empty result.
            n = max(min(rows, cols), 0)
            ret = np.empty(n, arr.dtype)
            if k >= 0:
                for i in range(n):
                    ret[i] = arr[i, k + i]
            else:
                for i in range(n):
                    ret[i] = arr[i - k, i]
            return ret
    else:
        # invalid input
        raise ValueError("Input must be 1- or 2-d.")
    res = context.compile_internal(builder, diag_impl, sig, args)
    return impl_ret_new_ref(context, builder, sig.return_type, res)
@lower_builtin(np.take, types.Array, types.Integer)
@lower_builtin('array.take', types.Array, types.Integer)
def numpy_take_1(context, builder, sig, args):
    """Lower np.take(a, i) / a.take(i) for a scalar integer index.

    Bounds-checks the index against the flattened size (negative
    indices wrap, as in NumPy), then indexes the raveled array.
    """
    def take_impl(a, indices):
        if indices > (a.size - 1) or indices < -a.size:
            raise IndexError("Index out of bounds")
        # Use the builtin int() cast: the np.int alias it replaces was
        # deprecated in NumPy 1.20 and removed in 1.24, and was merely
        # an alias for the builtin int anyway.
        return a.ravel()[int(indices)]
    res = context.compile_internal(builder, take_impl, sig, args)
    return impl_ret_new_ref(context, builder, sig.return_type, res)
@lower_builtin('array.take', types.Array, types.Array)
@lower_builtin(np.take, types.Array, types.Array)
def numpy_take_2(context, builder, sig, args):
    """Lower np.take(a, indices) / a.take(indices) for an array of indices.

    Gathers elements from the flattened *a* in C iteration order of the
    index array, then reshapes the result to the index array's shape.
    """
    F_order = sig.args[1].layout == 'F'
    def take_impl(a, indices):
        ret = np.empty(indices.size, dtype=a.dtype)
        if F_order:
            # nditer walks memory order; copy to get C-order traversal
            # so the output matches NumPy's semantics.
            walker = indices.copy()  # get C order
        else:
            walker = indices
        it = np.nditer(walker)
        i = 0
        flat = a.ravel()
        for x in it:
            if x > (a.size - 1) or x < -a.size:
                raise IndexError("Index out of bounds")
            ret[i] = flat[x]
            i = i + 1
        return ret.reshape(indices.shape)
    res = context.compile_internal(builder, take_impl, sig, args)
    return impl_ret_new_ref(context, builder, sig.return_type, res)
@lower_builtin('array.take', types.Array, types.List)
@lower_builtin(np.take, types.Array, types.List)
@lower_builtin('array.take', types.Array, types.BaseTuple)
@lower_builtin(np.take, types.Array, types.BaseTuple)
def numpy_take_3(context, builder, sig, args):
    """Lower take() with a list or tuple of indices.

    Converts the indices to an array first, then gathers exactly like
    the array-of-indices case above.
    """
    def take_impl(a, indices):
        convert = np.array(indices)
        ret = np.empty(convert.size, dtype=a.dtype)
        it = np.nditer(convert)
        i = 0
        flat = a.ravel()
        for x in it:
            if x > (a.size - 1) or x < -a.size:
                raise IndexError("Index out of bounds")
            ret[i] = flat[x]
            i = i + 1
        return ret.reshape(convert.shape)
    res = context.compile_internal(builder, take_impl, sig, args)
    return impl_ret_new_ref(context, builder, sig.return_type, res)
def _arange_dtype(*args):
    """Infer the result dtype for np.arange() from its bound arguments.

    *args* are Numba types (start/stop/step); NoneType entries are
    ignored.  Complex wins over float, which wins over the widest of
    the integer bounds and the platform's default NumPy integer.
    """
    bounds = [a for a in args if not isinstance(a, types.NoneType)]
    if any(isinstance(a, types.Complex) for a in bounds):
        dtype = types.complex128
    elif any(isinstance(a, types.Float) for a in bounds):
        dtype = types.float64
    else:
        # numerous attempts were made at guessing this type from the NumPy
        # source but it turns out on running `np.arange(10).dtype` on pretty
        # much all platform and python combinations that it matched the
        # platform default integer.
        # Windows 64 is broken by default here because Numba (as of 0.47) does
        # not differentiate between Python and NumPy integers, so a `typeof(1)`
        # on w64 is `int64`, i.e. `intp`. This means an arange(<some int>) will
        # be typed as arange(int64) and the following will yield int64 opposed
        # to int32. Example: without a load of analysis to work out of the args
        # were wrapped in NumPy int*() calls it's not possible to detect the
        # difference between `np.arange(10)` and `np.arange(np.int64(10)`.
        # Use `int` directly: `np.int` was only an alias for the builtin int
        # (deprecated in NumPy 1.20, removed in 1.24), so np.dtype(int) is
        # byte-for-byte equivalent to the old np.dtype(np.int).
        NPY_TY = getattr(types, "int%s" % (8 * np.dtype(int).itemsize))
        dtype = max(bounds + [NPY_TY,])
    return dtype
@overload(np.arange)
def np_arange(start, stop=None, step=None, dtype=None):
    """Overload of np.arange(start[, stop[, step[, dtype]]]).

    Normalizes Optional/None arguments at typing time, infers the
    result dtype, then fills the output with a simple accumulation
    loop.  Returns None (no match) for unsupported argument types.
    """
    # Unwrap Optional types so the isinstance checks below see the
    # underlying type.
    if isinstance(stop, types.Optional):
        stop = stop.type
    if isinstance(step, types.Optional):
        step = step.type
    if isinstance(dtype, types.Optional):
        dtype = dtype.type
    # Missing (Python-level None) arguments become the none type.
    if stop is None:
        stop = types.none
    if step is None:
        step = types.none
    if dtype is None:
        dtype = types.none
    if (not isinstance(start, types.Number) or
            not isinstance(stop, (types.NoneType, types.Number)) or
            not isinstance(step, (types.NoneType, types.Number)) or
            not isinstance(dtype, (types.NoneType, types.DTypeSpec))):
        return
    if isinstance(dtype, types.NoneType):
        true_dtype = _arange_dtype(start, stop, step)
    else:
        true_dtype = dtype.dtype
    use_complex = any([isinstance(x, types.Complex)
                       for x in (start, stop, step)])
    # Literal argument values are baked in so the compiler can
    # constant-fold the length computation.
    start_value = getattr(start, "literal_value", None)
    stop_value = getattr(stop, "literal_value", None)
    step_value = getattr(step, "literal_value", None)
    def impl(start, stop=None, step=None, dtype=None):
        # Allow for improved performance if given literal arguments.
        lit_start = start_value if start_value is not None else start
        lit_stop = stop_value if stop_value is not None else stop
        lit_step = step_value if step_value is not None else step
        _step = lit_step if lit_step is not None else 1
        if lit_stop is None:
            # One-argument form: arange(stop).
            _start, _stop = 0, lit_start
        else:
            _start, _stop = lit_start, lit_stop
        if _step == 0:
            # Same message NumPy raises for a zero step.
            raise ValueError("Maximum allowed size exceeded")
        nitems_c = (_stop - _start) / _step
        nitems_r = int(math.ceil(nitems_c.real))
        # Binary operator needed for compiler branch pruning.
        if use_complex is True:
            # Complex ranges: length is bounded by both components.
            nitems_i = int(math.ceil(nitems_c.imag))
            nitems = max(min(nitems_i, nitems_r), 0)
        else:
            nitems = max(nitems_r, 0)
        arr = np.empty(nitems, true_dtype)
        val = _start
        for i in range(nitems):
            arr[i] = val
            val += _step
        return arr
    return impl
@lower_builtin(np.linspace, types.Number, types.Number)
def numpy_linspace_2(context, builder, sig, args):
    """Lower np.linspace(start, stop): delegate with NumPy's default
    num=50."""
    def linspace(start, stop):
        return np.linspace(start, stop, 50)
    res = context.compile_internal(builder, linspace, sig, args)
    return impl_ret_new_ref(context, builder, sig.return_type, res)
@lower_builtin(np.linspace, types.Number, types.Number,
               types.Integer)
def numpy_linspace_3(context, builder, sig, args):
    """Lower np.linspace(start, stop, num).

    Computes each element as start + delta * (i / div), with the
    endpoint included (matching NumPy's default endpoint=True).
    """
    dtype = as_dtype(sig.return_type.dtype)
    def linspace(start, stop, num):
        arr = np.empty(num, dtype)
        if num == 0:
            return arr
        div = num - 1
        delta = stop - start
        arr[0] = start
        for i in range(1, num):
            arr[i] = start + delta * (i / div)
        return arr
    res = context.compile_internal(builder, linspace, sig, args)
    return impl_ret_new_ref(context, builder, sig.return_type, res)
def _array_copy(context, builder, sig, args):
    """
    Array copy.

    Allocates a new array of the return type and copies the source into
    it — a raw memcpy when source and destination layouts match, an
    element-by-element loop nest otherwise.
    """
    arytype = sig.args[0]
    ary = make_array(arytype)(context, builder, value=args[0])
    shapes = cgutils.unpack_tuple(builder, ary.shape)
    rettype = sig.return_type
    ret = _empty_nd_impl(context, builder, rettype, shapes)
    src_data = ary.data
    dest_data = ret.data
    assert rettype.layout in "CF"
    if arytype.layout == rettype.layout:
        # Fast path: memcpy
        cgutils.raw_memcpy(builder, dest_data, src_data, ary.nitems,
                           ary.itemsize, align=1)
    else:
        # Layouts differ: walk all indices and copy element by element,
        # letting each side use its own strides.
        src_strides = cgutils.unpack_tuple(builder, ary.strides)
        dest_strides = cgutils.unpack_tuple(builder, ret.strides)
        intp_t = context.get_value_type(types.intp)
        with cgutils.loop_nest(builder, shapes, intp_t) as indices:
            src_ptr = cgutils.get_item_pointer2(context, builder, src_data,
                                                shapes, src_strides,
                                                arytype.layout, indices)
            dest_ptr = cgutils.get_item_pointer2(context, builder, dest_data,
                                                 shapes, dest_strides,
                                                 rettype.layout, indices)
            builder.store(builder.load(src_ptr), dest_ptr)
    return impl_ret_new_ref(context, builder, sig.return_type, ret._getvalue())
@lower_builtin("array.copy", types.Array)
def array_copy(context, builder, sig, args):
    """Lower the arr.copy() method — shared implementation with np.copy."""
    return _array_copy(context, builder, sig, args)
@lower_builtin(np.copy, types.Array)
def numpy_copy(context, builder, sig, args):
    """Lower np.copy(arr) — shared implementation with arr.copy()."""
    return _array_copy(context, builder, sig, args)
def _as_layout_array(context, builder, sig, args, output_layout):
    """
    Common logic for layout conversion function;
    e.g. ascontiguousarray and asfortranarray

    Returns the input unchanged (borrowed) when it already has the
    requested layout, otherwise a copy in that layout.  For 'A' inputs
    the contiguity check is deferred to runtime.
    """
    retty = sig.return_type
    aryty = sig.args[0]
    assert retty.layout == output_layout, 'return-type has incorrect layout'
    if aryty.ndim == 0:
        # 0-dim input => asfortranarray() returns a 1-dim array
        assert retty.ndim == 1
        ary = make_array(aryty)(context, builder, value=args[0])
        ret = make_array(retty)(context, builder)
        # Wrap the scalar's data as a length-1 array sharing its meminfo.
        shape = context.get_constant_generic(
            builder, types.UniTuple(types.intp, 1), (1,),
        )
        strides = context.make_tuple(builder,
                                     types.UniTuple(types.intp, 1),
                                     (ary.itemsize,))
        populate_array(ret, ary.data, shape, strides, ary.itemsize,
                       ary.meminfo, ary.parent)
        return impl_ret_borrowed(context, builder, retty, ret._getvalue())
    elif (retty.layout == aryty.layout
            or (aryty.ndim == 1 and aryty.layout in 'CF')):
        # 1-dim contiguous input => return the same array
        return impl_ret_borrowed(context, builder, retty, args[0])
    else:
        if aryty.layout == 'A':
            # There's still chance the array is in contiguous layout,
            # just that we don't know at compile time.
            # We can do a runtime check.
            # Prepare and call is_contiguous or is_fortran
            assert output_layout in 'CF'
            check_func = is_contiguous if output_layout == 'C' else is_fortran
            is_contig = _call_contiguous_check(check_func,
                                               context,
                                               builder,
                                               aryty,
                                               args[0])
            with builder.if_else(is_contig) as (then, orelse):
                # If the array is already contiguous, just return it
                with then:
                    out_then = impl_ret_borrowed(context, builder, retty,
                                                 args[0])
                    then_blk = builder.block
                # Otherwise, copy to a new contiguous region
                with orelse:
                    out_orelse = _array_copy(context, builder, sig, args)
                    orelse_blk = builder.block
            # Phi node for the return value
            ret_phi = builder.phi(out_then.type)
            ret_phi.add_incoming(out_then, then_blk)
            ret_phi.add_incoming(out_orelse, orelse_blk)
            return ret_phi
        else:
            # Return a copy with the right layout
            return _array_copy(context, builder, sig, args)
@lower_builtin(np.asfortranarray, types.Array)
def array_asfortranarray(context, builder, sig, args):
    """Lower np.asfortranarray(arr): convert to Fortran layout."""
    return _as_layout_array(context, builder, sig, args, output_layout='F')
@lower_builtin(np.ascontiguousarray, types.Array)
def array_ascontiguousarray(context, builder, sig, args):
    """Lower np.ascontiguousarray(arr): convert to C layout."""
    return _as_layout_array(context, builder, sig, args, output_layout='C')
@lower_builtin("array.astype", types.Array, types.DTypeSpec)
def array_astype(context, builder, sig, args):
    """Lower arr.astype(dtype).

    Allocates a fresh array of the return type and copies every element
    through a dtype cast (always a full copy, as in NumPy's default
    astype(copy=True)).
    """
    arytype = sig.args[0]
    ary = make_array(arytype)(context, builder, value=args[0])
    shapes = cgutils.unpack_tuple(builder, ary.shape)
    rettype = sig.return_type
    ret = _empty_nd_impl(context, builder, rettype, shapes)
    src_data = ary.data
    dest_data = ret.data
    src_strides = cgutils.unpack_tuple(builder, ary.strides)
    dest_strides = cgutils.unpack_tuple(builder, ret.strides)
    intp_t = context.get_value_type(types.intp)
    with cgutils.loop_nest(builder, shapes, intp_t) as indices:
        src_ptr = cgutils.get_item_pointer2(context, builder, src_data,
                                            shapes, src_strides,
                                            arytype.layout, indices)
        dest_ptr = cgutils.get_item_pointer2(context, builder, dest_data,
                                             shapes, dest_strides,
                                             rettype.layout, indices)
        # Load with the source dtype, cast, store with the target dtype.
        item = load_item(context, builder, arytype, src_ptr)
        item = context.cast(builder, item, arytype.dtype, rettype.dtype)
        store_item(context, builder, rettype, item, dest_ptr)
    return impl_ret_new_ref(context, builder, sig.return_type, ret._getvalue())
@lower_builtin(np.frombuffer, types.Buffer)
@lower_builtin(np.frombuffer, types.Buffer, types.DTypeSpec)
@lower_builtin(np.frombuffer, types.Buffer, types.StringLiteral)
def np_frombuffer(context, builder, sig, args):
    """Lower np.frombuffer(buf[, dtype]).

    Produces a 1-d array view that aliases the buffer's memory (no
    copy); raises ValueError at runtime if the buffer's byte size is
    not a multiple of the element size.
    """
    bufty = sig.args[0]
    aryty = sig.return_type
    buf = make_array(bufty)(context, builder, value=args[0])
    out_ary_ty = make_array(aryty)
    out_ary = out_ary_ty(context, builder)
    out_datamodel = out_ary._datamodel
    itemsize = get_itemsize(context, aryty)
    ll_itemsize = lc.Constant.int(buf.itemsize.type, itemsize)
    nbytes = builder.mul(buf.nitems, buf.itemsize)
    # Check that the buffer size is compatible
    rem = builder.srem(nbytes, ll_itemsize)
    is_incompatible = cgutils.is_not_null(builder, rem)
    with builder.if_then(is_incompatible, likely=False):
        msg = "buffer size must be a multiple of element size"
        context.call_conv.return_user_exc(builder, ValueError, (msg,))
    shape = cgutils.pack_array(builder, [builder.sdiv(nbytes, ll_itemsize)])
    strides = cgutils.pack_array(builder, [ll_itemsize])
    data = builder.bitcast(
        buf.data, context.get_value_type(out_datamodel.get_type('data'))
    )
    # Reuse the buffer's meminfo/parent so the view keeps it alive.
    populate_array(out_ary,
                   data=data,
                   shape=shape,
                   strides=strides,
                   itemsize=ll_itemsize,
                   meminfo=buf.meminfo,
                   parent=buf.parent,)
    res = out_ary._getvalue()
    return impl_ret_borrowed(context, builder, sig.return_type, res)
@lower_builtin(carray, types.Any, types.Any)
@lower_builtin(carray, types.Any, types.Any, types.DTypeSpec)
@lower_builtin(farray, types.Any, types.Any)
@lower_builtin(farray, types.Any, types.Any, types.DTypeSpec)
def np_cfarray(context, builder, sig, args):
    """
    numba.numpy_support.carray(...) and
    numba.numpy_support.farray(...).

    Wrap a raw pointer and shape into a C- or F-contiguous array view.
    The result does not own its memory (meminfo=None): the caller is
    responsible for keeping the underlying buffer alive.
    """
    ptrty, shapety = sig.args[:2]
    ptr, shape = args[:2]
    aryty = sig.return_type
    assert aryty.layout in 'CF'
    out_ary = make_array(aryty)(context, builder)
    itemsize = get_itemsize(context, aryty)
    ll_itemsize = cgutils.intp_t(itemsize)
    if isinstance(shapety, types.BaseTuple):
        shapes = cgutils.unpack_tuple(builder, shape)
    else:
        # Scalar shape: treat as a 1-tuple.
        shapety = (shapety,)
        shapes = (shape,)
    shapes = [context.cast(builder, value, fromty, types.intp)
              for fromty, value in zip(shapety, shapes)]
    # Compute contiguous strides from a running byte offset.
    off = ll_itemsize
    strides = []
    if aryty.layout == 'F':
        for s in shapes:
            strides.append(off)
            off = builder.mul(off, s)
    else:
        for s in reversed(shapes):
            strides.append(off)
            off = builder.mul(off, s)
        strides.reverse()
    data = builder.bitcast(ptr,
                           context.get_data_type(aryty.dtype).as_pointer())
    populate_array(out_ary,
                   data=data,
                   shape=shapes,
                   strides=strides,
                   itemsize=ll_itemsize,
                   # Array is not memory-managed
                   meminfo=None,
                   )
    res = out_ary._getvalue()
    return impl_ret_new_ref(context, builder, sig.return_type, res)
def _get_seq_size(context, builder, seqty, seq):
    """Return the lowered intp length of a tuple (compile-time constant)
    or a sequence (runtime len() call)."""
    if isinstance(seqty, types.BaseTuple):
        return context.get_constant(types.intp, len(seqty))
    elif isinstance(seqty, types.Sequence):
        len_impl = context.get_function(len, signature(types.intp, seqty,))
        return len_impl(builder, (seq,))
    else:
        assert 0
def _get_borrowing_getitem(context, seqty):
    """
    Return a getitem() implementation that doesn't incref its result.

    The regular getitem increfs; the immediate decref here cancels that
    so the caller holds only a borrowed reference.
    """
    retty = seqty.dtype
    getitem_impl = context.get_function(operator.getitem,
                                        signature(retty, seqty, types.intp))
    def wrap(builder, args):
        ret = getitem_impl(builder, args)
        if context.enable_nrt:
            context.nrt.decref(builder, retty, ret)
        return ret
    return wrap
def compute_sequence_shape(context, builder, ndim, seqty, seq):
    """
    Compute the likely shape of a nested sequence (possibly 0d).

    Only the FIRST element at each nesting level is examined; sibling
    lengths are validated separately by check_sequence_shape().
    """
    intp_t = context.get_value_type(types.intp)
    zero = Constant.int(intp_t, 0)
    def get_first_item(seqty, seq):
        # Return (type, value) of element 0, or (None, None) for an
        # empty tuple.
        if isinstance(seqty, types.BaseTuple):
            if len(seqty) == 0:
                return None, None
            else:
                return seqty[0], builder.extract_value(seq, 0)
        else:
            getitem_impl = _get_borrowing_getitem(context, seqty)
            return seqty.dtype, getitem_impl(builder, (seq, zero))
    # Compute shape by traversing the first element of each nested
    # sequence
    shapes = []
    innerty, inner = seqty, seq
    for i in range(ndim):
        if i > 0:
            innerty, inner = get_first_item(innerty, inner)
        shapes.append(_get_seq_size(context, builder, innerty, inner))
    return tuple(shapes)
def check_sequence_shape(context, builder, seqty, seq, shapes):
    """
    Check the nested sequence matches the given *shapes*.

    Emits runtime code that recursively compares every sub-sequence's
    length against the expected extent, raising ValueError on mismatch
    (i.e. a ragged nested sequence).
    """
    def _fail():
        context.call_conv.return_user_exc(builder, ValueError,
                                          ("incompatible sequence shape",))
    def check_seq_size(seqty, seq, shapes):
        if len(shapes) == 0:
            return
        size = _get_seq_size(context, builder, seqty, seq)
        expected = shapes[0]
        mismatch = builder.icmp_signed('!=', size, expected)
        with builder.if_then(mismatch, likely=False):
            _fail()
        if len(shapes) == 1:
            return
        if isinstance(seqty, types.Sequence):
            # Runtime loop over a homogeneous sequence.
            getitem_impl = _get_borrowing_getitem(context, seqty)
            with cgutils.for_range(builder, size) as loop:
                innerty = seqty.dtype
                inner = getitem_impl(builder, (seq, loop.index))
                check_seq_size(innerty, inner, shapes[1:])
        elif isinstance(seqty, types.BaseTuple):
            # Tuples are unrolled at compile time (per-element types).
            for i in range(len(seqty)):
                innerty = seqty[i]
                inner = builder.extract_value(seq, i)
                check_seq_size(innerty, inner, shapes[1:])
        else:
            assert 0, seqty
    check_seq_size(seqty, seq, shapes)
def assign_sequence_to_array(context, builder, data, shapes, strides,
                             arrty, seqty, seq):
    """
    Assign a nested sequence contents to an array. The shape must match
    the sequence's structure.

    Recurses through nesting levels, building an index tuple, and at
    the leaves casts each scalar to the array dtype before storing.
    """
    def assign_item(indices, valty, val):
        ptr = cgutils.get_item_pointer2(context, builder, data, shapes, strides,
                                        arrty.layout, indices, wraparound=False)
        val = context.cast(builder, val, valty, arrty.dtype)
        store_item(context, builder, arrty, val, ptr)
    def assign(seqty, seq, shapes, indices):
        if len(shapes) == 0:
            # Leaf: *seq* is a scalar to store at *indices*.
            assert not isinstance(seqty, (types.Sequence, types.BaseTuple))
            assign_item(indices, seqty, seq)
            return
        size = shapes[0]
        if isinstance(seqty, types.Sequence):
            # Runtime loop over a homogeneous sequence.
            getitem_impl = _get_borrowing_getitem(context, seqty)
            with cgutils.for_range(builder, size) as loop:
                innerty = seqty.dtype
                inner = getitem_impl(builder, (seq, loop.index))
                assign(innerty, inner, shapes[1:], indices + (loop.index,))
        elif isinstance(seqty, types.BaseTuple):
            # Tuples are unrolled at compile time.
            for i in range(len(seqty)):
                innerty = seqty[i]
                inner = builder.extract_value(seq, i)
                index = context.get_constant(types.intp, i)
                assign(innerty, inner, shapes[1:], indices + (index,))
        else:
            assert 0, seqty
    assign(seqty, seq, shapes, ())
@lower_builtin(np.array, types.Any)
@lower_builtin(np.array, types.Any, types.DTypeSpec)
@lower_builtin(np.array, types.Any, types.StringLiteral)
def np_array(context, builder, sig, args):
    """Lower np.array(seq[, dtype]) for nested sequences/tuples.

    Probes the nesting to get a shape, validates it at runtime, then
    allocates and fills a fresh array.
    """
    arrty = sig.return_type
    ndim = arrty.ndim
    seqty = sig.args[0]
    seq = args[0]
    shapes = compute_sequence_shape(context, builder, ndim, seqty, seq)
    assert len(shapes) == ndim
    check_sequence_shape(context, builder, seqty, seq, shapes)
    arr = _empty_nd_impl(context, builder, arrty, shapes)
    assign_sequence_to_array(context, builder, arr.data, shapes, arr.strides,
                             arrty, seqty, seq)
    return impl_ret_new_ref(context, builder, sig.return_type, arr._getvalue())
def _normalize_axis(context, builder, func_name, ndim, axis):
    """Emit runtime code that wraps a negative *axis* (by adding ndim)
    and raises IndexError when it is still out of [0, ndim)."""
    zero = axis.type(0)
    ll_ndim = axis.type(ndim)
    # Normalize negative axis
    is_neg_axis = builder.icmp_signed('<', axis, zero)
    axis = builder.select(is_neg_axis, builder.add(axis, ll_ndim), axis)
    # Check axis for bounds
    axis_out_of_bounds = builder.or_(
        builder.icmp_signed('<', axis, zero),
        builder.icmp_signed('>=', axis, ll_ndim))
    with builder.if_then(axis_out_of_bounds, likely=False):
        msg = "%s(): axis out of bounds" % func_name
        context.call_conv.return_user_exc(builder, IndexError, (msg,))
    return axis
def _insert_axis_in_shape(context, builder, orig_shape, ndim, axis):
    """
    Compute shape with the new axis inserted
    e.g. given original shape (2, 3, 4) and axis=2,
    the returned new shape is (2, 3, 1, 4).

    *axis* is a runtime value, so a stack-allocated array is filled
    with a select per dimension instead of Python-level slicing.
    """
    assert len(orig_shape) == ndim - 1
    ll_shty = ir.ArrayType(cgutils.intp_t, ndim)
    shapes = cgutils.alloca_once(builder, ll_shty)
    one = cgutils.intp_t(1)
    # 1. copy original sizes at appropriate places
    for dim in range(ndim - 1):
        ll_dim = cgutils.intp_t(dim)
        # Dimensions at or past the inserted axis shift right by one.
        after_axis = builder.icmp_signed('>=', ll_dim, axis)
        sh = orig_shape[dim]
        idx = builder.select(after_axis,
                             builder.add(ll_dim, one),
                             ll_dim)
        builder.store(sh, cgutils.gep_inbounds(builder, shapes, 0, idx))
    # 2. insert new size (1) at axis dimension
    builder.store(one, cgutils.gep_inbounds(builder, shapes, 0, axis))
    return cgutils.unpack_tuple(builder, builder.load(shapes))
def _insert_axis_in_strides(context, builder, orig_strides, ndim, axis):
    """
    Same as _insert_axis_in_shape(), but with a strides array.
    """
    assert len(orig_strides) == ndim - 1
    ll_shty = ir.ArrayType(cgutils.intp_t, ndim)
    strides = cgutils.alloca_once(builder, ll_shty)
    one = cgutils.intp_t(1)
    zero = cgutils.intp_t(0)
    # 1. copy original strides at appropriate places
    for dim in range(ndim - 1):
        ll_dim = cgutils.intp_t(dim)
        after_axis = builder.icmp_signed('>=', ll_dim, axis)
        idx = builder.select(after_axis,
                             builder.add(ll_dim, one),
                             ll_dim)
        builder.store(orig_strides[dim],
                      cgutils.gep_inbounds(builder, strides, 0, idx))
    # 2. insert new stride at axis dimension
    # (the value is indifferent for a 1-sized dimension, we use 0)
    builder.store(zero, cgutils.gep_inbounds(builder, strides, 0, axis))
    return cgutils.unpack_tuple(builder, builder.load(strides))
def expand_dims(context, builder, sig, args, axis):
    """
    np.expand_dims() with the given axis.

    Returns a view on the input array (shared data and meminfo) whose
    shape/strides have a 1-sized dimension inserted at *axis*.
    """
    retty = sig.return_type
    ndim = retty.ndim
    arrty = sig.args[0]
    arr = make_array(arrty)(context, builder, value=args[0])
    ret = make_array(retty)(context, builder)
    shapes = cgutils.unpack_tuple(builder, arr.shape)
    strides = cgutils.unpack_tuple(builder, arr.strides)
    new_shapes = _insert_axis_in_shape(context, builder, shapes, ndim, axis)
    new_strides = _insert_axis_in_strides(context, builder, strides, ndim, axis)
    populate_array(ret,
                   data=arr.data,
                   shape=new_shapes,
                   strides=new_strides,
                   itemsize=arr.itemsize,
                   meminfo=arr.meminfo,
                   parent=arr.parent)
    return ret._getvalue()
@lower_builtin(np.expand_dims, types.Array, types.Integer)
def np_expand_dims(context, builder, sig, args):
    """Lower np.expand_dims(arr, axis): normalize axis, return a view."""
    axis = context.cast(builder, args[1], sig.args[1], types.intp)
    axis = _normalize_axis(context, builder, "np.expand_dims",
                           sig.return_type.ndim, axis)
    ret = expand_dims(context, builder, sig, args, axis)
    return impl_ret_borrowed(context, builder, sig.return_type, ret)
def _atleast_nd(context, builder, sig, args, transform):
    """
    Shared lowering for np.atleast_{1,2,3}d().

    Applies *transform* (a callback inserting 1-sized dimensions as
    needed) to every input array.  A single input yields a single array;
    multiple inputs yield a tuple of arrays, mirroring NumPy.
    """
    arrtys = sig.args
    arrs = args
    if isinstance(sig.return_type, types.BaseTuple):
        rettys = list(sig.return_type)
    else:
        rettys = [sig.return_type]
    assert len(rettys) == len(arrtys)
    rets = [transform(context, builder, arr, arrty, retty)
            for arr, arrty, retty in zip(arrs, arrtys, rettys)]
    if isinstance(sig.return_type, types.BaseTuple):
        ret = context.make_tuple(builder, sig.return_type, rets)
    else:
        ret = rets[0]
    return impl_ret_borrowed(context, builder, sig.return_type, ret)
def _atleast_nd_transform(min_ndim, axes):
    """
    Return a callback successively inserting 1-sized dimensions at the
    following axes.

    *axes* holds one entry per growth step: entry i is the axis at which
    a new dimension is inserted when growing an array from i to i + 1
    dimensions.  Arrays already at or above min_ndim pass through
    unchanged.
    """
    assert min_ndim == len(axes)
    def transform(context, builder, arr, arrty, retty):
        for i in range(min_ndim):
            ndim = i + 1
            if arrty.ndim < ndim:
                # Grow one dimension at a time through expand_dims views.
                axis = cgutils.intp_t(axes[i])
                newarrty = arrty.copy(ndim=arrty.ndim + 1)
                arr = expand_dims(context, builder,
                                  typing.signature(newarrty, arrty), (arr,),
                                  axis)
                arrty = newarrty
        return arr
    return transform
@lower_builtin(np.atleast_1d, types.VarArg(types.Array))
def np_atleast_1d(context, builder, sig, args):
    # 0-d inputs grow to 1-d by prepending an axis.
    transform = _atleast_nd_transform(1, [0])
    return _atleast_nd(context, builder, sig, args, transform)
@lower_builtin(np.atleast_2d, types.BaseTuple) if False else \
@lower_builtin(np.atleast_2d, types.VarArg(types.Array))
def np_atleast_2d(context, builder, sig, args):
    # New axes are prepended: 0-d -> (1, 1), 1-d -> (1, N).
    transform = _atleast_nd_transform(2, [0, 0])
    return _atleast_nd(context, builder, sig, args, transform)
@lower_builtin(np.atleast_3d, types.VarArg(types.Array))
def np_atleast_3d(context, builder, sig, args):
    # Matches NumPy's axis placement: 0-d -> (1, 1, 1), 1-d -> (1, N, 1),
    # 2-d -> (M, N, 1).  The last insertion happens at axis 2.
    transform = _atleast_nd_transform(3, [0, 0, 2])
    return _atleast_nd(context, builder, sig, args, transform)
def _do_concatenate(context, builder, axis,
                    arrtys, arrs, arr_shapes, arr_strides,
                    retty, ret_shapes):
    """
    Concatenate arrays along the given axis.

    *axis* is an LLVM intp value; *arr_shapes* / *arr_strides* hold the
    unpacked shape and stride values of each input array structure in
    *arrs*.  Allocates the result array of shape *ret_shapes* and copies
    every input into it, casting elements to the result dtype.
    """
    assert len(arrtys) == len(arrs) == len(arr_shapes) == len(arr_strides)
    zero = cgutils.intp_t(0)
    # Allocate return array
    ret = _empty_nd_impl(context, builder, retty, ret_shapes)
    ret_strides = cgutils.unpack_tuple(builder, ret.strides)
    # Compute the offset by which to bump the destination pointer
    # after copying each input array.
    # Morally, we need to copy each input array at different start indices
    # into the destination array; bumping the destination pointer
    # is simply easier than offsetting all destination indices.
    copy_offsets = []
    for arr_sh in arr_shapes:
        # offset = ret_strides[axis] * input_shape[axis]
        # (axis is a runtime value, so the product is accumulated with a
        # select over all dimensions rather than indexed directly)
        offset = zero
        for dim, (size, stride) in enumerate(zip(arr_sh, ret_strides)):
            is_axis = builder.icmp_signed('==', axis.type(dim), axis)
            addend = builder.mul(size, stride)
            offset = builder.select(is_axis,
                                    builder.add(offset, addend),
                                    offset)
        copy_offsets.append(offset)
    # Copy input arrays into the return array
    ret_data = ret.data
    for arrty, arr, arr_sh, arr_st, offset in zip(arrtys, arrs, arr_shapes,
                                                  arr_strides, copy_offsets):
        arr_data = arr.data
        # Do the copy loop
        # Note the loop nesting is optimized for the destination layout
        loop_nest = cgutils.loop_nest(builder, arr_sh, cgutils.intp_t,
                                      order=retty.layout)
        with loop_nest as indices:
            src_ptr = cgutils.get_item_pointer2(context, builder, arr_data,
                                                arr_sh, arr_st,
                                                arrty.layout, indices)
            val = load_item(context, builder, arrty, src_ptr)
            val = context.cast(builder, val, arrty.dtype, retty.dtype)
            dest_ptr = cgutils.get_item_pointer2(context, builder, ret_data,
                                                 ret_shapes, ret_strides,
                                                 retty.layout, indices)
            store_item(context, builder, retty, val, dest_ptr)
        # Bump destination pointer so the next input lands just after
        # this one along the concatenation axis
        ret_data = cgutils.pointer_add(builder, ret_data, offset)
    return ret
def _np_concatenate(context, builder, arrtys, arrs, retty, axis):
    """
    Lowering helper for np.concatenate(): validate the input shapes,
    compute the result shape and copy every input into a freshly
    allocated array.  Raises ValueError at runtime on shape mismatch.
    """
    ndim = retty.ndim
    arrs = [make_array(aty)(context, builder, value=a)
            for aty, a in zip(arrtys, arrs)]
    axis = _normalize_axis(context, builder, "np.concatenate", ndim, axis)
    # Get input shapes
    arr_shapes = [cgutils.unpack_tuple(builder, arr.shape) for arr in arrs]
    arr_strides = [cgutils.unpack_tuple(builder, arr.strides) for arr in arrs]
    # Compute return shape:
    # - the dimension for the concatenation axis is summed over all inputs
    # - other dimensions must match exactly for each input
    ret_shapes = [cgutils.alloca_once_value(builder, sh)
                  for sh in arr_shapes[0]]
    for dim in range(ndim):
        is_axis = builder.icmp_signed('==', axis.type(dim), axis)
        ret_shape_ptr = ret_shapes[dim]
        ret_sh = builder.load(ret_shape_ptr)
        other_shapes = [sh[dim] for sh in arr_shapes[1:]]
        with builder.if_else(is_axis) as (on_axis, on_other_dim):
            with on_axis:
                # Sum this dimension over all inputs
                sh = functools.reduce(
                    builder.add,
                    other_shapes + [ret_sh])
                builder.store(sh, ret_shape_ptr)
            with on_other_dim:
                # All inputs must agree on non-axis dimensions
                is_ok = cgutils.true_bit
                for sh in other_shapes:
                    is_ok = builder.and_(is_ok,
                                         builder.icmp_signed('==', sh, ret_sh))
                with builder.if_then(builder.not_(is_ok), likely=False):
                    context.call_conv.return_user_exc(
                        builder, ValueError,
                        ("np.concatenate(): input sizes over "
                         "dimension %d do not match" % dim,))
    ret_shapes = [builder.load(sh) for sh in ret_shapes]
    ret = _do_concatenate(context, builder, axis,
                          arrtys, arrs, arr_shapes, arr_strides,
                          retty, ret_shapes)
    return impl_ret_new_ref(context, builder, retty, ret._getvalue())
def _np_stack(context, builder, arrtys, arrs, retty, axis):
    """
    Lowering helper for np.stack(): every input must have the same shape;
    a new axis of length len(arrs) is inserted at *axis* and the inputs
    are concatenated along it.  Implemented by giving each input a
    virtual 1-sized dimension (stride 0) at *axis* and reusing
    _do_concatenate().
    """
    ndim = retty.ndim
    zero = cgutils.intp_t(0)
    one = cgutils.intp_t(1)
    ll_narrays = cgutils.intp_t(len(arrs))
    arrs = [make_array(aty)(context, builder, value=a)
            for aty, a in zip(arrtys, arrs)]
    axis = _normalize_axis(context, builder, "np.stack", ndim, axis)
    # Check input arrays have the same shape
    orig_shape = cgutils.unpack_tuple(builder, arrs[0].shape)
    for arr in arrs[1:]:
        is_ok = cgutils.true_bit
        for sh, orig_sh in zip(cgutils.unpack_tuple(builder, arr.shape),
                               orig_shape):
            is_ok = builder.and_(is_ok, builder.icmp_signed('==', sh, orig_sh))
        with builder.if_then(builder.not_(is_ok), likely=False):
            context.call_conv.return_user_exc(
                builder, ValueError,
                ("np.stack(): all input arrays must have the same shape",))
    orig_strides = [cgutils.unpack_tuple(builder, arr.strides) for arr in arrs]
    # Compute input shapes and return shape with the new axis inserted
    # e.g. given 5 input arrays of shape (2, 3, 4) and axis=1,
    # corrected input shape is (2, 1, 3, 4) and return shape is (2, 5, 3, 4).
    ll_shty = ir.ArrayType(cgutils.intp_t, ndim)
    input_shapes = cgutils.alloca_once(builder, ll_shty)
    ret_shapes = cgutils.alloca_once(builder, ll_shty)
    # 1. copy original sizes at appropriate places
    #    (dimensions at or after *axis* shift up by one slot)
    for dim in range(ndim - 1):
        ll_dim = cgutils.intp_t(dim)
        after_axis = builder.icmp_signed('>=', ll_dim, axis)
        sh = orig_shape[dim]
        idx = builder.select(after_axis,
                             builder.add(ll_dim, one),
                             ll_dim)
        builder.store(sh, cgutils.gep_inbounds(builder, input_shapes, 0, idx))
        builder.store(sh, cgutils.gep_inbounds(builder, ret_shapes, 0, idx))
    # 2. insert new size at axis dimension: 1 for each input,
    #    len(arrs) for the result
    builder.store(one, cgutils.gep_inbounds(builder, input_shapes, 0, axis))
    builder.store(ll_narrays, cgutils.gep_inbounds(builder,
                                                   ret_shapes,
                                                   0,
                                                   axis))
    input_shapes = cgutils.unpack_tuple(builder, builder.load(input_shapes))
    # All inputs share the same (corrected) shape
    input_shapes = [input_shapes] * len(arrs)
    ret_shapes = cgutils.unpack_tuple(builder, builder.load(ret_shapes))
    # Compute input strides for each array with the new axis inserted
    input_strides = [cgutils.alloca_once(builder, ll_shty)
                     for i in range(len(arrs))]
    # 1. copy original strides at appropriate places
    for dim in range(ndim - 1):
        ll_dim = cgutils.intp_t(dim)
        after_axis = builder.icmp_signed('>=', ll_dim, axis)
        idx = builder.select(after_axis,
                             builder.add(ll_dim, one),
                             ll_dim)
        for i in range(len(arrs)):
            builder.store(orig_strides[i][dim],
                          cgutils.gep_inbounds(builder, input_strides[i], 0,
                                               idx))
    # 2. insert new stride at axis dimension
    #    (the value is indifferent for a 1-sized dimension, we put 0)
    for i in range(len(arrs)):
        builder.store(zero, cgutils.gep_inbounds(builder, input_strides[i], 0,
                                                 axis))
    input_strides = [cgutils.unpack_tuple(builder, builder.load(st))
                     for st in input_strides]
    # Create concatenated array
    ret = _do_concatenate(context, builder, axis,
                          arrtys, arrs, input_shapes, input_strides,
                          retty, ret_shapes)
    return impl_ret_new_ref(context, builder, retty, ret._getvalue())
@lower_builtin(np.concatenate, types.BaseTuple)
def np_concatenate(context, builder, sig, args):
    # np.concatenate(arrays) without an explicit axis: axis defaults to 0.
    axis = context.get_constant(types.intp, 0)
    return _np_concatenate(context, builder,
                           list(sig.args[0]),
                           cgutils.unpack_tuple(builder, args[0]),
                           sig.return_type,
                           axis)
@lower_builtin(np.concatenate, types.BaseTuple, types.Integer)
def np_concatenate_axis(context, builder, sig, args):
    # np.concatenate(arrays, axis): the runtime axis value is cast to
    # intp; normalization happens inside _np_concatenate().
    axis = context.cast(builder, args[1], sig.args[1], types.intp)
    return _np_concatenate(context, builder,
                           list(sig.args[0]),
                           cgutils.unpack_tuple(builder, args[0]),
                           sig.return_type,
                           axis)
@lower_builtin(np.column_stack, types.BaseTuple)
def np_column_stack(context, builder, sig, args):
    # np.column_stack(): 1-d inputs become 2-d column views, then
    # everything is concatenated along axis 1.
    orig_arrtys = list(sig.args[0])
    orig_arrs = cgutils.unpack_tuple(builder, args[0])
    arrtys = []
    arrs = []
    axis = context.get_constant(types.intp, 1)
    for arrty, arr in zip(orig_arrtys, orig_arrs):
        if arrty.ndim == 2:
            arrtys.append(arrty)
            arrs.append(arr)
        else:
            # Convert 1d array to 2d column array: np.expand_dims(a, 1)
            assert arrty.ndim == 1
            newty = arrty.copy(ndim=2)
            expand_sig = typing.signature(newty, arrty)
            newarr = expand_dims(context, builder, expand_sig, (arr,), axis)
            arrtys.append(newty)
            arrs.append(newarr)
    return _np_concatenate(context, builder, arrtys, arrs,
                           sig.return_type, axis)
def _np_stack_common(context, builder, sig, args, axis):
    """
    np.stack() with the given axis value.

    Unpacks the tuple-of-arrays argument and delegates to _np_stack().
    """
    return _np_stack(context, builder,
                     list(sig.args[0]),
                     cgutils.unpack_tuple(builder, args[0]),
                     sig.return_type,
                     axis)
@lower_builtin(np.stack, types.BaseTuple)
def np_stack(context, builder, sig, args):
    # np.stack(arrays) without an explicit axis: axis defaults to 0.
    axis = context.get_constant(types.intp, 0)
    return _np_stack_common(context, builder, sig, args, axis)
@lower_builtin(np.stack, types.BaseTuple, types.Integer)
def np_stack_axis(context, builder, sig, args):
    # np.stack(arrays, axis): normalization happens inside _np_stack().
    axis = context.cast(builder, args[1], sig.args[1], types.intp)
    return _np_stack_common(context, builder, sig, args, axis)
@lower_builtin(np.hstack, types.BaseTuple)
def np_hstack(context, builder, sig, args):
    # np.hstack(): dispatch at compile time on the input dimensionality.
    tupty = sig.args[0]
    ndim = tupty[0].ndim
    if ndim == 0:
        # hstack() on 0-d arrays returns a 1-d array
        axis = context.get_constant(types.intp, 0)
        return _np_stack_common(context, builder, sig, args, axis)
    else:
        # As a special case, dimension 0 of 1-dimensional arrays is "horizontal"
        axis = 0 if ndim == 1 else 1
        # *axis* is baked into the closure as a compile-time constant.
        def np_hstack_impl(arrays):
            return np.concatenate(arrays, axis=axis)
        return context.compile_internal(builder, np_hstack_impl, sig, args)
@lower_builtin(np.vstack, types.BaseTuple)
def np_vstack(context, builder, sig, args):
    # np.vstack(): dispatch at compile time on the input dimensionality.
    tupty = sig.args[0]
    ndim = tupty[0].ndim
    if ndim == 0:
        # 0-d inputs: hstack into 1-d, then add a leading row axis.
        def np_vstack_impl(arrays):
            return np.expand_dims(np.hstack(arrays), 1)
    elif ndim == 1:
        # np.stack(arrays, axis=0)
        axis = context.get_constant(types.intp, 0)
        return _np_stack_common(context, builder, sig, args, axis)
    else:
        # 2-d and higher: plain concatenation along the first axis.
        def np_vstack_impl(arrays):
            return np.concatenate(arrays, axis=0)
    return context.compile_internal(builder, np_vstack_impl, sig, args)
@lower_builtin(np.dstack, types.BaseTuple)
def np_dstack(context, builder, sig, args):
    # np.dstack(): stack along the third axis, dispatching at compile
    # time on the input dimensionality.
    # (The inner helpers were previously mis-named `np_vstack_impl`,
    # a copy-paste leftover from np_vstack; renamed for clarity.)
    tupty = sig.args[0]
    retty = sig.return_type
    ndim = tupty[0].ndim
    if ndim == 0:
        # 0-d inputs: hstack into 1-d, then reshape to (1, 1, n).
        def np_dstack_impl(arrays):
            return np.hstack(arrays).reshape(1, 1, -1)
        return context.compile_internal(builder, np_dstack_impl, sig, args)
    elif ndim == 1:
        # np.expand_dims(np.stack(arrays, axis=1), axis=0)
        axis = context.get_constant(types.intp, 1)
        stack_retty = retty.copy(ndim=retty.ndim - 1)
        stack_sig = typing.signature(stack_retty, *sig.args)
        stack_ret = _np_stack_common(context, builder, stack_sig, args, axis)
        axis = context.get_constant(types.intp, 0)
        expand_sig = typing.signature(retty, stack_retty)
        return expand_dims(context, builder, expand_sig, (stack_ret,), axis)
    elif ndim == 2:
        # np.stack(arrays, axis=2)
        axis = context.get_constant(types.intp, 2)
        return _np_stack_common(context, builder, sig, args, axis)
    else:
        # 3-d and higher: plain concatenation along the third axis.
        def np_dstack_impl(arrays):
            return np.concatenate(arrays, axis=2)
        return context.compile_internal(builder, np_dstack_impl, sig, args)
@extending.overload_method(types.Array, 'fill')
def arr_fill(arr, val):
    # ndarray.fill(value): broadcast *val* into every element, in place.
    # Like NumPy, the method itself returns None.
    def impl(arr, val):
        arr[:] = val
        return None
    return impl
@extending.overload_method(types.Array, 'dot')
def array_dot(arr, other):
    # ndarray.dot(other) is sugar for np.dot(self, other).
    def impl(arr, other):
        return np.dot(arr, other)
    return impl
@overload(np.fliplr)
def np_flip_lr(a):
    # Reject types that np.asarray cannot handle, at typing time.
    if not type_can_asarray(a):
        raise errors.TypingError("Cannot np.fliplr on %s type" % a)
    def impl(a):
        arr = np.asarray(a)
        # Defensive guard: the indexing below already makes typing fail
        # for ndim < 2 inputs, so this branch is normally dead; it only
        # fires if that typing behaviour ever changes.
        if arr.ndim < 2:
            raise ValueError('Input must be >= 2-d.')
        # Reverse the second (column) axis, leaving all others untouched.
        return arr[::, ::-1, ...]
    return impl
@overload(np.flipud)
def np_flip_ud(a):
    # Reject types that np.asarray cannot handle, at typing time.
    if not type_can_asarray(a):
        raise errors.TypingError("Cannot np.flipud on %s type" % a)
    def impl(a):
        arr = np.asarray(a)
        # Defensive guard: the indexing below already makes typing fail
        # for 0-d inputs, so this branch is normally dead; it only fires
        # if that typing behaviour ever changes.
        if arr.ndim < 1:
            raise ValueError('Input must be >= 1-d.')
        # Reverse the first (row) axis, leaving all others untouched.
        return arr[::-1, ...]
    return impl
@intrinsic
def _build_flip_slice_tuple(tyctx, sz):
    """ Creates a tuple of slices for np.flip indexing like
    `(slice(None, None, -1),) * sz` """
    # *sz* must carry a literal value so the tuple length is a
    # compile-time constant.
    size = int(sz.literal_value)
    tuple_type = types.UniTuple(dtype=types.slice3_type, count=size)
    sig = tuple_type(sz)
    def codegen(context, builder, signature, args):
        # The tuple is built by compiling a small kernel that fills an
        # undef tuple with reversing slices, one slot at a time.
        def impl(length, empty_tuple):
            out = empty_tuple
            for i in range(length):
                out = tuple_setitem(out, i, slice(None, None, -1))
            return out
        inner_argtypes = [types.intp, tuple_type]
        inner_sig = typing.signature(tuple_type, *inner_argtypes)
        ll_idx_type = context.get_value_type(types.intp)
        # Allocate an empty tuple
        empty_tuple = context.get_constant_undef(tuple_type)
        inner_args = [ll_idx_type(size), empty_tuple]
        res = context.compile_internal(builder, impl, inner_sig, inner_args)
        return res
    return sig, codegen
@overload(np.flip)
def np_flip(a):
    # The slice tuple below needs a compile-time constant length;
    # types.Array.ndim supplies one, so only types.Array is supported
    # at present.
    if not isinstance(a, types.Array):
        raise errors.TypingError("Cannot np.flip on %s type" % a)
    def impl(a):
        # Index with (slice(None, None, -1),) * ndim: reverses every axis.
        return a[_build_flip_slice_tuple(a.ndim)]
    return impl
@overload(np.array_split)
def np_array_split(ary, indices_or_sections, axis=0):
    """
    Implement np.array_split().

    Splits *ary* into sub-arrays along *axis*.  *indices_or_sections*
    may be an integer number of (possibly unequal) sections, or an
    iterable / tuple of split indices.  Tuples and (typed) lists of
    arrays are first converted with np.asarray().
    """
    if isinstance(ary, (types.UniTuple, types.ListType, types.List)):
        # Sequence input: convert to an array and recurse.
        def impl(ary, indices_or_sections, axis=0):
            return np.array_split(
                np.asarray(ary),
                indices_or_sections,
                axis=axis
            )
        return impl
    if isinstance(indices_or_sections, types.Integer):
        # An integer requests that many sections; the first `rem`
        # sections get one extra element, matching NumPy.
        def impl(ary, indices_or_sections, axis=0):
            if indices_or_sections <= 0:
                # Match NumPy's error rather than the ZeroDivisionError
                # the divmod below would raise for 0 sections.
                raise ValueError("number sections must be larger than 0.")
            sec_len, rem = divmod(ary.shape[axis], indices_or_sections)
            indices = np.cumsum(np.array(
                [sec_len + 1] * rem +
                [sec_len] * (indices_or_sections - rem - 1)
            ))
            return np.array_split(ary, indices, axis=axis)
        return impl
    elif (
        isinstance(indices_or_sections, types.IterableType)
        and isinstance(
            indices_or_sections.iterator_type.yield_type,
            types.Integer
        )
    ):
        # Iterable of explicit split points.
        def impl(ary, indices_or_sections, axis=0):
            slice_tup = build_full_slice_tuple(ary.ndim)
            out = list()
            prev = 0
            for cur in indices_or_sections:
                idx = tuple_setitem(slice_tup, axis, slice(prev, cur))
                out.append(ary[idx])
                prev = cur
            # Use `prev`, not the loop variable: with no split points the
            # loop never runs and `cur` would be undefined.
            out.append(ary[tuple_setitem(slice_tup, axis, slice(prev, None))])
            return out
        return impl
    elif (
        isinstance(indices_or_sections, types.Tuple)
        and all(isinstance(t, types.Integer) for t in indices_or_sections.types)
    ):
        # Heterogeneous tuple of split points: needs literal_unroll.
        def impl(ary, indices_or_sections, axis=0):
            slice_tup = build_full_slice_tuple(ary.ndim)
            out = list()
            prev = 0
            for cur in literal_unroll(indices_or_sections):
                idx = tuple_setitem(slice_tup, axis, slice(prev, cur))
                out.append(ary[idx])
                prev = cur
            # Same `prev` fix as above for the empty-tuple case.
            out.append(ary[tuple_setitem(slice_tup, axis, slice(prev, None))])
            return out
        return impl
@overload(np.split)
def np_split(ary, indices_or_sections, axis=0):
    # Thin wrapper over np.array_split() that additionally rejects
    # integer section counts which do not divide the axis length evenly.
    if isinstance(ary, (types.UniTuple, types.ListType, types.List)):
        # Sequence input: convert to an array and recurse.
        def impl(ary, indices_or_sections, axis=0):
            return np.split(np.asarray(ary), indices_or_sections, axis=axis)
        return impl
    if not isinstance(indices_or_sections, types.Integer):
        # Explicit split indices: identical to array_split.
        return np_array_split(ary, indices_or_sections, axis=axis)
    def impl(ary, indices_or_sections, axis=0):
        _, rem = divmod(ary.shape[axis], indices_or_sections)
        if rem != 0:
            raise ValueError(
                "array split does not result in an equal division"
            )
        return np.array_split(
            ary, indices_or_sections, axis=axis
        )
    return impl
# -----------------------------------------------------------------------------
# Sorting
# Cache of compiled sort implementations,
# keyed by (kind, is_float, is_argsort).
_sorts = {}
def lt_floats(a, b):
    """Float 'less-than' that sorts NaNs last: anything is < a NaN."""
    if math.isnan(b):
        return True
    return a < b
def get_sort_func(kind, is_float, is_argsort=False):
    """
    Get a sort implementation of the given kind.

    *kind* must be 'quicksort' or 'mergesort'.  When *is_float* is true
    a NaN-aware comparison is used so NaNs sort last; *is_argsort*
    selects the index-returning variant.  Built implementations are
    cached since JIT-compiling one is expensive.

    Raises ValueError for an unknown *kind* (previously this fell
    through to an obscure unbound-local NameError).
    """
    key = kind, is_float, is_argsort
    try:
        return _sorts[key]
    except KeyError:
        if kind == 'quicksort':
            sort = quicksort.make_jit_quicksort(
                lt=lt_floats if is_float else None,
                is_argsort=is_argsort)
            func = sort.run_quicksort
        elif kind == 'mergesort':
            sort = mergesort.make_jit_mergesort(
                lt=lt_floats if is_float else None,
                is_argsort=is_argsort)
            func = sort.run_mergesort
        else:
            raise ValueError("unknown sort kind: %r" % (kind,))
        _sorts[key] = func
        return func
@lower_builtin("array.sort", types.Array)
def array_sort(context, builder, sig, args):
arytype = sig.args[0]
sort_func = get_sort_func(kind='quicksort',
is_float=isinstance(arytype.dtype, types.Float))
def array_sort_impl(arr):
# Note we clobber the return value
sort_func(arr)
return context.compile_internal(builder, array_sort_impl, sig, args)
@lower_builtin(np.sort, types.Array)
def np_sort(context, builder, sig, args):
    # np.sort(a) returns a sorted copy: copy first, then delegate to the
    # in-place array.sort() lowering above.
    def np_sort_impl(a):
        out = a.copy()
        out.sort()
        return out
    return context.compile_internal(builder, np_sort_impl, sig, args)
@lower_builtin("array.argsort", types.Array, types.StringLiteral)
@lower_builtin(np.argsort, types.Array, types.StringLiteral)
def array_argsort(context, builder, sig, args):
arytype, kind = sig.args
sort_func = get_sort_func(kind=kind.literal_value,
is_float=isinstance(arytype.dtype, types.Float),
is_argsort=True)
def array_argsort_impl(arr):
return sort_func(arr)
innersig = sig.replace(args=sig.args[:1])
innerargs = args[:1]
return context.compile_internal(builder, array_argsort_impl,
innersig, innerargs)
# ------------------------------------------------------------------------------
# Implicit cast
@lower_cast(types.Array, types.Array)
def array_to_array(context, builder, fromty, toty, val):
    # Type inference should have prevented illegal array casting:
    # only a mutability change or a cast to the generic 'A' layout is
    # expected here, and the underlying value is reused unchanged.
    assert fromty.mutable != toty.mutable or toty.layout == 'A'
    return val
# ------------------------------------------------------------------------------
# Stride tricks
def reshape_unchecked(a, shape, strides):
    """
    An intrinsic returning a derived array with the given shape and strides.
    """
    # Pure-Python stub: only meaningful inside jitted code, where typing
    # and lowering are provided below.
    raise NotImplementedError
@extending.type_callable(reshape_unchecked)
def type_reshape_unchecked(context):
    # Typing for reshape_unchecked(): accepts an array plus two integer
    # tuples of equal length; the result ndim follows the shape tuple
    # and the layout degrades to the generic 'A'.
    def is_int_tuple(tup):
        return (isinstance(tup, types.BaseTuple)
                and all(isinstance(item, types.Integer) for item in tup))
    def typer(a, shape, strides):
        if (isinstance(a, types.Array)
                and is_int_tuple(shape)
                and is_int_tuple(strides)
                and len(shape) == len(strides)):
            return a.copy(ndim=len(shape), layout='A')
    return typer
@lower_builtin(reshape_unchecked, types.Array, types.BaseTuple, types.BaseTuple)
def impl_shape_unchecked(context, builder, sig, args):
    # Build a view over the input's data with caller-supplied shape and
    # strides.  No size or bounds checking is performed.
    aryty = sig.args[0]
    retty = sig.return_type
    ary = make_array(aryty)(context, builder, args[0])
    out = make_array(retty)(context, builder)
    shape = cgutils.unpack_tuple(builder, args[1])
    strides = cgutils.unpack_tuple(builder, args[2])
    populate_array(out,
                   data=ary.data,
                   shape=shape,
                   strides=strides,
                   itemsize=ary.itemsize,
                   meminfo=ary.meminfo,
                   )
    res = out._getvalue()
    return impl_ret_borrowed(context, builder, retty, res)
@extending.overload(np.lib.stride_tricks.as_strided)
def as_strided(x, shape=None, strides=None):
    # When *strides* is not passed, as_strided() does a non-size-checking
    # reshape(), possibly changing the original strides. This is too
    # cumbersome to support right now, and a Web search shows all example
    # use cases of as_strided() pass explicit *strides*.
    if strides in (None, types.none):
        raise NotImplementedError("as_strided() strides argument is mandatory")
    @register_jitable
    def get_strides(x, strides):
        return strides
    # Default shape: reuse the input's own shape.
    if shape in (None, types.none):
        @register_jitable
        def get_shape(x, shape):
            return x.shape
    else:
        @register_jitable
        def get_shape(x, shape):
            return shape
    def as_strided_impl(x, shape=None, strides=None):
        return reshape_unchecked(x, get_shape(x, shape),
                                 get_strides(x, strides))
    return as_strided_impl
@overload(bool)
def ol_bool(arr):
    # bool(array): only defined for Numba arrays; any other type falls
    # through (returns None -> no implementation registered).
    if not isinstance(arr, types.Array):
        return None
    def impl(arr):
        n = arr.size
        if n == 1:
            return bool(arr.take(0))
        if n == 0:
            return False  # deprecated NumPy behaviour, kept for parity
        msg = ("The truth value of an array with more than one element "
               "is ambiguous. Use a.any() or a.all()")
        raise ValueError(msg)
    return impl
| 36.216835 | 148 | 0.605866 |
f46a32f8e0bf72fdd6880d73f17a956943fd3ac7 | 1,499 | py | Python | networking_bagpipe/bagpipe_bgp/engine/ipvpn.py | oferby/networking-bagpipe | 4c42971872065192b4da33c050ccfa86deb5ea77 | [
"Apache-2.0"
] | null | null | null | networking_bagpipe/bagpipe_bgp/engine/ipvpn.py | oferby/networking-bagpipe | 4c42971872065192b4da33c050ccfa86deb5ea77 | [
"Apache-2.0"
] | null | null | null | networking_bagpipe/bagpipe_bgp/engine/ipvpn.py | oferby/networking-bagpipe | 4c42971872065192b4da33c050ccfa86deb5ea77 | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# encoding: utf-8
# Copyright 2014 Orange
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from networking_bagpipe.bagpipe_bgp.engine import exa
def prefix_to_packed_ip_mask(prefix):
    """Split an 'address/length' CIDR string into (packed bytes, length)."""
    address, length = prefix.split("/")
    return (exa.IP.pton(address), int(length))
@exa.NLRI.register(exa.AFI.ipv4, exa.SAFI.mpls_vpn, force=True)
@exa.NLRI.register(exa.AFI.ipv6, exa.SAFI.mpls_vpn, force=True)
class IPVPN(exa.IPVPN):
    # two NLRIs with same RD and prefix, but different labels need to
    # be equal and have the same hash -- hence the label-insensitive
    # __eq__/__hash__ overrides below.
    def __eq__(self, other):
        # NOTE(review): assumes *other* also exposes `rd` and `cidr`
        # (i.e. is an IPVPN-like NLRI); comparing against an unrelated
        # type raises AttributeError instead of returning NotImplemented.
        return self.rd == other.rd and self.cidr == other.cidr
    def __hash__(self):
        # Hash on the packed prefix bytes, keeping it consistent with
        # __eq__ (labels excluded in both).
        return hash((self.rd, self.cidr._packed))
def IPVPNRouteFactory(afi, prefix, label, rd, nexthop):
    """Build an MPLS VPN NLRI for *prefix* with the given label/RD/nexthop."""
    packed, masklen = prefix_to_packed_ip_mask(prefix)
    return IPVPN.new(afi, exa.SAFI(exa.SAFI.mpls_vpn), packed, masklen,
                     exa.Labels([label], True), rd, nexthop)
| 34.068182 | 75 | 0.723816 |
4f75b6cd4b86d199b994c3f2a040a21b969a8a3a | 2,834 | py | Python | tests/test_testbase/test_serialization.py | molayxu/QTAF | a036a22b7baa10214baa60954154bb865c98e44f | [
"BSD-3-Clause"
] | 452 | 2016-09-27T11:21:00.000Z | 2022-03-31T06:11:30.000Z | tests/test_testbase/test_serialization.py | molayxu/QTAF | a036a22b7baa10214baa60954154bb865c98e44f | [
"BSD-3-Clause"
] | 39 | 2016-09-29T06:14:04.000Z | 2022-03-22T04:24:36.000Z | tests/test_testbase/test_serialization.py | molayxu/QTAF | a036a22b7baa10214baa60954154bb865c98e44f | [
"BSD-3-Clause"
] | 129 | 2016-09-27T11:28:42.000Z | 2022-03-17T09:05:16.000Z | # -*- coding: utf-8 -*-
"""test serialization
"""
import unittest
from testbase.testcase import TestCase, SeqTestSuite
from testbase import serialization, datadrive
# Data-drive fixture for FooTest: index 0 drives it with the plain value 0;
# the dict entry drives it with "data" 1 while overriding the case's
# attributes (owner/timeout/priority/status/tags and the docstring) via
# the "__attrs__" mapping.
drive_data = [0, {"data" : 1, "__attrs__" : {"owner" : "bar",
                                             "timeout" : 5,
                                             "priority" : TestCase.EnumPriority.BVT,
                                             "status" : TestCase.EnumStatus.Implement,
                                             "tags" : ("a", "b", "c"),
                                             "__doc__" : "demo"}}]
@datadrive.DataDrive(drive_data)
class FooTest(TestCase):
    """foo test
    """
    # Baseline case attributes; the drive_data "__attrs__" entry is
    # expected to override them for the driven variant.
    owner = "foo"
    timeout = 1
    priority = TestCase.EnumPriority.High
    status = TestCase.EnumStatus.Ready
    def run_test(self):
        # The body is irrelevant: only serialization of case metadata is
        # exercised by the tests below.
        pass
class SerializationTest(unittest.TestCase):
    """Round-trip tests for testbase.serialization dumps()/loads()."""
    def test_normal_serialization(self):
        # A plain test case must round-trip with type and metadata intact.
        from tests.sampletest.hellotest import HelloTest
        hello = HelloTest()
        data = serialization.dumps(hello)
        deserialized_case = serialization.loads(data)
        self.assertEqual(type(deserialized_case), HelloTest)
        for attr in ["owner", "timeout", "priority", "status", "tags", "test_doc"]:
            self.assertEqual(getattr(deserialized_case, attr), getattr(hello, attr))
    def test_datadrive_serialization(self):
        # A data-driven case must round-trip with the "__attrs__"
        # overrides from drive_data (owner "bar", timeout 5, ...) applied.
        tests = datadrive.load_datadrive_tests(FooTest, 1)
        self.assertEqual(len(tests), 1)
        test = tests[0]
        deserialized_test = serialization.loads(serialization.dumps(test))
        self.assertEqual(deserialized_test.owner, "bar")
        self.assertEqual(deserialized_test.timeout, 5)
        self.assertEqual(deserialized_test.priority, TestCase.EnumPriority.BVT)
        self.assertEqual(deserialized_test.status, TestCase.EnumStatus.Implement)
        self.assertEqual(deserialized_test.tags, set(["a", "b", "c"]))
        self.assertEqual(deserialized_test.test_doc, "demo")
    def test_serialize_testsuite(self):
        # A whole sequential suite must round-trip member by member.
        from tests.sampletest.hellotest import HelloTest, TimeoutTest
        foo_test = datadrive.load_datadrive_tests(FooTest, 1)[0]
        testsuite = SeqTestSuite([HelloTest(), TimeoutTest(), foo_test])
        data = serialization.dumps(testsuite)
        deserialized_testsuite = serialization.loads(data)
        self.assertEqual(len(deserialized_testsuite), len(testsuite))
        for deserialized_test, test in zip(deserialized_testsuite, testsuite):
            self.assertEqual(type(deserialized_test), type(test))
            for attr in ["owner", "timeout", "priority", "status", "tags", "test_doc"]:
                self.assertEqual(getattr(deserialized_test, attr), getattr(test, attr))
if __name__ == "__main__":
unittest.main(defaultTest="SerializationTest.test_serialize_testsuite")
| 41.676471 | 87 | 0.636909 |
8f9b2c3312d654109b6084102ee8a3f48a85f867 | 3,871 | py | Python | test/functional/rpc_invalid_address_message.py | pmconrad/bitcoin | 19815b096c6a2b2e0bcb802e9e5842e67952993e | [
"MIT"
] | 5 | 2020-08-25T13:49:02.000Z | 2020-10-08T10:25:39.000Z | test/functional/rpc_invalid_address_message.py | digim3ta/bitcoin | fe03f7a37fd0ef05149161f6b95a25493e1fe38f | [
"MIT"
] | null | null | null | test/functional/rpc_invalid_address_message.py | digim3ta/bitcoin | fe03f7a37fd0ef05149161f6b95a25493e1fe38f | [
"MIT"
] | 3 | 2020-09-01T16:17:50.000Z | 2020-10-08T10:25:43.000Z | #!/usr/bin/env python3
# Copyright (c) 2020-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test error messages for 'getaddressinfo' and 'validateaddress' RPC commands."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
# Valid bech32 regtest address (positive control).
BECH32_VALID = 'bcrt1qtmp74ayg7p24uslctssvjm06q5phz4yrxucgnv'
# Expected to fail with 'Version 1+ witness address must use Bech32m checksum'.
BECH32_INVALID_BECH32 = 'bcrt1p0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vqdmchcc'
# Expected to fail with 'Version 0 witness address must use Bech32 checksum'.
BECH32_INVALID_BECH32M = 'bcrt1qw508d6qejxtdg4y5r3zarvary0c5xw7k35mrzd'
# Expected to fail with 'Invalid Bech32 address witness version'.
BECH32_INVALID_VERSION = 'bcrt130xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vqynjegk'
# Expected to fail with 'Invalid Bech32 address data size'.
BECH32_INVALID_SIZE = 'bcrt1s0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7v8n0nx0muaewav25430mtr'
# Expected to fail with 'Invalid Bech32 v0 address data size'.
BECH32_INVALID_V0_SIZE = 'bcrt1qw508d6qejxtdg4y5r3zarvary0c5xw7kqqq5k3my'
# Mainnet 'bc' prefix on regtest: 'Invalid prefix for Bech32 address'.
BECH32_INVALID_PREFIX = 'bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k7grplx'
# Valid base58 regtest address (positive control).
BASE58_VALID = 'mipcBbFg9gMiCh81Kj8tqqdgoZub1ZJRfn'
# Expected to fail with 'Invalid prefix for Base58-encoded address'.
BASE58_INVALID_PREFIX = '17VZNX1SN5NtKa8UQFxwQbFeFc3iqRYhem'
# Not parseable as any known address encoding: 'Invalid address format'.
INVALID_ADDRESS = 'asfah14i8fajz0123f'
class InvalidAddressErrorMessageTest(BitcoinTestFramework):
    """Check the 'error' field of validateaddress and the error message of
    getaddressinfo for each malformed-address category defined above."""
    def set_test_params(self):
        self.setup_clean_chain = True
        # A single node suffices: only RPC parsing is exercised.
        self.num_nodes = 1
    def test_validateaddress(self):
        node = self.nodes[0]
        # Bech32
        info = node.validateaddress(BECH32_INVALID_SIZE)
        assert not info['isvalid']
        assert_equal(info['error'], 'Invalid Bech32 address data size')
        info = node.validateaddress(BECH32_INVALID_PREFIX)
        assert not info['isvalid']
        assert_equal(info['error'], 'Invalid prefix for Bech32 address')
        info = node.validateaddress(BECH32_INVALID_BECH32)
        assert not info['isvalid']
        assert_equal(info['error'], 'Version 1+ witness address must use Bech32m checksum')
        info = node.validateaddress(BECH32_INVALID_BECH32M)
        assert not info['isvalid']
        assert_equal(info['error'], 'Version 0 witness address must use Bech32 checksum')
        info = node.validateaddress(BECH32_INVALID_V0_SIZE)
        assert not info['isvalid']
        assert_equal(info['error'], 'Invalid Bech32 v0 address data size')
        # Valid addresses must carry no 'error' key at all.
        info = node.validateaddress(BECH32_VALID)
        assert info['isvalid']
        assert 'error' not in info
        info = node.validateaddress(BECH32_INVALID_VERSION)
        assert not info['isvalid']
        assert_equal(info['error'], 'Invalid Bech32 address witness version')
        # Base58
        info = node.validateaddress(BASE58_INVALID_PREFIX)
        assert not info['isvalid']
        assert_equal(info['error'], 'Invalid prefix for Base58-encoded address')
        info = node.validateaddress(BASE58_VALID)
        assert info['isvalid']
        assert 'error' not in info
        # Invalid address format
        info = node.validateaddress(INVALID_ADDRESS)
        assert not info['isvalid']
        assert_equal(info['error'], 'Invalid address format')
    def test_getaddressinfo(self):
        # getaddressinfo reports the same messages through RPC error -5.
        node = self.nodes[0]
        assert_raises_rpc_error(-5, "Invalid Bech32 address data size", node.getaddressinfo, BECH32_INVALID_SIZE)
        assert_raises_rpc_error(-5, "Invalid prefix for Bech32 address", node.getaddressinfo, BECH32_INVALID_PREFIX)
        assert_raises_rpc_error(-5, "Invalid prefix for Base58-encoded address", node.getaddressinfo, BASE58_INVALID_PREFIX)
        assert_raises_rpc_error(-5, "Invalid address format", node.getaddressinfo, INVALID_ADDRESS)
    def run_test(self):
        self.test_validateaddress()
        # getaddressinfo requires a wallet; skip when not compiled in.
        if self.is_wallet_compiled():
            self.init_wallet(node=0)
            self.test_getaddressinfo()
InvalidAddressErrorMessageTest().main()
| 39.10101 | 124 | 0.740119 |
0a3e7f5b219b9e2c29b201ee7f33dcf359829c0d | 5,014 | py | Python | tests/test_core.py | paulosalem/time-blender | 9f902af8068e3db7c7f8a073efc52d5a395e21a1 | [
"MIT"
] | 4 | 2019-02-03T13:46:46.000Z | 2021-04-18T22:04:29.000Z | tests/test_core.py | paulosalem/time-blender | 9f902af8068e3db7c7f8a073efc52d5a395e21a1 | [
"MIT"
] | 1 | 2020-08-16T23:52:01.000Z | 2020-08-16T23:52:01.000Z | tests/test_core.py | paulosalem/time-blender | 9f902af8068e3db7c7f8a073efc52d5a395e21a1 | [
"MIT"
] | null | null | null | import copy
import numpy as np
from tests.common import AbstractTest
from time_blender.coordination_events import Piecewise, Replicated, PastEvent
from time_blender.core import Generator, ConstantEvent, LambdaEvent
from time_blender.deterministic_events import WalkEvent
from time_blender.models import BankingModels, ClassicModels
from time_blender.random_events import NormalEvent, UniformEvent, PoissonEvent, TopResistance, BottomResistance
class TestEvent(AbstractTest):
    """Tests for event cloning and generation in time-blender's core."""
    def setUp(self):
        super().setUp()
    def test_clone(self):
        #
        # Let's test the copy strategy using a piecewise model.
        #
        banking_model_1 = BankingModels.salary_earner(salary=ConstantEvent(5000.0,
                                                                           require_lower_bound=0,
                                                                           require_upper_bound=30000),
                                                      expense_mean=ConstantEvent(100.0,
                                                                                 require_lower_bound=0,
                                                                                 require_upper_bound=1000),
                                                      expense_sd=ConstantEvent(100.0,
                                                                               require_lower_bound=0,
                                                                               require_upper_bound=30000))
        banking_model_2 = banking_model_1.clone()
        banking_model_3 = banking_model_1.clone()
        # top level names must be unique
        self.assertNotEqual(banking_model_1.name, banking_model_2.name)
        self.assertNotEqual(banking_model_1.name, banking_model_3.name)
        self.assertNotEqual(banking_model_2.name, banking_model_3.name)
        # nested names must also be unique
        self.assertNotEqual(banking_model_1._causal_parameters[0].name,
                            banking_model_2._causal_parameters[0].name)
        # classes must be equal, though
        self.assertEqual(banking_model_1._causal_parameters[0].__class__,
                         banking_model_2._causal_parameters[0].__class__)
        t_separator_1 = NormalEvent(ConstantEvent(60.0,
                                                  require_lower_bound=0,
                                                  require_upper_bound=100),
                                    ConstantEvent(20.0,
                                                  require_lower_bound=0,
                                                  require_upper_bound=100))
        t_separator_2 = t_separator_1.clone()
        # top level names must be unique
        self.assertNotEqual(t_separator_1.name, t_separator_2.name)
        # a piecewise model built from clones must still generate
        pw = Piecewise([banking_model_1, banking_model_2, banking_model_3],
                       t_separators=[t_separator_1, t_separator_2])
        res = self.common_model_test(pw)
    def test_clone_2(self):
        # Cloning a Replicated model wrapping a LambdaEvent with
        # sub-events must produce independently-sampled series.
        base_event = NormalEvent() + PoissonEvent()
        def aux(t, i, memory, sub_events):
            res = 2 * sub_events['base'].execute(t)
            return res
        print(aux.__closure__)
        base_model = LambdaEvent(aux, sub_events={'base': base_event})
        model = Replicated(base_model, NormalEvent(mean=10, std=5), max_replication=2)
        data = self.common_model_test(model, n=2)
        self.assertTrue((data[0].iloc[-10:-1].values != data[1].iloc[-10:-1].values).any())
    def test_clone_3(self):
        # Self-referential events (PastEvent pointing back into its own
        # expression) must survive cloning.
        pe = PastEvent(1)
        event = ConstantEvent(1) + pe
        pe.refers_to(event)
        self.common_model_test(event)
        # cloning must not break anything
        cloned_event = event.clone()
        self.common_model_test(cloned_event)
        self.assertNotEqual(event._causal_parameters[0].name, cloned_event._causal_parameters[0].name)
    def test_constant_generation(self):
        constant_event_1 = ConstantEvent(10)
        data_1 = self.common_model_test(constant_event_1, n=2)
        # series generated from a constant must have the same values
        self.assertTrue((data_1[0].iloc[-10:-1].values == data_1[1].iloc[-10:-1].values).all())
    def test_lambda_composition_generation(self):
        #
        # Various composition strategies
        #
        events = [NormalEvent(0, 1),
                  NormalEvent(0, 1)*ConstantEvent(1000),
                  NormalEvent(0, 1)+ConstantEvent(1000),
                  NormalEvent(0, 1)-ConstantEvent(1000),
                  NormalEvent(0, 1)/ConstantEvent(1000)]
        data_sets = [self.common_model_test(e, n=2) for e in events]
        # check each composition behavior
        for data in data_sets:
            # different generated data series must have different values
            #print(data[0].iloc[-10:-1].values, data[1].iloc[-10:-1].values)
            self.assertFalse((data[0].iloc[-10:-1].values == data[1].iloc[-10:-1].values).all())
05191626d5e04f239f2dd55fcca267c6efac5461 | 4,123 | py | Python | tensorforce/core/optimizers/optimizer.py | vbelus/tensorforce | 85504f49f33b4114cdbb19d73cc196539a702a2c | [
"Apache-2.0"
] | 1 | 2019-10-18T17:36:28.000Z | 2019-10-18T17:36:28.000Z | tensorforce/core/optimizers/optimizer.py | vbelus/tensorforce | 85504f49f33b4114cdbb19d73cc196539a702a2c | [
"Apache-2.0"
] | null | null | null | tensorforce/core/optimizers/optimizer.py | vbelus/tensorforce | 85504f49f33b4114cdbb19d73cc196539a702a2c | [
"Apache-2.0"
] | 1 | 2020-07-13T03:00:34.000Z | 2020-07-13T03:00:34.000Z | # Copyright 2018 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from tensorforce import TensorforceError, util
from tensorforce.core import Module
class Optimizer(Module):
    """
    Base class for optimizers, which compute and apply per-variable delta
    updates to a list of trainable variables.

    Args:
        name (string): Module name
            (<span style="color:#0000C0"><b>internal use</b></span>).
        summary_labels ('all' | iter[string]): Labels of summaries to record
            (<span style="color:#00C000"><b>default</b></span>: inherit value of parent module).
    """

    def __init__(self, name, summary_labels=None):
        super().__init__(name=name, summary_labels=summary_labels)

    def tf_step(self, variables, **kwargs):
        """Compute the list of deltas (one per variable); implemented by subclasses."""
        raise NotImplementedError

    def tf_apply_step(self, variables, deltas):
        """Add each delta to its corresponding variable.

        Returns:
            A no-op grouping the in-place assignment operations.

        Raises:
            TensorforceError: if ``variables`` and ``deltas`` differ in length.
        """
        if len(variables) != len(deltas):
            raise TensorforceError("Invalid variables and deltas lists.")
        assignments = list()
        for variable, delta in zip(variables, deltas):
            assignments.append(tf.assign_add(ref=variable, value=delta))
        with tf.control_dependencies(control_inputs=assignments):
            return util.no_operation()

    def tf_minimize(self, variables, **kwargs):
        """Run one optimization step via ``step`` and record update summaries.

        Returns:
            A no-op that depends on the (already applied) deltas.
        """
        if any(variable.dtype != util.tf_dtype(dtype='float') for variable in variables):
            # Bug fix: the exception instance was previously created but never
            # raised, silently accepting non-float variables.
            raise TensorforceError.unexpected()
        deltas = self.step(variables=variables, **kwargs)
        # Global norm of the whole update, recorded as a scalar summary.
        update_norm = tf.linalg.global_norm(t_list=deltas)
        deltas = self.add_summary(
            label='update-norm', name='update-norm', tensor=update_norm, pass_tensors=deltas
        )
        for n in range(len(variables)):
            name = variables[n].name
            # TF variable names always end in ':0' (first output of the op).
            if name[-2:] != ':0':
                raise TensorforceError.unexpected()
            deltas[n] = self.add_summary(
                label=('updates', 'updates-full'), name=(name[:-2] + '-update'), tensor=deltas[n],
                mean_variance=True
            )
            deltas[n] = self.add_summary(
                label='updates-full', name=(name[:-2] + '-update'), tensor=deltas[n]
            )
        with tf.control_dependencies(control_inputs=deltas):
            return util.no_operation()

    def add_variable(self, name, dtype, shape, is_trainable=False, initializer='zeros'):
        """Create an optimizer-internal (never trainable) variable.

        Raises:
            TensorforceError: if a trainable variable is requested.
        """
        if is_trainable:
            raise TensorforceError("Invalid trainable variable.")
        return super().add_variable(
            name=name, dtype=dtype, shape=shape, is_trainable=is_trainable, initializer=initializer
        )
| 41.23 | 101 | 0.62115 |
abb2466001716bd862791946936f478b4e0f91ce | 787 | py | Python | tests/test_measuring_from_threads.py | tqsd/EQSN_python | 823a315b1c2f5658cded19d8c97e579ce7285c42 | [
"MIT"
] | 3 | 2020-05-03T15:09:41.000Z | 2021-12-17T11:26:34.000Z | tests/test_measuring_from_threads.py | tqsd/EQSN_python | 823a315b1c2f5658cded19d8c97e579ce7285c42 | [
"MIT"
] | 5 | 2020-03-13T10:03:39.000Z | 2020-07-09T12:56:04.000Z | tests/test_measuring_from_threads.py | tqsd/EQSN_python | 823a315b1c2f5658cded19d8c97e579ce7285c42 | [
"MIT"
] | 1 | 2020-05-03T15:06:24.000Z | 2020-05-03T15:06:24.000Z | import threading
import random
import time
from eqsn import EQSN
def test_measure_from_threads():
    """Smoke test: applying gates concurrently must not deadlock or crash.

    Ten qubits are entangled with the first one, then one worker thread per
    qubit repeatedly applies Hadamard gates while the others do the same.
    """
    simulator = EQSN()

    def apply_hadamards(qubit_id):
        # Each worker applies a random number (10-19) of H gates, sleeping
        # briefly between applications so the threads interleave.
        for _ in range(random.randrange(10, 20, 1)):
            time.sleep(0.1)
            simulator.H_gate(qubit_id)

    qubit_ids = [str(index) for index in range(10)]
    for qubit_id in qubit_ids:
        simulator.new_qubit(qubit_id)
    # Entangle every other qubit with the first via CNOT.
    control = qubit_ids[0]
    for target in qubit_ids[1:]:
        simulator.cnot_gate(control, target)
    workers = []
    for qubit_id in qubit_ids:
        worker = threading.Thread(target=apply_hadamards, args=(qubit_id,))
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()
    simulator.stop_all()
if __name__ == "__main__":
    # Allow running this test file directly as a script.
    test_measure_from_threads()
    exit(0)
| 19.195122 | 69 | 0.584498 |
629e6e8c5ac86ec291072f229c9ae5a4ade46716 | 2,731 | py | Python | Server/Python/src/dbs/components/migration/StartUp.py | vkuznet/DBS | 14df8bbe8ee8f874fe423399b18afef911fe78c7 | [
"Apache-2.0"
] | 8 | 2015-08-14T04:01:32.000Z | 2021-06-03T00:56:42.000Z | Server/Python/src/dbs/components/migration/StartUp.py | yuyiguo/DBS | 14df8bbe8ee8f874fe423399b18afef911fe78c7 | [
"Apache-2.0"
] | 162 | 2015-01-07T21:34:47.000Z | 2021-10-13T09:42:41.000Z | Server/Python/src/dbs/components/migration/StartUp.py | yuyiguo/DBS | 14df8bbe8ee8f874fe423399b18afef911fe78c7 | [
"Apache-2.0"
] | 16 | 2015-01-22T15:27:29.000Z | 2021-04-28T09:23:28.000Z | #!/usr/bin/env python
from optparse import OptionParser
import cherrypy, logging, sys
from WMCore.Configuration import Configuration, loadConfigurationFile
from dbs.components.migration.DBSMigrationServer import DBSMigrationServer, MigrationTask, MigrationWebMonitoring
def get_command_line_options(executable_name, arguments):
    """Parse command-line options for the migration server.

    NOTE(review): the *arguments* parameter is unused — optparse reads
    sys.argv directly in parse_args(); confirm whether passing it through
    was intended.

    :param executable_name: program name shown in the usage string
    :param arguments: unused (kept for interface compatibility)
    :return: parsed options; exits with help text if --config is missing
    """
    parser = OptionParser(usage='%s options' % executable_name)
    parser.add_option('-c', '--config', type='string', dest='config', help='Migration Server Configuration')
    options, args = parser.parse_args()
    if not (options.config):
        parser.print_help()
        parser.error('You need to provide following options, --config=DefaultConfig.py')
    return options
def configure(configfile):
    """Load the WMCore configuration file and apply the cherrypy settings.

    :param configfile: path to a WMCore-style Python configuration file
    :return: dict of the form {'database': {instance: {threads, dbowner,
             engineParameters, connectUrl}}} for each migration instance
    """
    cfg = loadConfigurationFile(configfile)
    web_cfg = cfg.web.dictionary_()
    ###configure cherry py
    cherrypy_cfg = {'server.host' : web_cfg.get('host', '127.0.0.1'),
                    'server.socket_port' : web_cfg.get('port', 8251),
                    'log.screen' : web_cfg.get('log_screen', False),
                    'server.thread_pool' : web_cfg.get('thread_pool', 10)}
    cherrypy.config.update(cherrypy_cfg)
    # Log levels fall back to WARNING (errors) / INFO (access) when unset.
    error_log_level = web_cfg.get('error_log_level', logging.WARNING)
    access_log_level = web_cfg.get("access_log_level", logging.INFO)
    cherrypy.log.error_log.setLevel(error_log_level)
    cherrypy.log.access_log.setLevel(access_log_level)
    migration_cfg = cfg.dbsmigration
    migration_config = {}
    # One database entry per configured migration instance.
    for instance in migration_cfg.instances:
        instance_settings = getattr(migration_cfg.database.instances, instance)
        migration_config.setdefault('database', {}).update({instance :
            {'threads' : instance_settings.threads,
             'dbowner' : instance_settings.dbowner,
             'engineParameters' : instance_settings.engineParameters,
             'connectUrl' : instance_settings.connectUrl}})
    return migration_config
if __name__ == '__main__':
    # NOTE: Python 2 module (uses xrange below).
    options = get_command_line_options(__name__, sys.argv)
    migration_config = configure(options.config)
    # Spawn one migration worker per configured thread for each DB instance.
    for instance in migration_config['database'].keys():
        for thread in xrange(migration_config['database'][instance]['threads']):
            DBSMigrationServer(MigrationTask(migration_config['database'][instance]), duration = 5)
    root = MigrationWebMonitoring()
    cherrypy.log.error_log.info("*********** DBS Migration Server Starting. ************")
    ##mount tree and start service
    cherrypy.tree.mount(root)
    cherrypy.quickstart(root)
| 43.349206 | 116 | 0.656536 |
f69fbfe83a9ede9376849ca8c7ce1ab8a2948695 | 154 | py | Python | python/gdal_cookbook/cookbook_geometry/create_geometry_from_wkt.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | [
"MIT"
] | null | null | null | python/gdal_cookbook/cookbook_geometry/create_geometry_from_wkt.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | [
"MIT"
] | null | null | null | python/gdal_cookbook/cookbook_geometry/create_geometry_from_wkt.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | [
"MIT"
] | null | null | null | from osgeo import ogr
# Parse a WKT point with the GDAL/OGR binding and echo its coordinates.
wkt = "POINT (1120351.5712494177 741921.4223245403)"
point = ogr.CreateGeometryFromWkt(wkt)
print(f'{point.GetX()},{point.GetY()}')
b963768ae6b5d7ae5dad85cd12a592f418e50cee | 5,318 | py | Python | lldb/packages/Python/lldbsuite/test/python_api/watchpoint/watchlocation/TestTargetWatchAddress.py | jimingham/llvm-project | 598fd4f8d7ed21b6c4473aec85265f49512d25bd | [
"Apache-2.0"
] | 1 | 2022-03-28T05:58:03.000Z | 2022-03-28T05:58:03.000Z | lldb/packages/Python/lldbsuite/test/python_api/watchpoint/watchlocation/TestTargetWatchAddress.py | DougGregor/llvm-project | 97602a8bd045f087e02348b64ffbdd143a33e10b | [
"Apache-2.0"
] | 2 | 2022-02-19T07:12:22.000Z | 2022-02-27T11:21:53.000Z | llvm-project/lldb/packages/Python/lldbsuite/test/python_api/watchpoint/watchlocation/TestTargetWatchAddress.py | ananthdurbha/llvm-mlir | a5e0b8903b83c777fa40a0fa538fbdb8176fd5ac | [
"MIT"
] | null | null | null | """
Use lldb Python SBtarget.WatchAddress() API to create a watchpoint for write of '*g_char_ptr'.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TargetWatchAddressAPITestCase(TestBase):
    """Tests for SBTarget.WatchAddress(): setting a write watchpoint on
    *g_char_ptr and rejecting an unsupported watch size."""

    mydir = TestBase.compute_mydir(__file__)

    def setUp(self):
        # Call super's setUp().
        TestBase.setUp(self)
        # Our simple source filename.
        self.source = 'main.cpp'
        # Find the line number to break inside main().
        self.line = line_number(
            self.source, '// Set break point at this line.')
        # This is for verifying that watch location works.
        self.violating_func = "do_bad_thing_with_location"

    @add_test_categories(['pyapi'])
    def test_watch_address(self):
        """Exercise SBTarget.WatchAddress() API to set a watchpoint."""
        self.build()
        exe = self.getBuildArtifact("a.out")

        # Create a target by the debugger.
        target = self.dbg.CreateTarget(exe)
        self.assertTrue(target, VALID_TARGET)

        # Now create a breakpoint on main.c.
        breakpoint = target.BreakpointCreateByLocation(self.source, self.line)
        self.assertTrue(breakpoint and
                        breakpoint.GetNumLocations() == 1,
                        VALID_BREAKPOINT)

        # Now launch the process, and do not stop at the entry point.
        process = target.LaunchSimple(
            None, None, self.get_process_working_directory())

        # We should be stopped due to the breakpoint.  Get frame #0.
        process = target.GetProcess()
        self.assertTrue(process.GetState() == lldb.eStateStopped,
                        PROCESS_STOPPED)
        thread = lldbutil.get_stopped_thread(
            process, lldb.eStopReasonBreakpoint)
        frame0 = thread.GetFrameAtIndex(0)

        value = frame0.FindValue('g_char_ptr',
                                 lldb.eValueTypeVariableGlobal)
        # Build an SBValue for the byte the global pointer points at.
        pointee = value.CreateValueFromAddress(
            "pointee",
            value.GetValueAsUnsigned(0),
            value.GetType().GetPointeeType())
        # Watch for write to *g_char_ptr.
        error = lldb.SBError()
        watchpoint = target.WatchAddress(
            value.GetValueAsUnsigned(), 1, False, True, error)
        self.assertTrue(value and watchpoint,
                        "Successfully found the pointer and set a watchpoint")
        self.DebugSBValue(value)
        self.DebugSBValue(pointee)

        # Hide stdout if not running with '-t' option.
        if not self.TraceOn():
            self.HideStdout()

        print(watchpoint)

        # Continue.  Expect the program to stop due to the variable being
        # written to.
        process.Continue()

        if (self.TraceOn()):
            lldbutil.print_stacktraces(process)

        thread = lldbutil.get_stopped_thread(
            process, lldb.eStopReasonWatchpoint)
        self.assertTrue(thread, "The thread stopped due to watchpoint")
        self.DebugSBValue(value)
        self.DebugSBValue(pointee)

        # The stop must have happened inside the function that writes through
        # the watched location.
        self.expect(
            lldbutil.print_stacktrace(
                thread,
                string_buffer=True),
            exe=False,
            substrs=[
                self.violating_func])

        # This finishes our test.

    @add_test_categories(['pyapi'])
    # No size constraint on MIPS for watches
    @skipIf(archs=['mips', 'mipsel', 'mips64', 'mips64el'])
    @skipIf(archs=['s390x'])  # Likewise on SystemZ
    def test_watch_address_with_invalid_watch_size(self):
        """Exercise SBTarget.WatchAddress() API but pass an invalid watch_size."""
        self.build()
        exe = self.getBuildArtifact("a.out")

        # Create a target by the debugger.
        target = self.dbg.CreateTarget(exe)
        self.assertTrue(target, VALID_TARGET)

        # Now create a breakpoint on main.c.
        breakpoint = target.BreakpointCreateByLocation(self.source, self.line)
        self.assertTrue(breakpoint and
                        breakpoint.GetNumLocations() == 1,
                        VALID_BREAKPOINT)

        # Now launch the process, and do not stop at the entry point.
        process = target.LaunchSimple(
            None, None, self.get_process_working_directory())

        # We should be stopped due to the breakpoint.  Get frame #0.
        process = target.GetProcess()
        self.assertTrue(process.GetState() == lldb.eStateStopped,
                        PROCESS_STOPPED)
        thread = lldbutil.get_stopped_thread(
            process, lldb.eStopReasonBreakpoint)
        frame0 = thread.GetFrameAtIndex(0)

        value = frame0.FindValue('g_char_ptr',
                                 lldb.eValueTypeVariableGlobal)
        pointee = value.CreateValueFromAddress(
            "pointee",
            value.GetValueAsUnsigned(0),
            value.GetType().GetPointeeType())
        # Watch for write to *g_char_ptr.
        error = lldb.SBError()
        # A watch size of 365 bytes is unsupported; WatchAddress must fail.
        watchpoint = target.WatchAddress(
            value.GetValueAsUnsigned(), 365, False, True, error)
        self.assertFalse(watchpoint)
        self.expect(error.GetCString(), exe=False,
                    substrs=['watch size of %d is not supported' % 365])
| 36.675862 | 94 | 0.617525 |
5f84f995804b7238c743a99e555a1d80255f0f20 | 601 | py | Python | libros/forms.py | dkwelffer/rocbook | 87c92f26e8bd2139cc57d11f65c3fb82ebbc5223 | [
"Apache-2.0"
] | null | null | null | libros/forms.py | dkwelffer/rocbook | 87c92f26e8bd2139cc57d11f65c3fb82ebbc5223 | [
"Apache-2.0"
] | null | null | null | libros/forms.py | dkwelffer/rocbook | 87c92f26e8bd2139cc57d11f65c3fb82ebbc5223 | [
"Apache-2.0"
] | null | null | null |
from django import forms
class ActualizarLibroForm(forms.Form):
    """Form for updating a book's catalogue data; all fields are mandatory."""
    # NOTE(review): the publication year is a CharField (free text), not an
    # IntegerField — confirm whether numeric validation is wanted.
    isbn_libro = forms.CharField(label="ISBN del libro", required=True)
    titulo_libro = forms.CharField(label="Nombre del libro", required=True)
    autor_libro = forms.CharField(label="Autor del libro", required=True)
    anio_libro = forms.CharField(label="Año de publicación", required=True)
class ActualizarPaginaForm(forms.Form):
    """Form for editing the text content of a single page."""
    texto_pagina = forms.CharField(
        label="Contenido de la página", required=True)
class BusquedaLibroForm(forms.Form):
    """Form carrying a free-text book search query."""
    busqueda = forms.CharField(label="Busqueda", required=True)
| 31.631579 | 75 | 0.748752 |
5f26eb01ce0b44c64b107b0f93484bb779011ded | 43 | py | Python | tests/components/tile/__init__.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | tests/components/tile/__init__.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | tests/components/tile/__init__.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Define tests for the Tile component."""
| 21.5 | 42 | 0.697674 |
eb5e67387944de7f45754d52182d8c5ad5f3b966 | 2,471 | py | Python | switcheo/test_switcheo_utils.py | Devel484/switcheo-python | 5b3b77fc31328dc5b23fa83a5e71b9a26723559a | [
"MIT"
] | null | null | null | switcheo/test_switcheo_utils.py | Devel484/switcheo-python | 5b3b77fc31328dc5b23fa83a5e71b9a26723559a | [
"MIT"
] | null | null | null | switcheo/test_switcheo_utils.py | Devel484/switcheo-python | 5b3b77fc31328dc5b23fa83a5e71b9a26723559a | [
"MIT"
] | null | null | null | import unittest
from switcheo.utils import get_epoch_milliseconds, num2hexstring, num2varint, reverse_hex,\
stringify_message, Request
# Client pointed at a public mock API for exercising get/post round-trips.
r = Request(api_url='https://jsonplaceholder.typicode.com/', api_version='')
# Client with the default Switcheo API settings (exercised by the status test).
s = Request()
class TestSwitcheoUtils(unittest.TestCase):
    """Unit tests for switcheo.utils helpers plus live HTTP round-trips
    against jsonplaceholder (network required for the request tests)."""

    def test_get_epoch_milliseconds(self):
        # NOTE(review): this assertion compares the epoch-ms VALUE against 13
        # and is therefore trivially true; presumably the intent was to check
        # the number of digits — confirm.
        self.assertGreaterEqual(get_epoch_milliseconds(), 13)

    def test_num2hexstring(self):
        """Numbers render as fixed-size little-endian hex strings."""
        self.assertEqual(num2hexstring(0), '00')
        self.assertEqual(num2hexstring(255), 'ff')
        self.assertEqual(num2hexstring(256, size=2, little_endian=True), '0001')
        self.assertEqual(num2hexstring(2222, size=2, little_endian=True), 'ae08')

    def test_num2varint(self):
        """Bitcoin-style varint encoding: fd/fe/ff prefixes by magnitude."""
        self.assertEqual(num2varint(0), '00')
        self.assertEqual(num2varint(252), 'fc')
        self.assertEqual(num2varint(253), 'fdfd00')
        self.assertEqual(num2varint(255), 'fdff00')
        self.assertEqual(num2varint(256), 'fd0001')
        self.assertEqual(num2varint(2222), 'fdae08')
        self.assertEqual(num2varint(111111), 'fe07b20100')
        self.assertEqual(num2varint(11111111111), 'ffc719469602000000')

    def test_reverse_hex(self):
        """Byte-pairwise reversal of a hex string (endianness flip)."""
        self.assertEqual(reverse_hex('ABCD'), 'CDAB')
        self.assertEqual(reverse_hex('0000000005f5e100'), '00e1f50500000000')

    def test_stringify_message(self):
        """JSON is serialized with sorted keys and no whitespace."""
        json_msg = {"name": "John Smith", "age": 27, "siblings": ["Jane", "Joe"]}
        stringify_msg = '{"age":27,"name":"John Smith","siblings":["Jane","Joe"]}'
        self.assertEqual(stringify_message(json_msg), stringify_msg)

    def test_request_get(self):
        """GET against the mock API returns the canned post #1."""
        json_msg = {
            "userId": 1,
            "id": 1,
            "title": "sunt aut facere repellat provident occaecati excepturi optio reprehenderit",
            "body": "quia et suscipit\nsuscipit recusandae consequuntur expedita et cum\nreprehenderit molestiae ut ut quas totam\nnostrum rerum est autem sunt rem eveniet architecto"}
        self.assertDictEqual(r.get(path='/posts/1'), json_msg)

    def test_request_post(self):
        """POST against the mock API echoes the payload with a new id."""
        json_dict = {
            'title': 'foo',
            'body': 'bar',
            'userId': 1}
        json_msg = {
            'id': 101,
            'title': 'foo',
            'body': 'bar',
            'userId': 1}
        self.assertDictEqual(r.post(path='/posts', json_data=json_dict), json_msg)

    def test_request_status(self):
        """The default client's status endpoint reports ok."""
        self.assertDictEqual(s.status(), {'status': 'ok'})
| 39.854839 | 184 | 0.649535 |
0c508de0e4a998bd5ff71ad6935bd21d78608c6f | 5,888 | py | Python | rab_distributed_system.py | senjianlu/rab_python_packages | 9116ecff2e29889a327f0e1063760edd456f8008 | [
"MIT"
] | 35 | 2021-03-08T06:49:55.000Z | 2022-03-06T00:16:39.000Z | rab_distributed_system.py | senjianlu/rab_python_packages | 9116ecff2e29889a327f0e1063760edd456f8008 | [
"MIT"
] | null | null | null | rab_distributed_system.py | senjianlu/rab_python_packages | 9116ecff2e29889a327f0e1063760edd456f8008 | [
"MIT"
] | 8 | 2021-03-08T07:31:11.000Z | 2021-09-25T09:19:38.000Z | #!/usr/bin/env python
# -*- coding:UTF-8 -*-
#
# @AUTHOR: Rabbir
# @FILE: /root/GitHub/rab_python_packages/rab_distributed_node.py
# @DATE: 2021/07/25 Sun
# @TIME: 14:54:27
#
# @DESCRIPTION: 分布式系统管理模块
import sys
from datetime import datetime, timezone, timedelta
sys.path.append("..") if (".." not in sys.path) else True
from rab_python_packages import rab_config
from rab_python_packages import rab_logging
from rab_python_packages import rab_postgresql
# 日志记录
r_logger = rab_logging.r_logger()
"""
@description: 获取当前机器的节点 ID
-------
@param:
-------
@return:
"""
def get_node_id():
node_id = rab_config.load_package_config(
"rab_config.ini", "rab_distributed_system", "node_id")
return node_id
"""
@description: 获取此节点延迟
-------
@param:
-------
@return:
"""
def get_node_delay_time():
# 获取节点所需要等待的单位时间
node_delay_time = rab_config.load_package_config(
"rab_config.ini", "rab_distributed_system", "node_delay_time")
# 获取节点序号,除开 main 之外均需等待
node_no = get_node_id().split("_")[-1]
if ("main" == node_no.lower()):
return 0
elif(node_no.isdigit()):
return int(node_delay_time)*int(node_no)
else:
return 0
"""
@description: 系统状态类
-------
@param:
-------
@return:
"""
class r_status():
"""
@description: 初始化
-------
@param:
-------
@return:
"""
def __init__(self,
project,
subproject,
module=None,
function=None,
status=None,
start_time=None,
over_time=None,
result=None):
self.project = project
self.subproject = subproject
self.module = module
self.function = function
self.status = status
self.start_time = start_time
self.over_time = over_time
self.result = result
"""
@description: 更新
-------
@param:
-------
@return:
"""
def update(self, r_pgsql_driver):
update_sql = """
INSERT INTO
sa_status(
ss_project,
ss_subproject,
ss_method,
ss_function,
ss_status,
ss_start_time,
ss_over_time,
ss_result,
ss_history
) VALUES(
%s, %s, %s, %s, %s,
%s, %s, %s, '{}'
) ON CONFLICT(ss_project, ss_subproject, ss_method, ss_function)
DO UPDATE SET
ss_status = %s,
ss_start_time = %s,
ss_over_time = %s,
ss_result = %s
"""
data = [
self.project,
self.subproject,
self.module,
self.function,
self.status,
self.start_time,
self.over_time,
self.result,
self.status,
self.start_time,
self.over_time,
self.result
]
r_pgsql_driver.update(update_sql, data)
"""
@description: 保存运行结果
-------
@param:
-------
@return:
"""
def save(self, r_pgsql_driver):
update_sql = """
UPDATE
sa_status
SET
ss_history = ss_history || JSONB_BUILD_OBJECT(
TO_CHAR(ss_start_time AT TIME ZONE 'Asia/Shanghai',
'YYYY-MM-DD HH24:MI:SS'),
ss_result)
WHERE
ss_project = %s
AND ss_subproject = %s
AND ss_method = %s
AND ss_function = %s
"""
data = [self.project, self.subproject, self.module, self.function]
r_pgsql_driver.update(update_sql, data)
"""
@description: 方法开始
-------
@param:
-------
@return:
"""
def start(self, r_pgsql_driver, module=None, function=None):
if (module):
self.module = module
if (function):
self.function = function
self.status = "running"
self.start_time = datetime.now(timezone.utc)
self.over_time = None
self.result = None
self.update(r_pgsql_driver)
r_logger.info("方法 {} 开始!".format(self.function))
"""
@description: 方法结束
-------
@param:
-------
@return:
"""
def over(self, result, r_pgsql_driver, module=None, function=None):
if (module):
self.module = module
if (function):
self.function = function
self.status = "over"
# self.start_time = None
self.over_time = datetime.now(timezone.utc)
self.result = result
self.update(r_pgsql_driver)
self.save(r_pgsql_driver)
r_logger.info("方法 {} 结束!".format(self.function))
"""
@description: 方法出错
-------
@param:
-------
@return:
"""
def error(self, result, r_pgsql_driver, module=None, function=None):
if (module):
self.module = module
if (function):
self.function = function
self.status = "error"
# self.start_time = None
self.over_time = datetime.now(timezone.utc)
self.result = result
self.update(r_pgsql_driver)
r_logger.error("方法 {} 出错!".format(self.function))
"""
@description: 单体测试
-------
@param:
-------
@return:
"""
if __name__ == "__main__":
import time
r_pgsql_driver = rab_postgresql.r_pgsql_driver()
try:
r_status = r_status("test_project", "test_subproject", "test_module")
r_status.start(r_pgsql_driver, function="test_function")
time.sleep(10)
r_status.over("test_result", r_pgsql_driver, function="test_function")
except Exception as e:
r_logger.error("单体测试出错!")
r_logger.error(e)
finally:
r_pgsql_driver.close() | 24.330579 | 78 | 0.53142 |
b3915a0c9cc5e714ac12201e338bb4869209ccc3 | 929 | py | Python | grammar/scan.py | esevre/silvius | 4952f76e0cbbe7f9454264a31ceea9b9706bdf35 | [
"BSD-2-Clause"
] | null | null | null | grammar/scan.py | esevre/silvius | 4952f76e0cbbe7f9454264a31ceea9b9706bdf35 | [
"BSD-2-Clause"
] | null | null | null | grammar/scan.py | esevre/silvius | 4952f76e0cbbe7f9454264a31ceea9b9706bdf35 | [
"BSD-2-Clause"
] | null | null | null | # Lexer that produces a sequence of tokens (keywords + ANY).
import re
from lm import get_terminals
def find_keywords(parser):
    """Populate the module-level keyword and noise-word sets from the parser.

    NOTE: this is a Python 2 module (see the `print` statement and __cmp__
    elsewhere in the file).
    """
    global keywords
    keywords = get_terminals(parser)
    global noise
    # Lowercased recognizer noise markers plus the unknown-word token.
    noise = set([x.lower() for x in ['[BREATH]', '[COUGH]', '[NOISE]', \
        '[SMACK]', '[UH]', '[UM]', '<unk>']])
class Token:
    # Lexical token: `type` is a grammar terminal or 'ANY'; `extra` holds the
    # raw word for ANY tokens; `wordno` is the 1-based source-word index.
    def __init__(self, type, wordno=-1, extra=''):
        self.type = type
        self.extra = extra
        self.wordno = wordno
    def __cmp__(self, o):
        # Python 2 comparison hook: tokens compare by their type string.
        return cmp(self.type, o)
    def __repr__(self):
        return str(self.type)
def scan(line):
    """Tokenize a lowercased utterance into keyword / ANY Token objects,
    terminated by an END token."""
    tokens = []
    wordno = 0
    for t in line.lower().split():
        if(t in noise):
            pass
            # NOTE(review): this branch is a no-op, so noise words still fall
            # through and are emitted as ANY tokens below — confirm whether a
            # `continue` was intended here.
        wordno += 1
        if(t in keywords):
            tokens.append(Token(t, wordno))
        else:
            tokens.append(Token('ANY', wordno, t))
    tokens.append(Token('END'))
    print tokens
    return tokens
| 24.447368 | 72 | 0.564047 |
3147cdf72d2484c335b4ba65844019e490ee7008 | 61 | py | Python | demo01.py | kjhflakf/geng | d609992d03cee119798b5f9fba586debe3fbb7f6 | [
"MIT"
] | null | null | null | demo01.py | kjhflakf/geng | d609992d03cee119798b5f9fba586debe3fbb7f6 | [
"MIT"
] | null | null | null | demo01.py | kjhflakf/geng | d609992d03cee119798b5f9fba586debe3fbb7f6 | [
"MIT"
] | null | null | null | num1 = 10
# Sample integer assignments for the demo script (values are arbitrary).
num2 = 20
num3 = 123
num4 = 40
num5 = 50
num6 = 60
| 8.714286 | 10 | 0.606557 |
31799e3a29569d98d5970b7309ecfb35802b2189 | 1,420 | py | Python | setup.py | echan00/FARM | 2dab3362c749eaddccacfc71d3bcfee4753fd72a | [
"Apache-2.0"
] | 4 | 2020-07-22T02:22:52.000Z | 2021-12-27T22:26:37.000Z | setup.py | echan00/FARM | 2dab3362c749eaddccacfc71d3bcfee4753fd72a | [
"Apache-2.0"
] | null | null | null | setup.py | echan00/FARM | 2dab3362c749eaddccacfc71d3bcfee4753fd72a | [
"Apache-2.0"
] | 1 | 2021-04-07T19:30:02.000Z | 2021-04-07T19:30:02.000Z | from io import open
from setuptools import find_packages, setup
with open("requirements.txt") as f:
parsed_requirements = f.read().splitlines()
# remove blank lines and comments
parsed_requirements = [
x.strip()
for x in parsed_requirements
if ((x.strip()[0] != "#") and (len(x.strip()) > 3))
]
# Package metadata; the version here must stay in sync with download_url's tag.
setup(
    name="farm",
    version="0.2.0",
    author="Malte Pietsch, Timo Moeller, Branden Chan, Tanay Soni, Huggingface Team Authors, Google AI Language Team Authors, Open AI team Authors",
    author_email="malte.pietsch@deepset.ai",
    description="Toolkit for finetuning and evaluating transformer based language models",
    long_description=open("readme.rst", "r", encoding="utf-8").read(),
    long_description_content_type="text/x-rst",
    keywords="BERT NLP deep learning language-model transformer",
    license="Apache",
    url="https://gitlab.com/deepset-ai/ml/lm/farm",
    download_url="https://github.com/deepset-ai/FARM/archive/0.2.0.tar.gz",
    packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
    install_requires=parsed_requirements,
    python_requires=">=3.5.0",
    tests_require=["pytest"],
    classifiers=[
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python :: 3",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
)
| 37.368421 | 148 | 0.68169 |
8d0ac90eb3e900ece99c8045324523eec91c875d | 11,103 | py | Python | rnn_train.py | qaz734913414/Real-time-dynamic-perception-of-facial-expression | 603ea8c18ce6c6d2c011416002b63eeace888a7c | [
"MIT"
] | 10 | 2019-07-19T10:59:06.000Z | 2019-10-14T12:57:31.000Z | rnn_train.py | jerrynpc/Real-time-dynamic-perception-of-facial-expression | 603ea8c18ce6c6d2c011416002b63eeace888a7c | [
"MIT"
] | null | null | null | rnn_train.py | jerrynpc/Real-time-dynamic-perception-of-facial-expression | 603ea8c18ce6c6d2c011416002b63eeace888a7c | [
"MIT"
] | 3 | 2019-07-24T06:01:55.000Z | 2019-10-14T13:22:28.000Z |
#Written by jerrynpc
#写于2019-6月 雄盛科技
from __future__ import print_function
import tensorflow as tf
from time import time
import numpy as np
from LSTM.setting import batch_size, width, height, rnn_size, out_size, channel, learning_rate, num_epoch
#########################################
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import LSTM
import matplotlib
import matplotlib.pyplot as plt
import tensorflow as tf
import keras.backend.tensorflow_backend as KTF
from keras.backend.tensorflow_backend import set_session
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
# 指定第一块GPU可用
os.environ["CUDA_VISIBLE_DEVICES"] = "0" #指定GPU的第二种方法
#os.environ["CUDA_VISIBLE_DEVICES"] = "1"
config = tf.ConfigProto()
config.gpu_options.allocator_type = 'BFC' #A "Best-fit with coalescing" algorithm, simplified from a version of dlmalloc.
config.gpu_options.per_process_gpu_memory_fraction = 0.8 #定量
config.gpu_options.allow_growth = True #按需
set_session(tf.Session(config=config))
#tf.ConfigProto()
#log_device_placement=True #是否打印设备分配日志
#allow_soft_placement=True #如果你指定的设备不存在,允许TF自动分配设备
'''
训练主函数
tensorboard --logdir=./
'''
def weight_variable(shape, w_alpha=0.01):
    '''
    Create a weight variable initialised with small random noise.
    :param shape: shape of the weight tensor
    :param w_alpha: scale applied to the standard-normal noise
    :return: tf.Variable with the given shape
    '''
    initial = w_alpha * tf.random_normal(shape)
    return tf.Variable(initial)
def bias_variable(shape, b_alpha=0.1):
    '''
    Create a bias variable initialised with small random noise.
    :param shape: shape of the bias tensor
    :param b_alpha: scale applied to the standard-normal noise
    :return: tf.Variable with the given shape
    '''
    initial = b_alpha * tf.random_normal(shape)
    return tf.Variable(initial)
def rnn_graph(x, rnn_size, out_size, width, height, channel):
    '''
    Recurrent (LSTM) network computation graph over image rows.
    :param x: input batch, shape (batch, height, width, channel)
    :param rnn_size: BasicLSTMCell num_units, i.e. the output vector dimension
    :param out_size: number of output classes
    :param width:
    :param height:
    :return: logits tensor of shape (batch, out_size)
    '''
    # Weights and bias projecting the final LSTM output to class logits
    w = weight_variable([rnn_size, out_size])
    b = bias_variable([out_size])
    # LSTM
    # rnn_size is BasicLSTMCell's num_units: the output vector dimension
    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(rnn_size)
    # transpose turns (?,32,448,3) into (32,?,448,3): ?=batch size, 32=height,
    # 448=width, 3=channels (color). Each of the 32 image rows becomes one
    # time step with input (448,3); this is faster and maps to intuition.
    x = tf.transpose(x, [1,0,2,3])
    # reshape: -1 is adaptive; each image row is flattened to channel*width
    x = tf.reshape(x, [-1, channel*width])
    # split along dimension 0 (the default) into `height` pieces, regrouping
    # all images by their row index
    x = tf.split(x, height)
    # The RNN emits one output per time step; only the last output is used
    outputs, status = tf.nn.static_rnn(lstm_cell, x, dtype=tf.float32)
    y_conv = tf.add(tf.matmul(outputs[-1], w), b)
    return y_conv
def accuracy_graph(y, y_conv):
    '''
    Accuracy computation graph.
    :param y: one-hot ground-truth labels
    :param y_conv: predicted logits
    :return: scalar mean accuracy over the batch
    '''
    correct = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    return accuracy
def get_batch(image_list,label_list,img_width,img_height,batch_size,capacity,channel):
    '''
    Build a shuffled TF input queue yielding batches of images and labels.
    :param image_list: list of image file paths
    :param label_list: list of integer labels
    :param img_width: image width
    :param img_height: image height
    :param batch_size:
    :param capacity: queue capacity
    :return: (image_batch, label_batch) tensors
    '''
    image = tf.cast(image_list,tf.string)
    label = tf.cast(label_list,tf.int32)
    input_queue = tf.train.slice_input_producer([image,label],shuffle=True)
    label = input_queue[1]
    image_contents = tf.read_file(input_queue[0])
    image = tf.image.decode_jpeg(image_contents,channels=channel)
    image = tf.cast(image,tf.float32)
    if channel==3:
        image -= [42.79902,42.79902,42.79902] # subtract dataset mean
    elif channel == 1:
        image -= 42.79902 # subtract dataset mean
    image.set_shape((img_height,img_width,channel))
    image_batch,label_batch = tf.train.batch([image,label],batch_size=batch_size,num_threads=64,capacity=capacity)
    label_batch = tf.reshape(label_batch,[batch_size])
    return image_batch,label_batch
def get_file(file_dir):
    '''
    Collect image paths and numeric class labels from a directory tree whose
    sub-folder names are the emotion classes.

    Bug fixes vs. the original:
    * labels were appended as 11/22/.../77 (double digits) instead of 1..7,
      which indexed out of range in onehot() (n_class=9);
    * ``labels.append(8)`` ran unconditionally after the elif chain, so every
      image produced TWO labels and the lists went out of sync;
    * the class name was extracted with ``split("\\\\")``, which only works
      with Windows path separators.

    :param file_dir: root directory to walk
    :return: (image_list, label_list), shuffled together
    '''
    images = []
    for root,sub_folders,files in os.walk(file_dir):
        for name in files:
            images.append(os.path.join(root,name))
    # Parent-folder name -> class index; anything unknown maps to 8.
    label_map = {"anger": 0, "contempt": 1, "disgust": 2, "fear": 3,
                 "happy": 4, "normal": 5, "sad": 6, "surprised": 7}
    labels = []
    for label_name in images:
        # basename(dirname(...)) is separator-agnostic (works on any OS).
        letter = os.path.basename(os.path.dirname(label_name))
        labels.append(label_map.get(letter, 8))
    print("check for get_file:",images[0],"label is ",labels[0])
    # Shuffle images and labels together so pairs stay aligned.
    temp = np.array([images,labels])
    temp = temp.transpose()
    np.random.shuffle(temp)
    image_list = list(temp[:,0])
    label_list = list(temp[:,1])
    label_list = [int(float(i)) for i in label_list]
    return image_list,label_list
# Label format conversion
def onehot(labels):
    '''Convert integer class labels into one-hot row vectors (9 classes).'''
    n_class = 9 # max(labels) + 1
    encoded = np.zeros((len(labels), n_class))
    for row, lab in enumerate(labels):
        encoded[row, lab] = 1
    return encoded
if __name__ == '__main__':
    # Training driver: builds the graph, streams batches through TF1 queues,
    # logs to TensorBoard and checkpoints the model.
    # NOTE(review): height, width, channel, rnn_size, out_size, learning_rate,
    # batch_size and num_epoch are presumably defined earlier in the file — confirm.
    startTime = time()
    # Placeholders sized to the image dimensions.
    x = tf.placeholder(tf.float32, [None, height, width, channel])
    y = tf.placeholder(tf.float32)
    # RNN model
    y_conv = rnn_graph(x, rnn_size, out_size, width, height, channel)
    # Convert logits / one-hot labels to class indices.
    y_conv_prediction = tf.argmax(y_conv, 1)
    y_real = tf.argmax(y, 1)
    # Loss and optimiser nodes.
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
    # Accuracy node.
    accuracy = accuracy_graph(y, y_conv)
    # Training images: get path/label lists, then a queued batch pipeline.
    xs, ys = get_file('./dataset/train/')  # image list and label list
    image_batch, label_batch = get_batch(xs, ys, img_width=width, img_height=height, batch_size=batch_size, capacity=256,channel=channel)
    # Validation set (one fixed batch of 455 samples).
    xs_val, ys_val = get_file('./dataset/validation/')  # image list and label list
    image_val_batch, label_val_batch = get_batch(xs_val, ys_val, img_width=width, img_height=height,batch_size=455, capacity=256,channel=channel)
    # Start the session and begin training.
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    # Start the queue-runner threads.
    coord = tf.train.Coordinator() # coordinator manages the threads
    threads = tf.train.start_queue_runners(coord=coord, sess=sess)
    # TensorBoard logging (separate writer for validation curves).
    summary_writer = tf.summary.FileWriter('./logs/', graph=sess.graph, flush_secs=15)
    summary_writer2 = tf.summary.FileWriter('./logs/plot2/', flush_secs=15)
    tf.summary.scalar(name='loss_func', tensor=loss)
    tf.summary.scalar(name='accuracy', tensor=accuracy)
    merged_summary_op = tf.summary.merge_all()
    step = 0
    acc_rate = 0.98
    epoch_start_time = time()
    for i in range(num_epoch):
        batch_x, batch_y = sess.run([image_batch, label_batch])
        batch_y = onehot(batch_y)
        merged_summary,_,loss_show = sess.run([merged_summary_op,optimizer,loss], feed_dict={x: batch_x, y: batch_y})
        summary_writer.add_summary(merged_summary, global_step=i)
        # Periodic validation pass (roughly once per epoch of ~7000 samples).
        if i % (int(7000//batch_size)) == 0:
            batch_x_test, batch_y_test = sess.run([image_val_batch, label_val_batch])
            batch_y_test = onehot(batch_y_test)
            batch_x_test = batch_x_test.reshape([-1, height, width, channel])
            merged_summary_val,acc,prediction_val_out,real_val_out,loss_show = sess.run([merged_summary_op,accuracy,y_conv_prediction,y_real,loss],feed_dict={x: batch_x_test, y: batch_y_test})
            summary_writer2.add_summary(merged_summary_val, global_step=i)
            # Per-class accuracy: count hits/misses for classes 0..6.
            lh1_right, lh2_right, lh3_right, lh4_right, lh5_right, lh6_right, lh7_right = 0, 0, 0, 0, 0, 0, 0
            lh1_wrong, lh2_wrong, lh3_wrong, lh4_wrong, lh5_wrong, lh6_wrong, lh7_wrong = 0, 0, 0, 0, 0, 0, 0
            for ii in range(len(prediction_val_out)):
                if prediction_val_out[ii] == real_val_out[ii]:
                    if real_val_out[ii] == 0:
                        lh1_right += 1
                    elif real_val_out[ii] == 1:
                        lh2_right += 1
                    elif real_val_out[ii] == 2:
                        lh3_right += 1
                    elif real_val_out[ii] == 3:
                        lh4_right += 1
                    elif real_val_out[ii] == 4:
                        lh5_right += 1
                    elif real_val_out[ii] == 5:
                        lh6_right += 1
                    elif real_val_out[ii] == 6:
                        lh7_right += 1
                else:
                    if real_val_out[ii] == 0:
                        lh1_wrong += 1
                    elif real_val_out[ii] == 1:
                        lh2_wrong += 1
                    elif real_val_out[ii] == 2:
                        lh3_wrong += 1
                    elif real_val_out[ii] == 3:
                        lh4_wrong += 1
                    elif real_val_out[ii] == 4:
                        lh5_wrong += 1
                    elif real_val_out[ii] == 5:
                        lh6_wrong += 1
                    elif real_val_out[ii] == 6:
                        lh7_wrong += 1
            # NOTE(review): these divisions raise ZeroDivisionError if a class
            # is absent from the validation batch — confirm the batch always
            # contains every class.
            print(step, "correct rate :", ((lh1_right) / (lh1_right + lh1_wrong)), ((lh2_right) / (lh2_right + lh2_wrong)),
                  ((lh3_right) / (lh3_right + lh3_wrong)), ((lh4_right) / (lh4_right + lh4_wrong)),
                  ((lh5_right) / (lh5_right + lh5_wrong)), ((lh6_right) / (lh6_right + lh6_wrong)),
                  ((lh7_right) / (lh7_right + lh7_wrong)))
            print(step, "准确的估计准确率为",(((lh1_right) / (lh1_right + lh1_wrong))+((lh2_right) / (lh2_right + lh2_wrong))+
                                      ((lh3_right) / (lh3_right + lh3_wrong))+((lh4_right) / (lh4_right + lh4_wrong))+
                                      ((lh5_right) / (lh5_right + lh5_wrong))+((lh6_right) / (lh6_right + lh6_wrong))+
                                      ((lh7_right) / (lh7_right + lh7_wrong)))/7)
            epoch_end_time = time()
            print("takes time:",(epoch_end_time-epoch_start_time), ' step:', step, ' accuracy:', acc," loss_fun:",loss_show)
            epoch_start_time = epoch_end_time
            # Accuracy target reached: save the model and stop.
            # NOTE(review): '\models\\' contains unescaped backslashes (Windows
            # path baked in) — looks like a bug on non-Windows hosts; confirm.
            if acc >= acc_rate:
                model_path = os.getcwd() + os.sep + '\models\\'+str(acc_rate) + "LSTM.model"
                saver.save(sess, model_path, global_step=step)
                break
            if step % 10 == 0 and step != 0:
                model_path = os.getcwd() + os.sep + '\models\\' + str(acc_rate)+ "LSTM"+str(step)+".model"
                print(model_path)
                saver.save(sess, model_path, global_step=step)
            step += 1
    duration = time() - startTime
    print("total takes time:",duration)
    summary_writer.close()
    coord.request_stop() # ask the queue threads to stop
    coord.join(threads) # wait for the other threads before returning
| 37.894198 | 192 | 0.627578 |
c558cd161d971289c2d702f86b9a33b6c1e4b041 | 335 | py | Python | fire.py | v0lkan/learning-salt | 6b13b3a4c474b851568ecb77267c9860db909c2c | [
"MIT"
] | null | null | null | fire.py | v0lkan/learning-salt | 6b13b3a4c474b851568ecb77267c9860db909c2c | [
"MIT"
] | null | null | null | fire.py | v0lkan/learning-salt | 6b13b3a4c474b851568ecb77267c9860db909c2c | [
"MIT"
] | null | null | null | import salt.utils.event
sock_dir = '/var/run/salt/master'
payload = {'sample_msg': 'This is a test.'}
event = salt.utils.event.SaltEvent('master', sock_dir)
event.fire_event(payload, 'salt/mycustomtag')
# other file
import salt.client
caller = salt.client.Caller()
caller.function('event.send', 'salt/mycustomtag', {'foo': 'bar'}) | 23.928571 | 65 | 0.722388 |
1a57dc0e3424dc80e4f2a4e6702468079146dde7 | 96 | py | Python | venv/lib/python3.8/site-packages/poetry/core/pyproject/exceptions.py | Retraces/UkraineBot | 3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71 | [
"MIT"
] | 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z | venv/lib/python3.8/site-packages/poetry/core/pyproject/exceptions.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z | venv/lib/python3.8/site-packages/poetry/core/pyproject/exceptions.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | null | null | null | /home/runner/.cache/pip/pool/9e/48/38/c6f60827bedacc0441f5955c7a40fcfa1ac3439b28fdaa1773deb1f24b | 96 | 96 | 0.895833 |
560b072990fa87de3f2708a5bab7b5cf13482f85 | 5,356 | py | Python | speech/api/speech_streaming.py | ettorerizza/python-docs-samples | 439ca4c552940284743f5f22a590cc4b6dae1bef | [
"Apache-2.0"
] | 1 | 2018-09-24T04:54:26.000Z | 2018-09-24T04:54:26.000Z | speech/api/speech_streaming.py | DalavanCloud/python-docs-samples | 439ca4c552940284743f5f22a590cc4b6dae1bef | [
"Apache-2.0"
] | null | null | null | speech/api/speech_streaming.py | DalavanCloud/python-docs-samples | 439ca4c552940284743f5f22a590cc4b6dae1bef | [
"Apache-2.0"
] | 1 | 2018-09-24T04:53:12.000Z | 2018-09-24T04:53:12.000Z | #!/usr/bin/python
# Copyright (C) 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample that streams audio to the Google Cloud Speech API via GRPC."""
from __future__ import division
import contextlib
import re
import threading
from gcloud.credentials import get_credentials
from google.cloud.speech.v1beta1 import cloud_speech_pb2 as cloud_speech
from google.rpc import code_pb2
from grpc.beta import implementations
import pyaudio
# Audio recording parameters
RATE = 16000  # sample rate in Hz, matches the RecognitionConfig below
CHANNELS = 1  # mono capture
CHUNK = int(RATE / 10)  # 100ms
# Keep the request alive for this many seconds
DEADLINE_SECS = 8 * 60 * 60
SPEECH_SCOPE = 'https://www.googleapis.com/auth/cloud-platform'
def make_channel(host, port):
    """Creates an SSL channel with auth credentials from the environment."""
    # Default SSL credentials for the https transport.
    transport_creds = implementations.ssl_channel_credentials(None, None, None)
    # Application default credentials, scoped for the Speech API.
    scoped_creds = get_credentials().create_scoped([SPEECH_SCOPE])
    # Plugin that injects the bearer token into each call's metadata.
    token_header = (
        'Authorization',
        'Bearer ' + scoped_creds.get_access_token().access_token)
    call_creds = implementations.metadata_call_credentials(
        lambda context, callback: callback([token_header], None),
        name='google_creds')
    # Compose transport security with per-call auth.
    channel_creds = implementations.composite_channel_credentials(
        transport_creds, call_creds)
    return implementations.secure_channel(host, port, channel_creds)
# [START audio_stream]
@contextlib.contextmanager
def record_audio(channels, rate, chunk):
    """Opens a recording stream in a context manager.

    :param channels: number of audio channels to record
    :param rate: sampling rate in Hz
    :param chunk: frames per buffer
    """
    audio_interface = pyaudio.PyAudio()
    audio_stream = audio_interface.open(
        format=pyaudio.paInt16, channels=channels, rate=rate,
        input=True, frames_per_buffer=chunk,
    )
    try:
        yield audio_stream
    finally:
        # Bug fix: release the stream and PortAudio resources even when the
        # consumer raises inside the with-block (the original leaked them).
        audio_stream.stop_stream()
        audio_stream.close()
        audio_interface.terminate()
# [END audio_stream]
def request_stream(stop_audio, channels=CHANNELS, rate=RATE, chunk=CHUNK):
    """Yields `StreamingRecognizeRequest`s constructed from a recording audio
    stream.
    Args:
        stop_audio: A threading.Event object stops the recording when set.
        channels: How many audio channels to record.
        rate: The sampling rate.
        chunk: Buffer audio into chunks of this size before sending to the api.
    """
    # The initial request must contain metadata about the stream, so the
    # server knows how to interpret it.
    recognition_config = cloud_speech.RecognitionConfig(
        encoding='LINEAR16', sample_rate=rate)
    streaming_config = cloud_speech.StreamingRecognitionConfig(
        config=recognition_config,
        # Note that setting interim_results to True means that you'll likely
        # get multiple results for the same bit of audio, as the system
        # re-interprets audio in the context of subsequent audio. However, this
        # will give us quick results without having to tell the server when to
        # finalize a piece of audio.
        interim_results=True, single_utterance=True
    )
    # First yield: configuration only, no audio content.
    yield cloud_speech.StreamingRecognizeRequest(
        streaming_config=streaming_config)
    with record_audio(channels, rate, chunk) as audio_stream:
        while not stop_audio.is_set():
            data = audio_stream.read(chunk)
            if not data:
                raise StopIteration()
            # Subsequent requests can all just have the content
            yield cloud_speech.StreamingRecognizeRequest(audio_content=data)
def listen_print_loop(recognize_stream):
    """Print transcription alternatives until 'exit' or 'quit' is heard."""
    exit_pattern = re.compile(r'\b(exit|quit)\b')
    for response in recognize_stream:
        if response.error.code != code_pb2.OK:
            raise RuntimeError('Server error: ' + response.error.message)
        # Display the transcriptions & their alternatives
        for result in response.results:
            print(result.alternatives)
        # Exit recognition if any transcribed phrase matches a keyword.
        transcripts = (alternative.transcript
                       for result in response.results
                       for alternative in result.alternatives)
        if any(exit_pattern.search(transcript) for transcript in transcripts):
            print('Exiting..')
            return
def main():
    """Stream microphone audio to the Speech API and print transcripts."""
    stop_audio = threading.Event()
    with cloud_speech.beta_create_Speech_stub(
            make_channel('speech.googleapis.com', 443)) as service:
        try:
            listen_print_loop(
                service.StreamingRecognize(
                    request_stream(stop_audio), DEADLINE_SECS))
        finally:
            # Stop the request stream once we're done with the loop - otherwise
            # it'll keep going in the thread that the grpc lib makes for it..
            stop_audio.set()
if __name__ == '__main__':
    main()
| 35.706667 | 79 | 0.702763 |
6d5b62bef5f761457d3f896675d1939de79057db | 670 | py | Python | 2021/Day 02/day02.py | nicoloverardo/aoc | 3645c6826880fa3c86686a84cf715796a785cc5b | [
"BSD-3-Clause"
] | null | null | null | 2021/Day 02/day02.py | nicoloverardo/aoc | 3645c6826880fa3c86686a84cf715796a785cc5b | [
"BSD-3-Clause"
] | null | null | null | 2021/Day 02/day02.py | nicoloverardo/aoc | 3645c6826880fa3c86686a84cf715796a785cc5b | [
"BSD-3-Clause"
] | null | null | null | import pandas as pd
df = pd.read_csv("2021/Day 02/input.txt", sep=" ", header=None, names=["move", "val"])
# Part 1
df.loc[df.move == "up", "val"] *= -1
df.move = df.move.replace({"down": "depth", "up": "depth"})
dfg = df.groupby("move").agg({"val": sum})
print(dfg.val["depth"] * dfg.val["forward"])
# Part 2
# Cumsum helps us to avoid a for loop.
df["aim"] = df[df.move == "depth"]["val"].cumsum()
df["hor"] = df[df.move == "forward"]["val"].cumsum()
df.aim = df.aim.fillna(method="ffill").fillna(0)
df.hor = df.hor.fillna(method="ffill")
df["d"] = (df[df.move == "forward"]["val"] * df[df.move == "forward"]["aim"]).cumsum()
print(df.iloc[-1].hor * df.iloc[-1].d)
| 35.263158 | 86 | 0.595522 |
f2963e564df868682ff9c509e0228d88dfc986d0 | 1,278 | py | Python | root/gzip_big_files.py | transitanalystisarel/TransitAnalystIsrael | 341de9272b352c18333ff136a00de0b97cd82216 | [
"MIT"
] | null | null | null | root/gzip_big_files.py | transitanalystisarel/TransitAnalystIsrael | 341de9272b352c18333ff136a00de0b97cd82216 | [
"MIT"
] | null | null | null | root/gzip_big_files.py | transitanalystisarel/TransitAnalystIsrael | 341de9272b352c18333ff136a00de0b97cd82216 | [
"MIT"
] | 3 | 2019-05-08T04:36:03.000Z | 2020-11-23T19:46:52.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# gzip big data files for upload to cloud
#
import transitanalystisrael_config as cfg
import shutil
import os
from pathlib import Path
cwd = Path.cwd()
print('********** gzip big data files for upload to cloud *************')
if cfg.get_service_date == 'auto' :
gzip_dir = cwd.parent / cfg.websitelocalcurrentpath
else : # cfg.get_service_date == 'on_demand'
gzip_dir = cwd.parent / cfg.websitelocalondemandpath.replace('yyyymmdd', cfg.gtfsdate)
print(gzip_dir)
os.chdir(gzip_dir)
toolslist = ['lines_on_street', 'line_freq', 'muni_fairsharescore', 'muni_score_lists_and_charts', 'muni_tpd_per_line', 'muni_transitscore', 'stops_near_trainstops_editor', 'tpd_at_stops_per_line', 'tpd_near_trainstops_per_line', 'transit_time_map', 'transitscore']
for tooldir in toolslist:
print('# ',tooldir)
tooldirfilelist = os.listdir(gzip_dir / tooldir)
for filename in tooldirfilelist :
print(filename)
#print (os.path.getsize(gzip_dir / tooldir / filename))
filepath = gzip_dir / tooldir / filename
filesize = os.path.getsize(filepath)
if filename.endswith(".js") and filesize > int(cfg.bigjs2gzip) :
print(' ',filepath, filesize)
os.system('gzip -9 -k -f ' + filepath.as_posix())
print(os.listdir(gzip_dir))
| 33.631579 | 265 | 0.730829 |
7c2110c352ab6abad614b07d65b483c3de802f22 | 12,110 | py | Python | core/models.py | alexander-koval/bctip | 60b0eeaf4a62123af5c0f4d65aa184e23c9910db | [
"MIT"
] | null | null | null | core/models.py | alexander-koval/bctip | 60b0eeaf4a62123af5c0f4d65aa184e23c9910db | [
"MIT"
] | null | null | null | core/models.py | alexander-koval/bctip | 60b0eeaf4a62123af5c0f4d65aa184e23c9910db | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import json
from decimal import Decimal
from urllib.request import urlopen
import shortuuid
from django.conf import settings
from django.core.cache import cache
from django.db import models
from django.utils.translation import ugettext as _
from lnurl import Lnurl, LnurlWithdrawResponse, encode as lnurl_encode
from jsonrpc import ServiceProxy
# JSON-RPC proxy to the local bitcoind node; created at import time.
BITCOIND = ServiceProxy(settings.BITCOIND_CONNECTION_STRING)
"""
access.getbalance()
access.getinfo()
settxfee(0.00001)
a.listaccounts()
a.listtransactions(acc)
"""
# Exchange rates expressed as units of each currency per 1 USD.
CURRENCY_RATES = {'USD': 1, 'EUR': 0.85, 'GBP': 0.75, 'SEK': 8.5, 'RUB': 60.0}
# Display symbols; note RUB has no entry here.
CURRENCY_SIGNS = {'USD': '$', 'EUR': '€', 'GBP': '£', 'SEK': 'kr'}
MESSAGES = {}
THANKS_MESSAGE = _("Thank you for your service!")
# Translators: Please provide youtube video about bitcoin in your language
LOCALIZED_YOUTUBE_URL = _("www.youtube.com/embed/Gc2en3nHxA4")
class Wallet(models.Model):
    """A purchased batch of tips: payment details, denomination, print options.

    Monetary amounts in ``balance``/``invoice`` are stored in satoshis — the
    ``*_btc`` properties divide by 1e8.
    """
    key = models.CharField(max_length=64) # secret
    ctime = models.DateTimeField(auto_now_add=True)
    # activation time (paid)
    atime = models.DateTimeField(null=True, blank=True)
    ip = models.GenericIPAddressField(null=True, blank=True)
    ua = models.CharField(max_length=255, null=True, blank=True)
    bcaddr = models.CharField(max_length=90) # pay to
    bcaddr_from = models.CharField(
        max_length=90, null=True, blank=True) # paid from
    # target country: AU, US, RU. none for universal
    audience = models.CharField(max_length=2, null=True, blank=True)
    # amount of every tip, ex.:$2
    divide_by = models.DecimalField(
        decimal_places=8, max_digits=16, blank=True, null=True, default=0)
    divide_currency = models.CharField(
        max_length=3, default="USD") # iso currency code
    quantity = models.SmallIntegerField(
        blank=True, null=True) # how many
    template = models.CharField(
        max_length=34, default="001-original.odt")
    template_back = models.CharField(max_length=34, default="0000-default.odt")
    target_language = models.CharField(max_length=3, default="en")
    # custom message
    message = models.CharField(max_length=128, null=True, blank=True)
    # donation fee in percents
    price = models.DecimalField(decimal_places=2, max_digits=5, default="0")
    # tags for statistics
    hashtag = models.CharField(max_length=40, null=True, blank=True)
    # order print and post?
    print_and_post = models.BooleanField(default=False)
    # price of 1 BTC in target currency on this date
    rate = models.DecimalField(
        decimal_places=2, max_digits=10, blank=True, null=True, default=0)
    balance = models.BigIntegerField(null=True, blank=True)
    invoice = models.BigIntegerField(null=True, blank=True)
    activated = models.BooleanField(default=False)
    # expiration in days
    expiration = models.IntegerField(null=True, blank=True)
    src_site = models.SmallIntegerField(
        blank=True, null=True, default=0) # for custom
    email = models.CharField(max_length=64, null=True, blank=True)
    fee = models.DecimalField(
        decimal_places=8, max_digits=10, blank=True, null=True)
    @property
    def balance_nbtc(self):
        # Balance in "bits"-scale units (satoshis / 100).
        return self.balance / 100.0 # 1e3
    @property
    def balance_mbtc(self):
        # Balance in milli-BTC.
        return self.balance / 100000.0 # 1e5
    @property
    def balance_btc(self):
        # Balance in BTC, or None when no balance is recorded.
        if self.balance:
            return self.balance / 100000000.0 or None # 1e8
        else:
            return None
    @property
    def fee_float(self):
        # Per-transaction fee as float, with a small default when unset.
        if self.fee:
            return float(self.fee)
        else:
            return 0.00001
    @property
    def txfee_float(self):
        return round(self.fee_float * 3, 6)
    @property
    def invoice_btc(self):
        if self.invoice is not None:
            return self.invoice / 100000000.0 # 1e8
        else:
            return None
    @property
    def bcaddr_uri(self):
        # BIP21-style lightning URI for the payment address.
        return "lightning:%s?amount=%s&label=bctip.org" % (
            self.bcaddr, self.invoice_btc)
    @property
    def divide_currency_sign(self):
        # NOTE(review): raises KeyError for currencies absent from
        # CURRENCY_SIGNS (e.g. RUB) — confirm those are never selectable.
        return CURRENCY_SIGNS[self.divide_currency]
    def __unicode__(self):
        # NOTE(review): Python 2 legacy; under Python 3 Django uses __str__ —
        # this method is presumably never called. Confirm before relying on it.
        return u"%s" % (self.key)
    def get_absolute_url(self):
        return u"/w/%s/" % self.key
    def get_account(self):
        # Account name for the bitcoind wallet: "<id>_<last 6 of key>".
        return "%s_%s" % (self.id, self.key[-6:])
    def activated_tips(self):
        # Number of tips from this wallet that have been claimed.
        return Tip.objects.filter(wallet=self, activated=True).count()
    @property
    def rate_fiat(self):
        # BTC rate converted from USD into the wallet's currency (integer).
        return int(self.rate * Decimal(CURRENCY_RATES[self.divide_currency]))
class Address(models.Model):
    """Postal address attached to a wallet (used for print-and-post orders)."""
    wallet = models.ForeignKey(Wallet, on_delete=models.CASCADE)
    country = models.CharField(max_length=64, default="USA")
    state = models.CharField(max_length=64, null=True, blank=True)
    address1 = models.CharField("Address line 1", max_length=64)
    address2 = models.CharField(
        "Address line 2", max_length=64, null=True, blank=True)
    city = models.CharField(max_length=64, blank=False)
    postal_code = models.CharField("Postal Code", max_length=32)
    def __unicode__(self):
        # Bug fix: the original formatted a three-item tuple through a single
        # "%s" placeholder, which raises TypeError ("not all arguments
        # converted") whenever the object is rendered.
        return u"%s, %s, %s" % (self.country, self.city, self.address1)
    def get_absolute_url(self):
        return u"/admin/core/address/%d/" % self.id
class Tip(models.Model):
    """A single tip slip belonging to a wallet; balance stored in satoshis
    (the ``*_btc`` properties divide by 1e8)."""
    wallet = models.ForeignKey(
        Wallet, null=True, blank=True, on_delete=models.CASCADE)
    key = models.CharField(max_length=64, null=True, blank=True)
    ctime = models.DateTimeField(
        auto_now_add=True, null=True, blank=True)
    # activation
    atime = models.DateTimeField(null=True, blank=True)
    # expiration
    etime = models.DateTimeField(null=True, blank=True)
    ip = models.GenericIPAddressField(null=True, blank=True) # not used yet
    ua = models.CharField(max_length=255, null=True, blank=True) # user agent
    balance = models.BigIntegerField(
        blank=True, null=True, default=0)
    miniid = models.CharField(max_length=4)
    comment = models.CharField(max_length=40, default="")
    comment_time = models.DateTimeField(null=True, blank=True)
    activated = models.BooleanField(default=False)
    expired = models.BooleanField(default=False)
    bcaddr = models.CharField(max_length=300, null=True, blank=True) # TODO DEPRECATED
    txid = models.CharField(max_length=64, null=True, blank=True)
    # tip page visit counter
    times = models.IntegerField(null=True, blank=True, default=0)
    def __unicode__(self):
        # NOTE(review): Python 2 legacy; Django 3 calls __str__ instead.
        return u"%s: %s" % (self.wallet, self.balance_btc)
    def get_absolute_url(self):
        # NOTE(review): domain is hard-coded here rather than taken from
        # settings/Sites — confirm this is intentional.
        domain = "https://www.bctip.org"
        return "%s/%s/" % (domain, self.key)
    @property
    def balance_nbtc(self):
        return self.balance / 100.0 # 1e3
    @property
    def balance_mbtc(self):
        return self.balance / 100000.0 # 1e5
    @property
    def balance_btc(self):
        return self.balance / 100000000.0 # 1e8
    @property
    def balance_usd(self):
        # Spot conversion using the current average BTC/USD rate.
        return round(self.balance_btc * get_avg_rate(), 2)
    @property
    def balance_eur(self):
        return round(self.balance_btc * get_avg_rate_euro(), 2)
    @property
    def balance_fiat(self):
        # USD value converted into the parent wallet's display currency.
        fiat = Decimal(self.balance_usd) * \
            Decimal(CURRENCY_RATES[self.wallet.divide_currency])
        return round(fiat, 2)
class Payment(models.Model):
    """A Lightning payment record; ``amount`` is stored in millisatoshis
    (see the ``msat``/``sat`` properties) and is negative for outgoing
    payments (see ``is_out``)."""
    wallet = models.ForeignKey(
        Wallet, null=True, blank=True, on_delete=models.CASCADE)
    checking_id = models.CharField(max_length=90)
    payment_request = models.CharField(max_length=90)
    payment_hash = models.CharField(max_length=90)
    memo = models.TextField(null=True, blank=True)
    amount = models.IntegerField(default=0)
    fee = models.IntegerField(default=0)
    preimage = models.CharField(max_length=90)
    pending = models.BooleanField(default=True)
    extra = models.JSONField(blank=True, null=True)
    webhook = models.CharField(max_length=90, blank=True, null=True)
    @property
    def msat(self) -> int:
        # Amount in millisatoshis (raw stored value).
        return self.amount
    @property
    def sat(self) -> int:
        # Amount in whole satoshis.
        return self.amount // 1000
    @property
    def is_in(self) -> bool:
        # Incoming payments have a positive amount.
        return self.amount > 0
    @property
    def is_out(self) -> bool:
        # Outgoing payments have a negative amount.
        return self.amount < 0
class WithdrawLink(models.Model):
    """An LNURL-withdraw link, optionally multi-use (``uses`` > 1) with
    per-use unique hashes derived from ``uses_csv``."""
    # uuid = models.CharField(max_length=90)
    wallet = models.ForeignKey(Wallet, null=True, blank=True, on_delete=models.SET_NULL)
    title = models.CharField(max_length=255)
    min_withdrawable = models.IntegerField()
    max_withdrawable = models.IntegerField()
    uses = models.IntegerField()
    wait_time = models.IntegerField()
    is_unique = models.BooleanField()
    unique_hash = models.TextField()
    k1 = models.TextField()
    open_time = models.IntegerField()
    used = models.IntegerField(default=0)
    uses_csv = models.TextField()
    # Class-level default index into uses_csv; set per instance before
    # reading .lnurl for a specific use of a multi-use link.
    number = 0
    @property
    def is_spent(self) -> bool:
        # A link is spent once it has been used as many times as allowed.
        return self.used >= self.uses
    @property
    def lnurl(self) -> Lnurl:
        # Build the (bech32-encoded) LNURL pointing at the withdraw endpoint.
        if self.is_unique:
            uses_csv = self.uses_csv.split(",")
            to_hash = self.unique_hash + uses_csv[self.number]
            multi_hash = shortuuid.uuid(name=to_hash)
            # TODO url_for()
            url = f"withdraw.lnurl_multi_response?" \
                  f"unique_hash={self.unique_hash},id_unique_hash={multi_hash}"
        else:
            url = f"withdraw.lnurl_response?" \
                  f"unique_hash={self.unique_hash}"
        # NOTE(review): host is hard-coded to a local dev address — confirm
        # this is replaced/configured for production.
        full_url = "https://127.0.0.1:8443/en/" + url
        return lnurl_encode(full_url)
    @property
    def lnurl_response(self):
        # First-step LNURL-withdraw response (amount limits in msat).
        url = f"withdraw.lnurl_callback?unique_hash={self.unique_hash}"
        return LnurlWithdrawResponse(
            callback=url,
            k1=self.k1,
            min_withdrawable=self.min_withdrawable * 1000,
            max_withdrawable=self.max_withdrawable * 1000,
            default_description=self.title
        )
def get_avg_rate():
    """Return the current average BTC/USD rate.

    Tries Bitstamp first, then Coinbase; falls back to a hard-coded value
    when every provider fails.
    """
    for provider in (get_bitstamp_avg_rate, get_coinbase_avg_rate):
        rate = provider()
        if rate:
            return rate
    return 770.0  # if everything failed
def get_avg_rate_euro():
    """Average BTC rate converted from USD into euros (integer)."""
    usd_rate = get_avg_rate()
    return int(usd_rate * CURRENCY_RATES['EUR'])
"""
def get_mtgox_avg_rate():
try:
mtgox = urlopen("https://data.mtgox.com/api/1/BTCUSD/ticker", timeout=5).read()
mtgox = json.loads(mtgox)
return float(mtgox['return']['avg']['value'])
except:
return None
"""
def get_btce_avg_rate(force=False):
    """Fetch the BTC-e average BTC/USD rate, cached for one hour.

    :param force: bypass the cache and hit the API again
    :return: float rate, or None when the request or parsing fails
    """
    key = 'avg_rate_btce'
    rate = cache.get(key)
    if rate and not force:
        return rate
    try:
        # Close the HTTP response deterministically (the original leaked it).
        with urlopen("https://btc-e.com/api/2/btc_usd/ticker",
                     timeout=4) as resp:
            btce = json.loads(resp.read())
        rate = float(btce['ticker']['avg'])
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; any network/parse failure yields None.
        return None
    cache.set(key, rate, 60 * 60)  # cache for an hour
    return rate
def get_coinbase_avg_rate(force=False):
    """Fetch the Coinbase buy price for BTC in USD, cached for one hour.

    :param force: bypass the cache and hit the API again
    :return: float rate, or None when the request or parsing fails
    """
    key = 'avg_rate__coinbase'
    rate = cache.get(key)
    if rate and not force:
        return rate
    try:
        # Close the HTTP response deterministically (the original leaked it).
        with urlopen("https://coinbase.com/api/v1/prices/buy",
                     timeout=4) as resp:
            coinbase = json.loads(resp.read())
        rate = float(coinbase['total']['amount'])
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; any network/parse failure yields None.
        return None
    cache.set(key, rate, 60 * 60)  # cache for an hour
    return rate
def get_bitstamp_avg_rate(force=False):
    """Fetch the Bitstamp mid rate ((high+low)/2) for BTC/USD, cached 1 hour.

    :param force: bypass the cache and hit the API again
    :return: int rate, or None when the request or parsing fails
    """
    key = 'avg_rate__bitstamp'
    rate = cache.get(key)
    if rate and not force:
        return rate
    try:
        # Close the HTTP response deterministically (the original leaked it).
        with urlopen("https://www.bitstamp.net/api/ticker/",
                     timeout=4) as resp:
            bitstamp = json.loads(resp.read())
        rate = int((float(bitstamp['high']) + float(bitstamp['low'])) / 2.0)
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; any network/parse failure yields None.
        return None
    cache.set(key, rate, 60 * 60)  # cache for an hour
    return rate
def get_est_fee(force=False):
    """Return the estimated transaction fee; currently hard-coded to 0.

    NOTE(review): the early ``return float(0)`` below deliberately disables
    the bitcoind-based estimation — everything after it is unreachable,
    presumably kept for later re-enabling. Confirm before removing.
    """
    return float(0)
    key = 'est_fee'
    fee = cache.get(key)
    if not fee or force:
        fee = BITCOIND.estimatesmartfee(6 * 24)['feerate']
        fee = round(fee / 3, 8)
        cache.set(key, fee, 60 * 60)
    return fee
# http://127.0.0.1:8000/ru/w/57W68phEpNUgoJtUXTk8tLsnCSpDCziiq/
| 32.207447 | 88 | 0.654996 |
08752675066900c89c1b1d699c0be3d652e4d8c5 | 2,542 | py | Python | Notebooks/dishonest_casino.py | willyrv/projet2MIC | f57a4c61a1a463fec37518c0e52686ec9645fdcf | [
"Unlicense"
] | 1 | 2020-06-19T13:04:10.000Z | 2020-06-19T13:04:10.000Z | Notebooks/dishonest_casino.py | willyrv/projet2MIC | f57a4c61a1a463fec37518c0e52686ec9645fdcf | [
"Unlicense"
] | null | null | null | Notebooks/dishonest_casino.py | willyrv/projet2MIC | f57a4c61a1a463fec37518c0e52686ec9645fdcf | [
"Unlicense"
] | null | null | null | """
python implementation of the dishonest casino example.
"""
import random
import bisect
import numpy as np
class Dice:
    """
    A dice with an arbitrary probability distribution (not necessarily fair).
    """
    def __init__(self, prob_vector):
        """
        :param prob_vector: probability of each face; entries must be
            non-negative and sum (approximately) to 1
        :raises ValueError: if the vector is not a valid distribution
        """
        if (min(prob_vector) < 0) or (not np.isclose(sum(prob_vector), 1)):
            raise ValueError('Invalid probability vector')
        # Cumulative distribution: face i+1 owns the interval
        # (cum[i-1], cum[i]].
        self.prob_vector = [sum(prob_vector[:i+1])
                            for i in range(len(prob_vector))]
    def roll(self, n):
        """
        Roll the dice n times and return the list of 1-based faces.
        :type n: int
        """
        last_index = len(self.prob_vector) - 1
        rand_values = [random.random() for _ in range(n)]
        # Bug fix: clamp with min(..., last_index). Because the cumulative
        # sum may be slightly below 1.0 (np.isclose tolerance), a draw above
        # it would otherwise map to a non-existent face.
        dice_values = [min(bisect.bisect_right(self.prob_vector, r),
                           last_index) + 1
                       for r in rand_values]
        return dice_values
def dishonest_casino_play(n, fair_prob=(1./6, 1./6, 1./6, 1./6, 1./6, 1./6),
                          unfair_prob=(0.1, 0.1, 0.1, 0.1, 0.1, 0.5),
                          prob_switch_to_unfair=0.05,
                          prob_switch_to_fair=0.1,
                          initial_dice=0):
    """
    Simulate n rolls of the dishonest casino's dice.
    :param n: the number of rolls
    :param fair_prob: list, the probabilities for the fair dice
    :param unfair_prob: list, the probabilities for the loaded dice
    :param prob_switch_to_unfair: float, probability of switching from
        fair to loaded
    :param prob_switch_to_fair: float, probability of switching from
        loaded to fair
    :param initial_dice: the first dice used (0 for fair dice)
    :return: a tuple of two lists: the sequence of dices used (hidden
        states) and the values obtained (observations)
    """
    dices = (Dice(fair_prob), Dice(unfair_prob))
    hidden_states = [initial_dice]
    observations = []
    for _ in range(n):
        # Roll whichever dice is currently in play.
        observations.append(dices[hidden_states[-1]].roll(1)[0])
        # One uniform draw decides whether the casino switches dice.
        draw = random.random()
        if hidden_states[-1] == 0:
            # Fair dice: switch to loaded with prob_switch_to_unfair.
            hidden_states.append(int(draw < prob_switch_to_unfair))
        else:
            # Loaded dice: stay loaded unless the draw falls below
            # prob_switch_to_fair.
            hidden_states.append(int(draw >= prob_switch_to_fair))
    # The final appended state was never rolled with; drop it.
    return hidden_states[:-1], observations
| 35.305556 | 76 | 0.586939 |
0f51fa3cc2d72dc188c779a7324b63b5fdc64f47 | 6,123 | py | Python | examples/VPC_With_VPN_Connection.py | jpvowen/troposphere | 6a9efa7717db75905b846a9f3aafd092f55c7925 | [
"BSD-2-Clause"
] | 1 | 2021-04-03T22:24:36.000Z | 2021-04-03T22:24:36.000Z | examples/VPC_With_VPN_Connection.py | jpvowen/troposphere | 6a9efa7717db75905b846a9f3aafd092f55c7925 | [
"BSD-2-Clause"
] | null | null | null | examples/VPC_With_VPN_Connection.py | jpvowen/troposphere | 6a9efa7717db75905b846a9f3aafd092f55c7925 | [
"BSD-2-Clause"
] | 5 | 2020-05-10T13:50:32.000Z | 2021-09-09T09:06:54.000Z | # Converted from VPC_With_VPN_Connection.template located at:
# http://aws.amazon.com/cloudformation/aws-cloudformation-templates/
from troposphere import Join, Output
from troposphere import Parameter, Ref, Tags, Template
from troposphere.ec2 import PortRange
from troposphere.ec2 import NetworkAcl
from troposphere.ec2 import Route
from troposphere.ec2 import VPCGatewayAttachment
from troposphere.ec2 import SubnetRouteTableAssociation
from troposphere.ec2 import Subnet
from troposphere.ec2 import CustomerGateway
from troposphere.ec2 import VPNConnectionRoute
from troposphere.ec2 import RouteTable
from troposphere.ec2 import VPC
from troposphere.ec2 import NetworkAclEntry
from troposphere.ec2 import VPNGateway
from troposphere.ec2 import SubnetNetworkAclAssociation
from troposphere.ec2 import VPNConnection
# Build the CloudFormation template object.
t = Template()
t.add_version("2010-09-09")
# NOTE(review): "you need yonk the tunnels" in the description looks like a
# typo for "you need to configure the tunnels" — string left untouched.
t.set_description("""\
AWS CloudFormation Sample Template VPC_With_VPN_Connection.template: \
Sample template showing how to create a private subnet with a VPN connection \
using static routing to an existing VPN endpoint. NOTE: The VPNConnection \
created will define the configuration you need yonk the tunnels to your VPN \
endpoint - you can get the VPN Gateway configuration from the AWS Management \
console. You will be billed for the AWS resources used if you create a stack \
from this template.""")
# --- Input parameters: endpoint address and the three CIDR ranges ---
VPNAddress = t.add_parameter(Parameter(
    "VPNAddress",
    Type="String",
    Description="IP Address of your VPN device",
    MinLength="7",
    AllowedPattern=r"(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})",
    MaxLength="15",
    ConstraintDescription="must be a valid IP address of the form x.x.x.x",
))
OnPremiseCIDR = t.add_parameter(Parameter(
    "OnPremiseCIDR",
    ConstraintDescription=(
        "must be a valid IP CIDR range of the form x.x.x.x/x."),
    Description="IP Address range for your existing infrastructure",
    Default="10.0.0.0/16",
    MinLength="9",
    AllowedPattern=r"(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})/(\d{1,2})",
    MaxLength="18",
    Type="String",
))
VPCCIDR = t.add_parameter(Parameter(
    "VPCCIDR",
    ConstraintDescription=(
        "must be a valid IP CIDR range of the form x.x.x.x/x."),
    Description="IP Address range for the VPN connected VPC",
    Default="10.1.0.0/16",
    MinLength="9",
    AllowedPattern=r"(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})/(\d{1,2})",
    MaxLength="18",
    Type="String",
))
SubnetCIDR = t.add_parameter(Parameter(
    "SubnetCIDR",
    ConstraintDescription=(
        "must be a valid IP CIDR range of the form x.x.x.x/x."),
    Description="IP Address range for the VPN connected Subnet",
    Default="10.1.0.0/24",
    MinLength="9",
    AllowedPattern=r"(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})/(\d{1,2})",
    MaxLength="18",
    Type="String",
))
# --- Resources: private network ACL, routing and gateway attachment ---
PrivateNetworkAcl = t.add_resource(NetworkAcl(
    "PrivateNetworkAcl",
    VpcId=Ref("VPC"),
    Tags=Tags(
        Application=Ref("AWS::StackName"),
        Network="Private",
    )
))
# Default route sends all traffic through the VPN gateway.
PrivateRoute = t.add_resource(Route(
    "PrivateRoute",
    GatewayId=Ref("VPNGateway"),
    DestinationCidrBlock="0.0.0.0/0",
    RouteTableId=Ref("PrivateRouteTable"),
))
VPNGatewayAttachment = t.add_resource(VPCGatewayAttachment(
    "VPNGatewayAttachment",
    VpcId=Ref("VPC"),
    VpnGatewayId=Ref("VPNGateway"),
))
PrivateSubnetRouteTableAssociation = t.add_resource(
    SubnetRouteTableAssociation(
        "PrivateSubnetRouteTableAssociation",
        SubnetId=Ref("PrivateSubnet"),
        RouteTableId=Ref("PrivateRouteTable"),
    )
)
PrivateSubnet = t.add_resource(Subnet(
"PrivateSubnet",
VpcId=Ref("VPC"),
CidrBlock=Ref(SubnetCIDR),
Tags=Tags(
Application=Ref("AWS::StackName"),
Network="VPN Connected Subnet",
)
))
CustomerGateway = t.add_resource(CustomerGateway(
"CustomerGateway",
BgpAsn="65000",
IpAddress=Ref(VPNAddress),
Type="ipsec.1",
Tags=Tags(
Application=Ref("AWS::StackName"),
VPN=Join("", ["Gateway to ", Ref(VPNAddress)]),
)
))
VPNConnectionRoute = t.add_resource(VPNConnectionRoute(
"VPNConnectionRoute",
VpnConnectionId=Ref("VPNConnection"),
DestinationCidrBlock=Ref(OnPremiseCIDR),
))
PrivateRouteTable = t.add_resource(RouteTable(
"PrivateRouteTable",
VpcId=Ref("VPC"),
Tags=Tags(
Application=Ref("AWS::StackName"),
Network="VPN Connected Subnet",
)
))
VPC = t.add_resource(VPC(
"VPC",
EnableDnsSupport="true",
CidrBlock=Ref(VPCCIDR),
EnableDnsHostnames="true",
Tags=Tags(
Application=Ref("AWS::StackName"),
Network="VPN Connected VPC",
)
))
OutBoundPrivateNetworkAclEntry = t.add_resource(NetworkAclEntry(
"OutBoundPrivateNetworkAclEntry",
NetworkAclId=Ref(PrivateNetworkAcl),
RuleNumber="100",
Protocol="6",
PortRange=PortRange(To="65535", From="0"),
Egress="true",
RuleAction="allow",
CidrBlock="0.0.0.0/0",
))
VPNGateway = t.add_resource(VPNGateway(
"VPNGateway",
Type="ipsec.1",
Tags=Tags(
Application=Ref("AWS::StackName"),
)
))
PrivateSubnetNetworkAclAssociation = t.add_resource(
SubnetNetworkAclAssociation(
"PrivateSubnetNetworkAclAssociation",
SubnetId=Ref(PrivateSubnet),
NetworkAclId=Ref(PrivateNetworkAcl),
)
)
VPNConnection = t.add_resource(VPNConnection(
"VPNConnection",
CustomerGatewayId=Ref(CustomerGateway),
StaticRoutesOnly="true",
Type="ipsec.1",
VpnGatewayId=Ref(VPNGateway),
))
InboundPrivateNetworkAclEntry = t.add_resource(NetworkAclEntry(
"InboundPrivateNetworkAclEntry",
NetworkAclId=Ref(PrivateNetworkAcl),
RuleNumber="100",
Protocol="6",
PortRange=PortRange(To="65535", From="0"),
Egress="false",
RuleAction="allow",
CidrBlock="0.0.0.0/0",
))
PrivateSubnet = t.add_output(Output(
"PrivateSubnet",
Description="SubnetId of the VPN connected subnet",
Value=Ref(PrivateSubnet),
))
VPCId = t.add_output(Output(
"VPCId",
Description="VPCId of the newly created VPC",
Value=Ref(VPC),
))
print(t.to_json())
| 28.347222 | 78 | 0.690838 |
5f3519cb1c71aefd92f96513f258c411c1e1910e | 3,467 | py | Python | app/lib/core/dl/model/dler.py | nonomal/PixivBiu | 19f01736c06c3c2eca101c7e02417b4f81d5d33f | [
"MIT"
] | 1 | 2022-01-12T09:02:42.000Z | 2022-01-12T09:02:42.000Z | app/lib/core/dl/model/dler.py | nonomal/PixivBiu | 19f01736c06c3c2eca101c7e02417b4f81d5d33f | [
"MIT"
] | null | null | null | app/lib/core/dl/model/dler.py | nonomal/PixivBiu | 19f01736c06c3c2eca101c7e02417b4f81d5d33f | [
"MIT"
] | null | null | null | import re
import uuid
class Dler(object):
    """
    biu-dl downloader interface class.

    Holds the shared state of one download task (target URL, save
    location, retry bookkeeping, progress counters) plus the status-code
    machinery. Concrete downloaders subclass this and implement run().
    """

    # (major, minor) status codes; the bare ints are the major part only.
    CODE_BAD = 0
    CODE_BAD_FAILED = (0, 0)
    CODE_BAD_CANCELLED = (0, 1)
    CODE_GOOD = 1
    CODE_GOOD_RUNNING = (1, 0)
    CODE_GOOD_SUCCESS = (1, 1)
    CODE_WAIT = 2
    CODE_WAIT_PAUSE = (2, 0)

    # Template of per-backend download arguments.
    # NOTE(review): mutable class attribute shared by all users — confirm
    # callers copy it before mutating.
    TEMP_dlArgs = {"_headers": {}, "@requests": {}, "@aria2": {}}

    def __init__(self, url, folder, name, dlArgs, dlRetryMax, callback):
        """
        :param url: download URL
        :param folder: directory the file is saved into
        :param name: file name to save as
        :param dlArgs: backend-specific download arguments
        :param dlRetryMax: maximum number of retries
        :param callback: callable (or list of callables) invoked with self
        """
        self._id = str(uuid.uuid1())
        self._dlUrl = url
        self._dlArgs = dlArgs
        self._dlFileSize = -1
        self._dlSaveUri = None
        self._dlSaveDir = folder
        self._dlSaveName = name
        self._dlRetryMax = dlRetryMax
        self._dlRetryNum = 0
        self._funCallback = callback
        self._stuING = 2          # major status: waiting
        self._stuExtra = -1       # minor status: undefined
        self._stuIngFileSize = 0  # bytes downloaded so far
        self._stuIngSpeed = 0     # current download speed
        self._errMsg = (0, "None")

    # Thread entry point; subclasses implement the actual transfer.
    def run(self):
        return False

    def pause(self):
        self.status(Dler.CODE_WAIT_PAUSE, True)
        return True

    def unpause(self):
        self.status(Dler.CODE_GOOD_RUNNING, True)
        return True

    def cancel(self):
        self.status(Dler.CODE_BAD_CANCELLED, True)
        return True

    # Snapshot of the task's metadata and progress.
    def info(self):
        return {
            "url": self._dlUrl,
            "size": self._dlFileSize,
            "saveDir": self._dlSaveDir,
            "saveName": self._dlSaveName,
            "retryNum": self._dlRetryNum,
            "ingSize": self._stuIngFileSize,
            "ingSpeed": self._stuIngSpeed
        }

    def status(self, code, isBool=None):
        """
        Query or set the task status.

        With isBool=True, *code* must be a (major, minor) tuple and is
        stored (nothing is returned). Otherwise return True when the
        current status matches *code*: an int matches the major part
        only, a tuple must match both parts.
        """
        if isBool is True:
            if isinstance(code, tuple):
                self._stuING, self._stuExtra = code
            return
        if not isinstance(code, tuple) and self._stuING == code:
            return True
        if isinstance(code, tuple) and (self._stuING, self._stuExtra) == code:
            return True
        return False

    # Invoke the registered callback(s), passing this task as argument.
    def callback(self):
        if self._funCallback is None:
            return
        funs = self._funCallback if isinstance(self._funCallback, list) else [self._funCallback]
        for fun in funs:
            if callable(fun):
                fun(self)

    @staticmethod
    def pure_size(size, dig=2, space=1):
        """
        Format a byte count for humans.

        :param size: int: size in bytes
        :param dig: int: number of decimal places to keep
        :param space: int: spaces between the number and the unit
        :return:
            str: formatted size such as "1.23 MB"
        """
        units = ["B", "KB", "MB", "GB", "TB", "PB"]
        unit_index = 0
        K = 1024.0
        # Stop at the largest known unit so absurd sizes cannot index
        # past the end of `units` (original raised IndexError >= 1024 PB).
        while size >= K and unit_index < len(units) - 1:
            size = size / K
            unit_index += 1
        return ("%." + str(dig) + "f" + " " * space + "%s") % (size, units[unit_index])

    @staticmethod
    def get_dl_filename(url, headers):
        """
        Guess the filename of a pending download:
        1. last URL path segment, when it contains a ".";
        2. otherwise the filename from the content-disposition header;
        3. otherwise the last URL path segment as-is.

        :param url: str: target URL
        :param headers: dict-like response headers
        :return:
            str: file name
        """
        urlLastPart = url.split("/")[-1]
        if "." in urlLastPart:
            return urlLastPart
        if "content-disposition" in headers:
            found = re.findall("filename=(.+)", headers["content-disposition"])
            # Guard: the header may be present without a "filename=" part
            # (original indexed [0] unconditionally -> IndexError).
            if found:
                return re.sub(r"[\/\\\:\*\?\"\<\>\|]", "", found[0])
        return urlLastPart
| 27.736 | 93 | 0.527834 |
0d98bd736a41241f68d3fe60de8e7310e1c5e691 | 7,035 | py | Python | examples/pwr_run/checkpointing/nonpc_short/v100_only/job1.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/checkpointing/nonpc_short/v100_only/job1.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/checkpointing/nonpc_short/v100_only/job1.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | """
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.001
args_model = 'vgg16'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_v100_only/' + job_name + '*'
total_epochs = 6
starting_epoch = 0
# first step is to update the PID
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
fp.write(json_file)
os.rename('pid_lock.json', 'pid.json')
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
if '16' in args_model:
base_model = VGG16(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '19' in args_model:
base_model = VGG19(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
model.add(base_model)
model.add(layers.Flatten())
model.add(layers.BatchNormalization())
model.add(layers.Dense(128, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(64, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
    """SIGTERM handler: checkpoint the model and exit.

    Adds the partially-completed ("wasted") epoch time to this job's entry
    in epoch_waste.json, replaces any existing checkpoint file with one
    for the current epoch, marks the job as checkpointed in
    checkpoint.json, and terminates the process.
    """
    # first record the wasted epoch time
    global epoch_begin_time
    if epoch_begin_time == 0:
        # epoch_begin_time == 0 means no epoch has started yet
        epoch_waste_time = 0
    else:
        epoch_waste_time = int(time.time() - epoch_begin_time)
    epoch_waste_dict = {}
    with open('epoch_waste.json', 'r') as fp:
        epoch_waste_dict = json.load(fp)
    # accumulate the wasted time under this job's entry
    epoch_waste_dict[job_name] += epoch_waste_time
    json_file3 = json.dumps(epoch_waste_dict)
    with open('epoch_waste.json', 'w') as fp:
        fp.write(json_file3)
    print('checkpointing the model triggered by kill -15 signal')
    # delete whatever checkpoint that already exists
    for f in glob.glob(save_files):
        os.remove(f)
    # save a checkpoint named <job>_<epoch>.h5 so training can resume here
    model.save('/scratch/li.baol/checkpoint_v100_only/' + job_name + '_' + str(current_epoch) + '.h5')
    print ('(SIGTERM) terminating the process')
    checkpoint_dict = {}
    with open('checkpoint.json', 'r') as fp:
        checkpoint_dict = json.load(fp)
    # flag this job as having a usable checkpoint
    checkpoint_dict[job_name] = 1
    json_file3 = json.dumps(checkpoint_dict)
    with open('checkpoint.json', 'w') as fp:
        fp.write(json_file3)
    sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
    # Keras callback that records which epoch is running and when it began,
    # so the SIGTERM handler can checkpoint with the right epoch number.
    def on_epoch_begin(self, epoch, logs=None):
        global current_epoch
        #remaining_epochs = epochs - epoch
        current_epoch = epoch
        print('current epoch ' + str(current_epoch))
        global epoch_begin_time
        # wall-clock start of this epoch, used for waste-time accounting
        epoch_begin_time = time.time()
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| 31.266667 | 118 | 0.703767 |
53970803c5a0ec63644a15324e8e9af25dc96a9c | 6,632 | py | Python | src/sst/elements/ariel/frontend/simple/examples/stream/memHstream.py | feldergast/sst-elements | a7abc015aed709feb05821d269d233110569fd72 | [
"BSD-3-Clause"
] | 2 | 2019-06-10T15:32:03.000Z | 2019-06-11T14:17:32.000Z | src/sst/elements/ariel/frontend/simple/examples/stream/memHstream.py | feldergast/sst-elements | a7abc015aed709feb05821d269d233110569fd72 | [
"BSD-3-Clause"
] | 39 | 2016-01-06T15:08:15.000Z | 2020-06-03T18:12:31.000Z | src/sst/elements/ariel/frontend/simple/examples/stream/memHstream.py | feldergast/sst-elements | a7abc015aed709feb05821d269d233110569fd72 | [
"BSD-3-Clause"
] | 1 | 2021-01-08T15:56:19.000Z | 2021-01-08T15:56:19.000Z | """
Sample script
Simulating System
Every core with Private L1 and L2 caches.
All L2 cache are connected to shared L3 cache via bus.
L3 connects to MainMemory through memory Controller.
core_0 core_1 core_2 core_3 core_4 core_5 core_6 core_7
L1_0 L1_1 L1_2 L1_3 L1_4 L1_5 L1_6 L1_7
L2_0 L2_1 L2_2 L2_3 L2_4 L2_5 L2_6 L2_7
BUS
Shared L3
MemoryController
Main Memory (DRAMSIM)
"""
import sst
import os
## Flags
memDebug = 0
memDebugLevel = 10
coherenceProtocol = "MESI"
rplPolicy = "lru"
busLat = "50 ps"
cacheFrequency = "2 Ghz"
defaultLevel = 0
cacheLineSize = 64
corecount = 8
## Application Info
os.environ['SIM_DESC'] = 'EIGHT_CORES'
os.environ['OMP_NUM_THREADS'] = str(corecount)
sst_root = os.getenv( "SST_ROOT" )
app = sst_root + "/sst-elements/src/sst/elements/ariel/frontend/simple/examples/stream/stream"
if not os.path.exists(app):
app = os.getenv( "OMP_EXE" )
## Application Info:
## Executable -> exe_file
## appargcount -> Number of commandline arguments after <exec_file> name
## apparg<#> -> arguments
## Commandline execution for the below example would be
## /home/amdeshp/arch/benchmarks/PathFinder_1.0.0/PathFinder_ref/PathFinder.x -x /home/amdeshp/arch/benchmarks/PathFinder_1.0.0/generatedData/small1.adj_list
## AppArgs = ({
## "executable" : "/home/amdeshp/arch/benchmarks/PathFinder_1.0.0/PathFinder_ref/PathFinder.x",
## "appargcount" : "0",
## "apparg0" : "-x",
## "apparg1" : "/home/amdeshp/arch/benchmarks/PathFinder_1.0.0/generatedData/small1.adj_list",
## })
## Processor Model
ariel = sst.Component("A0", "ariel.ariel")
## ariel.addParams(AppArgs)
ariel.addParams({
"verbose" : "0",
"maxcorequeue" : "256",
"maxissuepercycle" : "2",
"pipetimeout" : "0",
"executable" : app,
"arielinterceptcalls" : "1",
"launchparamcount" : 1,
"launchparam0" : "-ifeellucky",
"arielmode" : "1",
"corecount" : corecount,
"defaultlevel" : defaultLevel,
})
ariel.setSubComponent("memmgr", "ariel.MemoryManagerSimple")
## MemHierarchy
def genMemHierarchy(cores):
    """Build the memory hierarchy for *cores* Ariel cores.

    Per core: private L1 and L2. All L2s share one bus, which feeds a
    shared L3 backed by a timingDRAM memory controller, matching the
    topology diagram in the module docstring.
    """
    # Shared bus between every per-core L2 and the L3.
    membus = sst.Component("membus", "memHierarchy.Bus")
    membus.addParams({
        "bus_frequency" : cacheFrequency,
    })
    # Memory controller with a timing-accurate DRAM backend.
    memctrl = sst.Component("memory", "memHierarchy.MemController")
    memctrl.addParams({
        "debug" : memDebug,
        "clock" : "1Ghz",
        "verbose" : 2,
        "request_width" : cacheLineSize,
    })
    memory = memctrl.setSubComponent("backend", "memHierarchy.timingDRAM")
    memory.addParams({
        "mem_size" : "512MiB",
        "id" : 0,
        "addrMapper" : "memHierarchy.simpleAddrMapper",
        "addrMapper.interleave_size" : "64B",
        "addrMapper.row_size" : "1KiB",
        "clock" : "1GHz",
        "channels" : 1,
        "channel.numRanks" : 2,
        "channel.rank.numBanks" : 8,
        "channel.transaction_Q_size" : 32,
        "channel.rank.bank.CL" : 10,
        "channel.rank.bank.CL_WR" : 12,
        "channel.rank.bank.RCD" : 10,
        "channel.rank.bank.TRP" : 14,
        "channel.rank.bank.dataCycles" : 2,
        "channel.rank.bank.pagePolicy" : "memHierarchy.simplePagePolicy",
        "channel.rank.bank.transactionQ" : "memHierarchy.reorderTransactionQ",
        "channel.rank.bank.pagePolicy.close" : 1,
    })
    # Private L1/L2 pair per core, wired Ariel -> L1 -> L2 -> bus.
    for core in range (cores):
        l1 = sst.Component("l1cache_%d"%core, "memHierarchy.Cache")
        l1.addParams({
            "cache_frequency" : cacheFrequency,
            "cache_size" : "32KB",
            "cache_line_size" : cacheLineSize,
            "associativity" : "8",
            "access_latency_cycles" : "4",
            "coherence_protocol" : coherenceProtocol,
            "replacement_policy" : rplPolicy,
            "L1" : "1",
            "debug" : memDebug,
            "debug_level" : memDebugLevel,
            "verbose" : 2,
        })
        l2 = sst.Component("l2cache_%d"%core, "memHierarchy.Cache")
        l2.addParams({
            "cache_frequency" : cacheFrequency,
            "cache_size" : "256 KB",
            "cache_line_size" : cacheLineSize,
            "associativity" : "8",
            "access_latency_cycles" : "10",
            "coherence_protocol" : coherenceProtocol,
            "replacement_policy" : rplPolicy,
            "L1" : "0",
            "debug" : memDebug,
            "debug_level" : memDebugLevel,
            "verbose" : 2,
            "mshr_num_entries" : 16,
            "mshr_latency_cycles" : 2,
        })
        ## SST Links
        # Ariel -> L1(PRIVATE) -> L2(PRIVATE) -> L3 (SHARED) -> DRAM
        ArielL1Link = sst.Link("cpu_cache_%d"%core)
        ArielL1Link.connect((ariel, "cache_link_%d"%core, busLat), (l1, "high_network_0", busLat))
        L1L2Link = sst.Link("l1_l2_%d"%core)
        L1L2Link.connect((l1, "low_network_0", busLat), (l2, "high_network_0", busLat))
        L2MembusLink = sst.Link("l2_membus_%d"%core)
        L2MembusLink.connect((l2, "low_network_0", busLat), (membus, "high_network_%d"%core, busLat))
    # Shared last-level cache.
    l3 = sst.Component("L3cache", "memHierarchy.Cache")
    l3.addParams({
        "cache_frequency" : cacheFrequency,
        "cache_size" : "8 MB",
        "cache_line_size" : cacheLineSize,
        "associativity" : "8",
        "access_latency_cycles" : "20",
        "coherence_protocol" : coherenceProtocol,
        "replacement_policy" : rplPolicy,
        "L1" : "0",
        "debug" : memDebug,
        "debug_level" : memDebugLevel,
        "mshr_num_entries" : "16",
        "mshr_latency_cycles" : 2,
        "verbose" : 2,
    })
    # Bus to L3 and L3 <-> MM
    BusL3Link = sst.Link("bus_L3")
    BusL3Link.connect((membus, "low_network_0", busLat), (l3, "high_network_0", busLat))
    L3MemCtrlLink = sst.Link("L3MemCtrl")
    L3MemCtrlLink.connect((l3, "low_network_0", busLat), (memctrl, "direct_link", busLat))
genMemHierarchy(corecount)
| 36.844444 | 158 | 0.546592 |
f205bc161630ce4dcdfe9023cbedfbbf3e6d1f69 | 32,802 | py | Python | django_actual/scaffold.py | sipmann/django-actual | f97fef8b4182ba2fdaa3c5b67cbffd7b38f3d52a | [
"MIT"
] | null | null | null | django_actual/scaffold.py | sipmann/django-actual | f97fef8b4182ba2fdaa3c5b67cbffd7b38f3d52a | [
"MIT"
] | null | null | null | django_actual/scaffold.py | sipmann/django-actual | f97fef8b4182ba2fdaa3c5b67cbffd7b38f3d52a | [
"MIT"
] | null | null | null | from __future__ import print_function, unicode_literals, with_statement, division
from os import path, system, listdir, sys, mkdir
from django.conf import settings
import shutil
# VIEW CONSTS
LIST_VIEW = """
def %(lower_model)s_list(request, template='%(lower_model)s/list.html'):
rows = %(model)s.objects.all()
if request.GET.get('order') is not None:
rows = rows.order_by(request.GET.get('order'))
p = Paginator(rows, 7)
page_number = request.GET.get('page')
page_obj = p.get_page(page_number)
return render(request, template, {'%(lower_model)s_list' : page_obj})
"""
NEW_VIEW = """
def %(lower_model)s_new(request, template='%(lower_model)s/new.html'):
form = %(model)sForm()
if request.method == 'POST':
form = %(model)sForm(request.POST)
if form.is_valid():
form.save()
messages.success(request, '%(lower_model)s successfully created.')
return HttpResponseRedirect(reverse('%(app)s:%(lower_model)s-list'))
return render(request, template, {'form': form})
"""
DETAILS_VIEW = """
def %(lower_model)s_details(request, id, template='%(lower_model)s/details.html'):
item = get_object_or_404(%(model)s, pk=id)
form = %(model)sForm(instance=item)
if request.method == 'POST':
form = %(model)sForm(request.POST, instance=item)
if form.is_valid():
form.save()
messages.success(request, '%(lower_model)s successfully edited.')
return HttpResponseRedirect(reverse('%(app)s:%(lower_model)s-list'))
return render(request, template, {'form': form, '%(lower_model)s': item})
"""
DELETE_VIEW = """
def %(lower_model)s_delete(request, id):
item = %(model)s.objects.get(pk=id)
item.delete()
messages.success(request, '%(lower_model)s deleted.')
return HttpResponseRedirect(reverse('%(app)s:%(lower_model)s-list'))
"""
# MODELS CONSTS
MODEL_TEMPLATE = """
class %s(models.Model):
%s
update_date = models.DateTimeField(auto_now=True)
create_date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.%s
class Meta:
ordering = ['-id']
"""
IMPORT_MODEL_TEMPLATE = """from %(app)s.models import %(model)s"""
CHARFIELD_TEMPLATE = """ %(name)s = models.CharField(max_length=%(length)s, null=%(null)s, blank=%(null)s)
"""
BOOLEANFIELD_TEMPLATE = """ %(name)s = models.BooleanField(default=True, null=%(null)s, blank=%(null)s)
"""
SLUGFIELD_TEMPLATE = """ %(name)s = models.SlugField(max_length=%(length)s, null=%(null)s, blank=%(null)s)
"""
TEXTFIELD_TEMPLATE = """ %(name)s = models.TextField(null=%(null)s, blank=%(null)s)
"""
INTEGERFIELD_TEMPLATE = """ %(name)s = models.IntegerField(null=%(null)s, default=%(default)s)
"""
DECIMALFIELD_TEMPLATE = """ %(name)s = models.DecimalField(max_digits=%(digits)s, decimal_places=%(places)s, null=%(null)s, default=%(default)s)
"""
DATETIMEFIELD_TEMPLATE = """ %(name)s = models.DateTimeField(null=%(null)s, default=%(default)s)
"""
FOREIGNFIELD_TEMPLATE = """ %(name)s = models.ForeignKey(%(foreign)s, null=%(null)s, blank=%(null)s, on_delete=models.DO_NOTHING)
"""
EMAIL_TEMPLATE = """ %(name)s = models.EmailField(max_length=%(length)s, null=%(null)s, blank=%(null)s)
"""
# Django template emitted for the scaffolded "<model> list" page.
# "%%" renders as a literal "%" after the scaffolder's %-substitution,
# so "{%%"/"%%}" become Django template tags in the generated file.
# Fixes vs. the original template: loop rows now use <tbody> (were wrapped
# in <thead>), the inline delete handler passes the implicit `event` object
# (`e` was undefined, so the confirm dialog never fired), and pagination
# links are real <a> elements (href on <li> is dead markup).
TEMPLATE_LIST_CONTENT = """{%% extends "base.html" %%}
{%% block page-title %%}%(title)s{%% endblock %%}
{%% block content %%}
<div class="card">
    <div class="card-body">
        <div class="card-title d-flex justify-content-between mb-4">
            <h2>%(model)s list</h2>
            <a class="btn btn-primary" href="{%% url '%(app)s:%(model)s-new' %%}">Add new %(model)s</a>
        </div>
        <table class="table">
            <thead>
                <tr>
                    <th>ID</th>
                    <th>Name</th>
                    <th>Action</th>
                </tr>
            </thead>
            {%% for item in %(model)s_list %%}
            <tbody>
                <tr>
                    <td>{{ item.id }}</td>
                    <td>{{ item }}</td>
                    <td><a href="{%% url '%(app)s:%(model)s-details' item.id %%}">Edit</a> <a href="{%% url '%(app)s:%(model)s-delete' item.id %%}" onclick="ConfirmDelete(event)">Delete</a></td>
                </tr>
            </tbody>
            {%% endfor %%}
        </table>
        <nav aria-label="Page navigation">
            <ul class="pagination step-links">
                {%% if %(model)s_list.has_previous %%}
                    <li class="page-item"><a class="page-link" href="?page=1">« first</a></li>
                    <li class="page-item"><a class="page-link" href="?page={{ %(model)s_list.previous_page_number }}">previous</a></li>
                {%% endif %%}
                <li class="page-item disabled"><span class="page-link current">
                    Page {{ %(model)s_list.number }} of {{ %(model)s_list.paginator.num_pages }}.
                </span></li>
                {%% if %(model)s_list.has_next %%}
                    <li class="page-item"><a class="page-link" href="?page={{ %(model)s_list.next_page_number }}">next</a></li>
                    <li class="page-item"><a class="page-link" href="?page={{ %(model)s_list.paginator.num_pages }}">last »</a></li>
                {%% endif %%}
            </ul>
        </nav>
        <script>
            function ConfirmDelete(e) {
                // Cancel navigation to the delete URL unless confirmed.
                if (!confirm('Delete this %(model)s?'))
                    e.preventDefault();
            }
        </script>
    </div>
</div>
{%% endblock %%}
"""
TEMPLATE_NEW_CONTENT = """{%% extends "base.html" %%}
{%% block page-title %%}%(title)s - {{ %(model)s }} {%% endblock %%}
{%% block content %%}
<div class="card">
<div class="card-body">
<div class="card-title">
<h2>%(model)s - {{ %(model)s }} </h2>
</div>
<form action="{%% url '%(app)s:%(model)s-new' %%}" method="POST">
<div>
{%% csrf_token %%}
{{ form }}
</div>
<input type="submit" value="Submit" class="btn btn-success">
<a class="btn btn-danger" href="{%% url '%(app)s:%(model)s-list' %%}">back to list</a>
</form>
</div>
</div>
{%% endblock %%}
"""
TEMPLATE_DETAILS_CONTENT = """{%% extends "base.html" %%}
{%% block page-title %%}%(title)s - {{ %(model)s }} {%% endblock %%}
{%% block content %%}
<div class="card">
<div class="card-body">
<div class="card-title">
<h2>%(model)s - {{ %(model)s }} </h2>
</div>
<form action="{%% url '%(app)s:%(model)s-details' %(model)s.id %%}" method="POST">
<div>
{%% csrf_token %%}
{{ form }}
</div>
<input type="submit" value="Submit" class="btn btn-success">
<a class="btn btn-danger" href="{%% url '%(app)s:%(model)s-list' %%}">back to list</a>
</form>
</div>
</div>
{%% endblock %%}
"""
URL_CONTENT = """
from django.urls import path
from django.contrib.auth import views as auth_views
from %(app)s import views
app_name = '%(app)s'
urlpatterns = [
path('', views.%(model)s_list, name='%(model)s-list'),
path('new', views.%(model)s_new, name='%(model)s-new'),
path('<int:id>', views.%(model)s_details, name='%(model)s-details'),
path('<int:id>/delete', views.%(model)s_delete, name='%(model)s-delete'),
]
"""
URL_EXISTS_CONTENT = """
path('%(model)s/', views.%(model)s_list, name='%(model)s-list'),
path('%(model)s/new', views.%(model)s_new, name='%(model)s-new'),
path('%(model)s/<int:id>', views.%(model)s_details, name='%(model)s-details'),
path('%(model)s/<int:id>/delete', views.%(model)s_delete, name='%(model)s-delete'),
"""
ADMIN_CONTENT = """
from %(app)s.models import %(model)s
admin.site.register(%(model)s)
"""
FORM_CONTENT = """
from %(app)s.models import %(model)s
class %(model)sForm(forms.ModelForm):
class Meta:
model = %(model)s
fields = '__all__'
"""
TESTS_CONTENT = """
from %(app)s.models import %(model)s
class %(model)sTest(TestCase):
def setUp(self):
self.user = User.objects.create(username='test_user')
def tearDown(self):
self.user.delete()
def test_list(self):
response = self.client.get(reverse('%(app)s:%(lower_model)s-list'))
self.assertEquals(response.status_code, 200)
def test_crud(self):
# Create new instance
response = self.client.post(reverse('%(app)s:%(lower_model)s-new'), {})
self.assertEquals(response.status_code, 302)
# Read instance
items = %(model)s.objects.all()
self.failUnlessEqual(items.count(), 1)
item = items[0]
response = self.client.get(reverse('%(app)s:%(lower_model)s-details', kwargs={'id': item.id}))
self.assertEquals(response.status_code, 200)
# Update instance
response = self.client.post(reverse('%(app)s:%(lower_model)s-details', kwargs={'id': item.id}), {})
self.assertEquals(response.status_code, 302)
# Delete instance
response = self.client.post(reverse('%(app)s:%(lower_model)s-delete', kwargs={'id': item.id}), {})
self.assertEquals(response.status_code, 302)
items = %(model)s.objects.all()
self.assertEquals(items.count(), 0)
"""
class Scaffold(object):
def _info(self, msg, indent=0):
print("{0} {1}".format("\t" * int(indent), msg))
def __init__(self, app, model, fields):
self.app = app
self.model = model
self.fields = fields
try:
self.SCAFFOLD_APPS_DIR = settings.SCAFFOLD_APPS_DIR
except:
self.SCAFFOLD_APPS_DIR = './'
def get_import(self, model):
for directory in listdir(self.SCAFFOLD_APPS_DIR):
if path.isdir('{0}{1}'.format(self.SCAFFOLD_APPS_DIR, directory)) \
and path.exists('{0}{1}/models.py'.format(self.SCAFFOLD_APPS_DIR, directory)):
with open('{0}{1}/models.py'.format(self.SCAFFOLD_APPS_DIR, directory), 'r') as fp:
# Check if model exists
for line in fp.readlines():
if 'class {0}(models.Model)'.format(model) in line:
# print "Foreign key '%s' was found in app %s..." % (model, dir)
return IMPORT_MODEL_TEMPLATE % {'app': directory, 'model': model}
return None
def is_imported(self, path, model):
with open(path, 'r') as import_file:
for line in import_file.readlines():
if 'import {0}'.format(model) in line:
# print "Foreign key '%s' was found in models.py..." % (foreign)
return True
return False
def add_global_view_imports(self, path):
import_list = list()
with open(path, 'r') as import_file:
need_import_shortcut = True
need_import_urlresolvers = True
need_import_users = True
need_import_token = True
need_import_JsonResponse = True
need_import_form = True
need_import_contrib = True
need_import_paginator = True
for line in import_file.readlines():
if 'from django.shortcuts import render, redirect, get_object_or_404' in line:
need_import_shortcut = False
if 'from django.urls import reverse' in line:
need_import_urlresolvers = False
if 'from django.contrib.auth.models import User, Group' in line:
need_import_users = False
if 'from django.middleware.csrf import get_token' in line:
need_import_token = False
if 'from django.http import JsonResponse, HttpResponseRedirect' in line:
need_import_JsonResponse = False
if 'from django.contrib import messages' in line:
need_import_contrib = False
if ('from %(app)s.forms import %(model)sForm' % { 'model': self.model, 'app': self.app }) in line:
need_import_form = False
if 'from django.core.paginator import Paginator' in line:
need_import_paginator = False
if need_import_shortcut:
import_list.append(
'from django.shortcuts import render, redirect, get_object_or_404')
if need_import_urlresolvers:
import_list.append('from django.urls import reverse')
if need_import_users:
import_list.append('from django.contrib.auth.models import User, Group')
#if need_import_token:
# import_list.append('from django.middleware.csrf import get_token')
if need_import_JsonResponse:
import_list.append('from django.http import JsonResponse, HttpResponseRedirect')
if need_import_contrib:
import_list.append('from django.contrib import messages')
if need_import_form:
import_list.append('from %(app)s.forms import %(model)sForm' % { 'model': self.model, 'app': self.app })
if need_import_paginator:
import_list.append('from django.core.paginator import Paginator')
return import_list
def view_exists(self, path, view):
# Check if view already exists
with open(path, 'r') as view_file:
for line in view_file.readlines():
if 'def {0}('.format(view) in line:
return True
return False
def get_field(self, field):
    """Render a model-field code snippet from a ``name:type[:opt...]`` spec.

    Option positions after the type depend on the type:
      char/email/slug : [2]=max length (default 255), [3]=not-null flag
      text/bool       : [2]=not-null flag
      int/datetime    : [2]=not-null flag, [3]=default value
      decimal         : [2]=digits, [3]=places, [4]=not-null flag, [5]=default
      foreign         : [2]=related model class name
    A "not-null flag" is positional: its mere presence selects NOT NULL;
    its value is ignored (this matches the original try/except behaviour).

    Returns the rendered module-level *_TEMPLATE snippet, or None for an
    unresolvable foreign model or an unknown type (which is only logged).
    """
    field = field.split(':')
    field_name = field[0]
    # Default to a CharField when no explicit type is given.
    field_type = (field[1] if len(field) > 1 else 'char').lower()
    if field_type == 'char':
        length = field[2] if len(field) > 2 else 255
        null = 'False' if len(field) > 3 else 'True'
        return CHARFIELD_TEMPLATE % {'name': field_name, 'length': length, 'null': null}
    elif field_type == 'text':
        null = 'False' if len(field) > 2 else 'True'
        return TEXTFIELD_TEMPLATE % {'name': field_name, 'null': null}
    elif field_type == 'int':
        null = 'False' if len(field) > 2 else 'True'
        default = field[3] if len(field) > 3 else None
        return INTEGERFIELD_TEMPLATE % {'name': field_name, 'null': null, 'default': default}
    elif field_type == 'decimal':
        # digits/places are mandatory for decimals; IndexError propagates
        # (same as before) when they are missing.
        null = 'False' if len(field) > 4 else 'True'
        default = field[5] if len(field) > 5 else None
        return DECIMALFIELD_TEMPLATE % {
            'name': field_name,
            'digits': field[2],
            'places': field[3],
            'null': null,
            'default': default,
        }
    elif field_type == 'datetime':
        null = 'False' if len(field) > 2 else 'True'
        default = field[3] if len(field) > 3 else None
        return DATETIMEFIELD_TEMPLATE % {'name': field_name, 'null': null, 'default': default}
    elif field_type == 'foreign':
        foreign = field[2]
        # Check if this foreign key is already in models.py
        # TODO: If it is the current model, skip the import.
        if foreign in ('User', 'Group'):
            # User/Group come from django.contrib.auth; import on demand.
            if not self.is_imported('{0}{1}/models.py'.format(self.SCAFFOLD_APPS_DIR,
                                                              self.app), foreign):
                self.imports.append('\nfrom django.contrib.auth.models import User, Group\n')
            return FOREIGNFIELD_TEMPLATE % {'name': field_name, 'foreign': foreign, 'null': 'True'}
        if self.is_imported('{0}{1}/models.py'.format(
                self.SCAFFOLD_APPS_DIR, self.app), foreign):
            return FOREIGNFIELD_TEMPLATE % {'name': field_name, 'foreign': foreign, 'null': 'True'}
        # Otherwise try to resolve an import for the related model.
        if self.get_import(foreign):
            self.imports.append(self.get_import(foreign))
            return FOREIGNFIELD_TEMPLATE % {'name': field_name, 'foreign': foreign, 'null': 'True'}
        self._info('error\t{0}{1}/models.py\t{2} class not found'.format(
            self.SCAFFOLD_APPS_DIR, self.app, foreign), 1)
        return None
    elif field_type == 'email':
        length = field[2] if len(field) > 2 else 255
        null = 'False' if len(field) > 3 else 'True'
        return EMAIL_TEMPLATE % {'name': field_name, 'length': length, 'null': null}
    elif field_type == 'bool':
        # BUGFIX: bool has no length option, so its not-null flag lives at
        # index 2 (like 'text'); it was previously read from index 3 and
        # therefore always ignored.
        null = 'False' if len(field) > 2 else 'True'
        return BOOLEANFIELD_TEMPLATE % {'name': field_name, 'null': null}
    elif field_type == 'slug':
        length = field[2] if len(field) > 2 else 255
        null = 'False' if len(field) > 3 else 'True'
        return SLUGFIELD_TEMPLATE % {'name': field_name, 'length': length, 'null': null}
    else:
        # Typo fixed in the message: "unknow" -> "unknown".
        self._info("Field %s has unknown type %s" % (field_name, field_type))
def create_app(self):
    """Create the Django app with ``manage.py startapp`` unless it exists.

    Raises when the configured apps directory itself is missing; it is
    never created here.
    """
    self._info(" App ")
    self._info("===========")
    # The apps dir must pre-exist -- fail loudly instead of guessing.
    if self.SCAFFOLD_APPS_DIR and not path.exists('{0}'.format(self.SCAFFOLD_APPS_DIR)):
        raise Exception(
            "SCAFFOLD_APPS_DIR {0} does not exists".format(self.SCAFFOLD_APPS_DIR))
    if not path.exists('{0}{1}'.format(self.SCAFFOLD_APPS_DIR, self.app)):
        # startapp creates the app in the CWD; self.app goes into a shell
        # command, but run() restricts it to alphanumerics beforehand.
        system('python manage.py startapp {0}'.format(self.app))
        if self.SCAFFOLD_APPS_DIR != './':
            # Relocate the freshly created app into the custom apps dir.
            shutil.move(self.app, self.SCAFFOLD_APPS_DIR)
        self._info("create\t{0}{1}".format(self.SCAFFOLD_APPS_DIR, self.app), 1)
    else:
        self._info("exists\t{0}{1}".format(self.SCAFFOLD_APPS_DIR, self.app), 1)
def create_views(self):
    """Append list/new/details/delete views for the model to views.py.

    views.py is created when missing; a view function is only added when
    no ``def <model>_<suffix>(`` line exists yet.
    """
    self._info(" Views ")
    self._info("===========")
    # Open views.py to read
    view_path = '{0}{1}/views.py'.format(self.SCAFFOLD_APPS_DIR, self.app)
    # Create an empty views.py when the app does not have one yet.
    if path.exists(view_path):
        self._info('exists\t{0}{1}/views.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1)
    else:
        with open(view_path, 'w'):
            self._info('create\t{0}{1}/views.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1)
    import_list = list()
    view_list = list()
    # Add global imports
    import_list.append('\n'.join(imp for imp in self.add_global_view_imports(view_path)))
    # Add model imports
    if not self.is_imported(view_path, self.model):
        import_list.append(self.get_import(self.model))
    lower_model = self.model.lower()
    # Check if view already exists; *_VIEW are module-level %-templates.
    if not self.view_exists(view_path, "{0}_list".format(lower_model)):
        view_list.append(LIST_VIEW % {
            'lower_model': lower_model,
            'model': self.model,
            'app': self.app,
        })
        self._info("added \t{0}\t{1}_view".format(view_path, lower_model), 1)
    else:
        self._info("exists\t{0}\t{1}_view".format(view_path, lower_model), 1)
    if not self.view_exists(view_path, "{0}_new".format(lower_model)):
        view_list.append(NEW_VIEW % {
            'lower_model': lower_model,
            'model': self.model,
            'app': self.app,
        })
        self._info("added \t{0}\t{1}_new".format(view_path, lower_model), 1)
    else:
        self._info("exists\t{0}\t{1}_new".format(view_path, lower_model), 1)
    if not self.view_exists(view_path, "{0}_details".format(lower_model)):
        view_list.append(DETAILS_VIEW % {
            'lower_model': lower_model,
            'model': self.model,
            'app': self.app,
        })
        self._info("added \t{0}\t{1}_details".format(view_path, lower_model), 1)
    else:
        self._info("exists\t{0}\t{1}_details".format(view_path, lower_model), 1)
    if not self.view_exists(view_path, "{0}_delete".format(lower_model)):
        view_list.append(DELETE_VIEW % {
            'lower_model': lower_model,
            'model': self.model,
            'app': self.app,
        })
        self._info("added \t{0}\t{1}_delete".format(view_path, lower_model), 1)
    else:
        self._info("exists\t{0}\t{1}_delete".format(view_path, lower_model), 1)
    # Open views.py to append.
    # NOTE(review): imports are joined with '\n' but no trailing newline is
    # written before the view bodies -- relies on the *_VIEW templates
    # starting with a newline; confirm against the template definitions.
    with open(view_path, 'a') as view_file:
        view_file.write('\n'.join([import_line for import_line in import_list]))
        view_file.write(''.join([view for view in view_list]))
def create_model(self):
    """Append the model class (with its rendered fields) to models.py.

    Bails out early when a ``class <Model>`` line is already present.
    """
    self._info(" Model ")
    self._info("===========")
    # Open models.py to read.
    # NOTE(review): the handle is also stashed on self.models_file --
    # confirm nothing reads it after this method closes it.
    with open('{0}{1}/models.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 'r') as fp:
        self.models_file = fp
        # Check if model already exists
        for line in self.models_file.readlines():
            if 'class {0}'.format(self.model) in line:
                self._info('exists\t{0}{1}/models.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1)
                return
    self._info('create\t{0}{1}/models.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1)
    # Prepare fields; get_field() may also push import lines onto
    # self.imports as a side effect (e.g. for foreign keys).
    self.imports = []
    fields = []
    charField = ''
    for field in self.fields:
        new_field = self.get_field(field)
        if new_field:
            # Remember the first CharField name for MODEL_TEMPLATE
            # (presumably used as the textual representation); 'id' is the
            # fallback when the model has no CharField.
            if 'CharField' in new_field and charField == '':
                charField = field.split(':')[0]
            fields.append(new_field)
            self._info('added\t{0}{1}/models.py\t{2} field'.format(
                self.SCAFFOLD_APPS_DIR, self.app, field.split(':')[0]), 1)
    if charField == '':
        charField = 'id'
    # Open models.py to append
    with open('{0}{1}/models.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 'a') as fp:
        fp.write(''.join([import_line for import_line in self.imports]))
        fp.write(MODEL_TEMPLATE % (self.model, ''.join(field for field in fields), charField))
def create_templates(self):
    """Ensure templates/<model>/ exists and write list/details/new pages."""
    self._info(" Templates ")
    self._info("===========")
    # Check if template dir exists
    if path.exists('{0}{1}/templates/'.format(self.SCAFFOLD_APPS_DIR, self.app)):
        self._info('exists\t{0}{1}/templates/'.format(self.SCAFFOLD_APPS_DIR, self.app), 1)
    else:
        mkdir("{0}{1}/templates/".format(self.SCAFFOLD_APPS_DIR, self.app))
        self._info('create\t{0}{1}/templates/'.format(self.SCAFFOLD_APPS_DIR, self.app), 1)
    # Check if the per-model template dir exists
    if path.exists('{0}{1}/templates/{2}/'.format(self.SCAFFOLD_APPS_DIR, self.app,
                                                  self.model.lower())):
        self._info('exists\t{0}{1}/templates/{2}/'.format(self.SCAFFOLD_APPS_DIR, self.app,
                                                          self.model.lower()), 1)
    else:
        mkdir("{0}{1}/templates/{2}/".format(self.SCAFFOLD_APPS_DIR, self.app,
                                             self.model.lower()))
        self._info('create\t{0}{1}/templates/{2}/'.format(
            self.SCAFFOLD_APPS_DIR, self.app, self.model.lower()), 1)
    # One HTML file per generated view; TEMPLATE_* are module-level
    # %-templates.
    self.create_view_file('list', TEMPLATE_LIST_CONTENT)
    self.create_view_file('details', TEMPLATE_DETAILS_CONTENT)
    self.create_view_file('new', TEMPLATE_NEW_CONTENT)
def create_view_file(self, name, template):
    """Write templates/<model>/<name>.html from *template* unless present.

    *template* is a %-format string fed an app/model/title mapping
    (title simply reuses the lower-cased model name).
    """
    if path.exists('{0}{1}/templates/{2}/{3}.html'.format(self.SCAFFOLD_APPS_DIR, self.app,
                                                          self.model.lower(), name)):
        self._info('exists\t{0}{1}/templates/{2}/{3}.html'.format(
            self.SCAFFOLD_APPS_DIR, self.app, self.model.lower(), name), 1)
    else:
        with open("{0}{1}/templates/{2}/{3}.html".format(self.SCAFFOLD_APPS_DIR, self.app,
                                                         self.model.lower(), name), 'w') as fp:
            fp.write(template % {
                'app': self.app,
                'model': self.model.lower(),
                'title': self.model.lower(),
            })
        self._info('create\t{0}{1}/templates/{2}/{3}.html'.format(
            self.SCAFFOLD_APPS_DIR, self.app, self.model.lower(), name), 1)
def create_urls(self):
    """Create the app's urls.py, or splice the model's routes into it.

    When urls.py already exists, the model's entries (URL_EXISTS_CONTENT)
    are inserted directly after the line containing 'urlpatterns';
    otherwise a fresh file is written from URL_CONTENT.
    """
    self._info(" URLs ")
    self._info("===========")
    # Check if urls.py exists
    if path.exists('{0}{1}/urls.py'.format(self.SCAFFOLD_APPS_DIR, self.app)):
        # Rebuild the file line by line, injecting after 'urlpatterns'.
        new_urls = ''
        with open("{0}{1}/urls.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'r') as fp:
            for line in fp.readlines():
                new_urls += line
                if 'urlpatterns' in line:
                    new_urls += URL_EXISTS_CONTENT % {
                        'app': self.app,
                        'model': self.model.lower(),
                    }
        with open("{0}{1}/urls.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'w') as fp:
            fp.write(new_urls)
        self._info('update\t{0}{1}/urls.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1)
    else:
        with open("{0}{1}/urls.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'w') as fp:
            fp.write(URL_CONTENT % {
                'app': self.app,
                'model': self.model.lower(),
            })
        self._info('create\t{0}{1}/urls.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1)
def create_admin(self):
    """Create admin.py when missing and register the model exactly once.

    Registration is appended from the module-level ADMIN_CONTENT template
    only when no ``admin.site.register(<Model>)`` line exists yet.
    """
    self._info(" Admin ")
    self._info("===========")
    # Create an admin.py stub when the app does not have one yet.
    if path.exists('{0}{1}/admin.py'.format(self.SCAFFOLD_APPS_DIR, self.app)):
        self._info('exists\t{0}{1}/admin.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1)
    else:
        with open("{0}{1}/admin.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'w') as fp:
            fp.write("from django.contrib import admin\n")
        # BUGFIX: this log line previously reported 'urls.py'.
        self._info('create\t{0}{1}/admin.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1)
    # Register the model only when it is not registered already.
    with open("{0}{1}/admin.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'r') as fp:
        content = fp.read()
    if "admin.site.register({0})".format(self.model) in content:
        self._info('exists\t{0}{1}/admin.py\t{2}'.format(self.SCAFFOLD_APPS_DIR, self.app,
                                                         self.model.lower()), 1)
    else:
        with open("{0}{1}/admin.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'a') as fp:
            fp.write(ADMIN_CONTENT % {'app': self.app, 'model': self.model})
        self._info('added\t{0}{1}/admin.py\t{2}'.format(self.SCAFFOLD_APPS_DIR, self.app,
                                                        self.model.lower()), 1)
def create_forms(self):
    """Create forms.py when missing and add a <Model>Form exactly once."""
    self._info(" Forms ")
    self._info("===========")
    # Check if forms.py exists
    if path.exists('{0}{1}/forms.py'.format(self.SCAFFOLD_APPS_DIR, self.app)):
        self._info('exists\t{0}{1}/forms.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1)
    else:
        with open("{0}{1}/forms.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'w') as fp:
            fp.write("from django import forms\n")
        self._info('create\t{0}{1}/forms.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1)
    # Append the form only when no ``class <Model>Form`` line exists yet;
    # FORM_CONTENT is a module-level %-template.
    with open("{0}{1}/forms.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'r') as fp:
        content = fp.read()
        if "class {0}Form".format(self.model) in content:
            self._info('exists\t{0}{1}/forms.py\t{2}'.format(
                self.SCAFFOLD_APPS_DIR, self.app, self.model.lower()), 1)
        else:
            # Reopened in append mode; the outer read handle is unused
            # from here on.
            with open("{0}{1}/forms.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'a') as fp:
                fp.write(FORM_CONTENT % {'app': self.app, 'model': self.model})
            self._info('added\t{0}{1}/forms.py\t{2}'.format(
                self.SCAFFOLD_APPS_DIR, self.app, self.model.lower()), 1)
def create_tests(self):
    """Create/extend tests.py: ensure imports and add a <Model>Test once."""
    self._info(" Tests ")
    self._info("===========")
    # Check if tests.py exists
    if path.exists('{0}{1}/tests.py'.format(self.SCAFFOLD_APPS_DIR, self.app)):
        self._info('exists\t{0}{1}/tests.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1)
        # Check which imports already exist; substring checks, so any
        # existing import of these names counts.
        import_testcase = True
        import_user = True
        import_reverse = True
        with open("{0}{1}/tests.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'r') as fp:
            for line in fp.readlines():
                if 'import TestCase' in line:
                    import_testcase = False
                if 'import User' in line:
                    import_user = False
                if 'import reverse' in line:
                    import_reverse = False
        # Append whatever imports are still missing.
        with open("{0}{1}/tests.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'a') as fp:
            if import_testcase:
                fp.write("from django.test import TestCase\n")
            if import_user:
                fp.write("from django.contrib.auth.models import User\n")
            if import_reverse:
                fp.write("from django.urls import reverse\n")
    else:
        with open("{0}{1}/tests.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'w') as fp:
            fp.write("from django.test import TestCase\n")
            fp.write("from django.contrib.auth.models import User\n")
            fp.write("from django.urls import reverse\n")
        self._info('create\t{0}{1}/tests.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1)
    # Append the test class only when it is not present yet;
    # TESTS_CONTENT is a module-level %-template.
    with open("{0}{1}/tests.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'r') as fp:
        content = fp.read()
        if "class {0}Test".format(self.model) in content:
            self._info('exists\t{0}{1}/tests.py\t{2}'.format(
                self.SCAFFOLD_APPS_DIR, self.app, self.model.lower()), 1)
        else:
            with open("{0}{1}/tests.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'a') as fp:
                fp.write(TESTS_CONTENT % {
                    'app': self.app,
                    'model': self.model,
                    'lower_model': self.model.lower(),
                })
            self._info('added\t{0}{1}/tests.py\t{2}'.format(self.SCAFFOLD_APPS_DIR, self.app,
                                                            self.model.lower()), 1)
def run(self):
    """Drive the full scaffolding: create the app, then (when a model was
    given) its model, views, admin, forms, urls, templates and tests."""
    if not self.app:
        sys.exit("No application name found...")
    # The app name ends up in a shell command (manage.py startapp) and in
    # generated module paths, so restrict it to alphanumerics.
    if not self.app.isalnum():
        # BUGFIX: this validates the *application* name; the old message
        # incorrectly talked about the model name.
        sys.exit("Application name should be alphanumerical...")
    self.create_app()
    if self.model:
        self.create_model()
        self.create_views()
        self.create_admin()
        self.create_forms()
        self.create_urls()
        self.create_templates()
        self.create_tests()
    self._info(' All Done ')
    self._info('===========')
    # Remind the user of the two manual wiring steps.
    self._info("Add '{0}.apps.{0}Config' to the settings file".format(self.app))
    self._info("Add path('{0}', include('{1}.urls')) to the router file".format(self.app.lower(), self.app))
7b35640cc33e749326b0108e01c9308ac2c3ffda | 603 | py | Python | pRestore/stuff.py | snaiperskaya96/pRestore | cd51050fbd02423b038233c804e1c1ee0bfe59e7 | [
"MIT"
] | null | null | null | pRestore/stuff.py | snaiperskaya96/pRestore | cd51050fbd02423b038233c804e1c1ee0bfe59e7 | [
"MIT"
] | null | null | null | pRestore/stuff.py | snaiperskaya96/pRestore | cd51050fbd02423b038233c804e1c1ee0bfe59e7 | [
"MIT"
class Stuff:
    """Small helper namespace for pRestore."""

    def __init__(self):
        # Stateless; kept so existing ``Stuff()`` call sites keep working.
        return

    @staticmethod
    def print_logo():
        """Print the ASCII-art application logo.

        Uses print() as a function (PEP 3105) instead of the Python 2
        print statement, so this module also imports under Python 3.
        NOTE(review): banner column spacing was reconstructed; cosmetic only.
        """
        print('''
        ____           _
  _ __ |  _ \ ___  ___| |_ ___  _ __ ___
 | '_ \| |_) / _ \/ __| __/ _ \| '__/ _ \\
 | |_) |  _ < __/\__ \ || (_) | | |  __/
 | .__/|_| \_\___||___/\__\___/|_|  \___|
 |_|
        ''')
class NotAFolder(Exception):
    """Raised when a path expected to be a directory is not one."""

    def __init__(self, value):
        super(NotAFolder, self).__init__(value)
        # Keep the offending value for callers and for __str__.
        self.value = value

    def __str__(self):
        return '{!r}'.format(self.value)
class NotAFile(Exception):
    """Raised when a path expected to be a regular file is not one."""

    def __init__(self, value):
        super(NotAFile, self).__init__(value)
        # Keep the offending value for callers and for __str__.
        self.value = value

    def __str__(self):
        return '{!r}'.format(self.value)
| 19.451613 | 41 | 0.507463 |
cf3369496df34c08b7fc5667c3f13cb9692aea97 | 1,450 | py | Python | USBCameraStream.py | lilbowser/CamPS | 293a48160ac21a2ca04b291ce7c1e34ebcdec4a2 | [
"MIT"
] | null | null | null | USBCameraStream.py | lilbowser/CamPS | 293a48160ac21a2ca04b291ce7c1e34ebcdec4a2 | [
"MIT"
] | null | null | null | USBCameraStream.py | lilbowser/CamPS | 293a48160ac21a2ca04b291ce7c1e34ebcdec4a2 | [
"MIT"
] | null | null | null | """
The MIT License (MIT)
Copyright (c) 2015 Adrian Rosebrock, http://www.pyimagesearch.com
"""
# import the necessary packages
from threading import Thread
import cv2
class USBCameraStream:
    """Threaded wrapper around cv2.VideoCapture.

    A daemon thread continuously grabs frames so that read() always
    returns the latest frame without blocking the caller. Based on
    Adrian Rosebrock's threaded webcam stream (MIT licensed, see header).
    """

    def __init__(self, src=0):
        # Open the capture device and read one frame so self.frame is
        # populated even before start() is called.
        self.stream = cv2.VideoCapture()
        self.stream.open(src)
        (self.grabbed, self.frame) = self.stream.read()
        # Flag polled by the capture loop; set via stop().
        self.stopped = False

    def start(self):
        """Start the background capture thread; returns self for chaining."""
        t = Thread(target=self.update, args=())
        t.daemon = True  # don't keep the process alive on interpreter exit
        t.start()
        return self

    def update(self):
        """Capture loop executed by the background thread."""
        # Loop until stop() flips the flag.
        while True:
            if self.stopped:
                return
            # Overwrites grabbed/frame; readers always see the newest frame.
            (self.grabbed, self.frame) = self.stream.read()

    def read(self):
        """Return the most recently captured frame (may repeat frames)."""
        return self.frame

    def stop(self):
        """Ask the capture thread to exit after its current iteration."""
        self.stopped = True

    def close(self):
        """Release the underlying OpenCV capture device.

        NOTE(review): call stop() first -- update() may still be reading
        from the stream when it is released; confirm callers do so.
        """
        self.stream.release()
| 25.892857 | 71 | 0.604138 |
a0bd20c3eac4d7d05fde18567b2c2420a540f32a | 8,386 | py | Python | inventory.py | roman-geraskin/fssh | 80078b02524108994677f891ea7b26c951d49404 | [
"MIT"
] | null | null | null | inventory.py | roman-geraskin/fssh | 80078b02524108994677f891ea7b26c951d49404 | [
"MIT"
] | null | null | null | inventory.py | roman-geraskin/fssh | 80078b02524108994677f891ea7b26c951d49404 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import os
import re
from ansible.inventory.manager import InventoryManager
from ansible.parsing.dataloader import DataLoader
def _get_ssh_params(inventory_file_name):
hosts = {}
data_loader = DataLoader()
inventory = InventoryManager(loader=data_loader,
sources=[inventory_file_name])
for host, val in inventory.hosts.items():
vars = val.get_vars()
hosts[host] = {
"HostName": vars.get("ansible_host", host),
"Port": vars.get("ansible_port"),
"User": vars.get("ansible_user"),
"SshPrivateKeyFile": vars.get("ansible_ssh_private_key_file"),
"Options": vars.get("ansible_ssh_common_args")
}
return hosts
def _main(mode, inventory_path, target_client=None):
clients = {}
inventory_path_full = os.path.expanduser(inventory_path)
for root, dirs, files in os.walk(inventory_path_full):
for name in files:
basename = os.path.basename(root)
if (basename not in ["vaults", "vars", "group_vars", "host_vars"]
and os.path.splitext(name)[1] in [".yml", ".yaml"]):
client = root.replace(inventory_path_full,
"").split(os.sep)[0].split("-")[0]
if mode == "all":
clients.setdefault(client, {}).update(
_get_ssh_params(os.path.join(root, name)))
elif mode == "clients_only":
clients.setdefault(client, {})
elif mode == "client_hosts":
if client == target_client:
clients.setdefault(client, {}).update(
_get_ssh_params(os.path.join(root, name)))
return clients
def _get_flat_hosts(client_hosts):
flat_list = []
for sublist in client_hosts.values():
for item in sublist.keys():
flat_list.append(item)
return flat_list
def _replace_ansible_host_with_ip(client_hosts, ssh_opts):
proxycommand_re = re.compile(r"ProxyCommand=[\"|'](.*)[\"|']")
host_re = re.compile(r"([a-zA-Z][\w\.-]+)")
proxycommand = proxycommand_re.findall(ssh_opts)
if proxycommand:
host = host_re.findall(proxycommand[0])
if "ssh" in host:
host.remove("ssh")
if host:
if host[0] in client_hosts:
ip = client_hosts[host[0]]["HostName"]
ssh_opts = ssh_opts.replace(host[0], ip)
return ssh_opts
def get_clients(inventory_path):
    """Print every client name found in the inventory, space separated."""
    client_names = _main("clients_only", inventory_path).keys()
    print(" ".join(client_names))
def get_hosts_all(inventory_path):
    """Print every host of every client, space separated."""
    all_clients = _main("all", inventory_path)
    print(" ".join(_get_flat_hosts(all_clients)))
def get_client_hosts(client, inventory_path):
    """Print the hosts belonging to *client*, space separated."""
    found = _main("client_hosts", inventory_path, target_client=client)
    print(" ".join(found.get(client, {}).keys()))
def get_client_hosts_all(inventory_path):
    """Print all client names followed by all host names, space separated."""
    everything = _main("all", inventory_path)
    names = list(everything.keys()) + _get_flat_hosts(everything)
    print(" ".join(names))
def get_ssh_string(host, inventory_path, client=None, quote_opts_quotes=False):
    """Print the ssh argument string for *host*: port, user, private key,
    extra options and the target address.

    Looks the host up in one client's inventory when *client* is given,
    otherwise in the merged hosts of all clients. Prints nothing when the
    host is unknown. *quote_opts_quotes* re-quotes double-quoted option
    values in single quotes (workaround for xxh's argument handling).
    """
    if client:
        client_hosts = _main("client_hosts",
                             inventory_path,
                             target_client=client)[client]
    else:
        # No client given: merge every client's hosts into one mapping.
        client_hosts = {}
        for hosts in _main("all", inventory_path).values():
            client_hosts.update(hosts)
    host_vars = client_hosts.get(host)
    if host_vars:
        # Build the individual ssh arguments; falsy parts ("" / None) are
        # filtered out when ssh_args is assembled below.
        port_str = ("-p " +
                    str(host_vars["Port"]) if host_vars["Port"] else None)
        user_str = ("-l " +
                    str(host_vars["User"]) if host_vars["User"] else "")
        pkey_str = ("-i " + str(host_vars["SshPrivateKeyFile"])
                    if host_vars["SshPrivateKeyFile"] else None)
        ssh_opts = host_vars["Options"]
        if host_vars["Options"]:
            # Jump-host names inside a ProxyCommand become their IPs.
            ssh_opts = _replace_ansible_host_with_ip(client_hosts, ssh_opts)
            if quote_opts_quotes:
                # Normalise to double quotes, then wrap each "..." in
                # single quotes (xxh workaround, see _argparser).
                ssh_opts = re.sub('("[^"]+")', r"'\g<1>'",
                                  ssh_opts.replace("'", '"'))
        ssh_args = [
            x for x in
            [port_str, user_str, pkey_str, ssh_opts, host_vars['HostName']]
            if x
        ]
        ssh_str = f"{' '.join(ssh_args)}"
        print(ssh_str)
def config_update(config_main, config_dir, inventory_path, client=None):
    """Regenerate per-client ssh config files under *config_dir* and make
    sure *config_main* Includes that directory.

    One file per client is (re)written, containing a Host block per
    inventory host with only the non-empty parameters. Restricted to a
    single client when *client* is given.
    """
    config_dir_full = os.path.expanduser(config_dir)
    if not os.path.isdir(config_dir_full):
        try:
            os.mkdir(config_dir_full, 0o700)
        except OSError:
            # Only reported here; the writes below will fail loudly anyway
            # if the directory is really missing.
            print(f"Directory creation failed: {config_dir_full}")
    if client:
        client_hosts = _main("client_hosts",
                             inventory_path,
                             target_client=client)
    else:
        client_hosts = _main("all", inventory_path)
    for client, hosts in client_hosts.items():
        config_lines = {}
        print(f"{client}: {len(hosts)} hosts")
        for host, host_vars in hosts.items():
            lines = [f"Host {host}"]
            print(f"{client}: adding {host} with {host_vars}")
            for param, value in host_vars.items():
                if value:
                    lines.append(f" {param} {value}")
            # Record only hosts that have at least one parameter.
            if len(lines) > 1:
                config_lines[host] = lines.copy()
        # One config file per client, named after the client.
        client_ssh_conf = os.path.join(config_dir_full, client)
        with open(client_ssh_conf, "w") as f:
            for host, val in config_lines.items():
                f.writelines([x + "\n" for x in val])
                f.write("\n")
        os.chmod(client_ssh_conf, 0o600)
    # Ensure the main ssh config Includes the per-client directory
    # (relative to the main config's location), prepending it once.
    config_main_full = os.path.expanduser(config_main)
    relative_dir_path = os.path.relpath(config_dir_full,
                                        os.path.dirname(config_main_full))
    option = f"Include {relative_dir_path}/*"
    with open(config_main_full, "r") as f:
        ssh_config_conts = f.readlines()
    if option not in [x.rstrip() for x in ssh_config_conts]:
        print(f"Adding '{option}' to {config_main}")
        with open(config_main_full, "w") as f:
            f.write(option + "\n\n")
            f.writelines(ssh_config_conts)
def _argparser():
    """Build and parse the CLI.

    --inventory is mandatory; exactly one of --string / --config /
    --completion selects the action (mutually exclusive, required).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--inventory",
                        help="Path to ansible inventory",
                        required=True)
    parser.add_argument("--client", help="Search in client hosts only")
    parser.add_argument("--config-dir",
                        help="Path to the ssh clients config dir",
                        default="~/.ssh/config.d/")
    parser.add_argument("--config-main",
                        help="Path to the main ssh config",
                        default="~/.ssh/config")
    # workaround for xxh (see get_ssh_string)
    parser.add_argument("--quote-opts-quotes",
                        help="Place quoted ssh options in quotes",
                        action="store_true")
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--string", help="Get ssh string")
    group.add_argument("--config",
                       help="Update ssh configs",
                       action="store_true")
    group.add_argument("--completion",
                       help="Get hosts completion",
                       choices=["hosts", "clients", "both"])
    args = parser.parse_args()
    return args
if __name__ == "__main__":
    args = _argparser()
    # Fail fast on a bad --inventory path.
    if not os.path.exists(args.inventory):
        raise FileNotFoundError(f"{args.inventory} does not exist!")
    if args.completion:
        # Shell-completion output: hosts (optionally for one client),
        # client names, or both.
        if args.completion == "hosts":
            if args.client:
                get_client_hosts(args.client, args.inventory)
            else:
                get_hosts_all(args.inventory)
        elif args.completion == "clients":
            get_clients(args.inventory)
        elif args.completion == "both":
            get_client_hosts_all(args.inventory)
    elif args.string:
        # --string holds the host name to build an ssh string for.
        get_ssh_string(args.string, args.inventory, args.client,
                       args.quote_opts_quotes)
    elif args.config:
        config_update(args.config_main, args.config_dir, args.inventory,
                      args.client)
| 36.620087 | 79 | 0.576198 |
b857a0b57a36c9d584fa05bf88a80f1f5140af7e | 2,015 | py | Python | tests/test_helloheart_airflow_utils.py | Hello-Heart/helloheart-airflow-utils | 3c21b7e554b4994dfd86eebdc621685f3c09505c | [
"MIT"
] | null | null | null | tests/test_helloheart_airflow_utils.py | Hello-Heart/helloheart-airflow-utils | 3c21b7e554b4994dfd86eebdc621685f3c09505c | [
"MIT"
] | null | null | null | tests/test_helloheart_airflow_utils.py | Hello-Heart/helloheart-airflow-utils | 3c21b7e554b4994dfd86eebdc621685f3c09505c | [
"MIT"
] | null | null | null | """Tests for custom logging class"""
import importlib
from airflow.config_templates import airflow_local_settings
from helloheart_airflow_utils import __version__
from helloheart_airflow_utils.logging import log_config
def test_version():
"""Tests that the __version__ attribute is correct"""
assert __version__ == "0.2.2"
def test_logging_config():
    """LOGGING_CONFIG must differ from Airflow's default -- and only by the
    'airflow.processor' logger level."""
    assert log_config.LOGGING_CONFIG != airflow_local_settings.DEFAULT_LOGGING_CONFIG
    # Overwrite the customised level with the stock value; per the original
    # comment this "reverts" the one change (assumes the default level is
    # INFO -- confirm against airflow_local_settings).
    log_config.LOGGING_CONFIG["loggers"]["airflow.processor"]["level"] = "INFO"
    # after reverting the change, configs are the same
    assert log_config.LOGGING_CONFIG == airflow_local_settings.DEFAULT_LOGGING_CONFIG
def test_dag_processor_logging_config():
    """
    DAG_PARSING_LOGGING_CONFIG must differ from Airflow's default -- and
    only by the processor_manager handler's maxBytes.
    """
    assert (
        log_config.DAG_PARSING_LOGGING_CONFIG
        != airflow_local_settings.DEFAULT_DAG_PARSING_LOGGING_CONFIG
    )
    # Overwrite the customised rotation size with the stock value so the
    # dicts compare equal (assumes the default is 100 MiB -- confirm).
    log_config.DAG_PARSING_LOGGING_CONFIG["handlers"]["processor_manager"][
        "maxBytes"
    ] = (100 * 2**20)
    # after reverting the change, configs are the same
    assert (
        log_config.DAG_PARSING_LOGGING_CONFIG
        == airflow_local_settings.DEFAULT_DAG_PARSING_LOGGING_CONFIG
    )
def test_complex_logging_config(monkeypatch):
    """With CONFIG_PROCESSOR_MANAGER_LOGGER enabled, LOGGING_CONFIG must
    differ from the default by both customisations (processor level and
    processor_manager maxBytes)."""
    monkeypatch.setenv("CONFIG_PROCESSOR_MANAGER_LOGGER", "True")
    # Reload both modules so they rebuild their configs with the env var
    # set; order matters -- airflow's settings first, then our config.
    importlib.reload(airflow_local_settings)
    importlib.reload(log_config)
    assert log_config.LOGGING_CONFIG != airflow_local_settings.DEFAULT_LOGGING_CONFIG
    # Overwrite both customised values with the stock ones.
    log_config.LOGGING_CONFIG["loggers"]["airflow.processor"]["level"] = "INFO"
    log_config.LOGGING_CONFIG["handlers"]["processor_manager"]["maxBytes"] = (
        100 * 2**20
    )  # 100MB
    # after reverting the change, configs are the same
    assert log_config.LOGGING_CONFIG == airflow_local_settings.DEFAULT_LOGGING_CONFIG
9929939be3a45fb4e559459f76453bd7d0954f52 | 4,127 | py | Python | koans/about_set.py | VMStr8/learn_python_koans | b7b2453d559f520c7fcd3fe2ae5a87db0050fbbc | [
"MIT"
] | null | null | null | koans/about_set.py | VMStr8/learn_python_koans | b7b2453d559f520c7fcd3fe2ae5a87db0050fbbc | [
"MIT"
] | null | null | null | koans/about_set.py | VMStr8/learn_python_koans | b7b2453d559f520c7fcd3fe2ae5a87db0050fbbc | [
"MIT"
] | null | null | null | from koans_plugs import *
def test_create():
    """
    A Python set holds only unique elements.

    A set can be built with the set() function from any sequence, or by
    wrapping the elements in curly braces: {1, 2, 3}.
    Note: {} does NOT create an empty set -- that syntax creates a dict.
    """
    sample = {1, 2, 3}  # try: set(), {1, 2, 3}, {'qwerty'}, set((1, 2, 3))
    assert isinstance(sample, set)
def test_create_from_string():
    """
    All elements of a set are unique.

    Building the set of unique characters of a string is easy with set():
    >>> set('qwerty') == {'q', 'w', 'e', 'r', 't', 'y'}
    True
    """
    unique_chars = set('Hello, world!')  # try: set('Hello!'), set('Hello, world!')
    assert {'H', 'e', 'l', 'o', 'w', 'r', 'd', '!', ',', ' '} == unique_chars
def test_words_in_set():
    """Sets may contain more than just numbers and letters."""
    mixed = {3, 'set', 2}  # try: {True, 'set', 2}, {'cow', 'fox', 'cat'}
    assert isinstance(mixed, set)
def test_operator_len():
    """
    A set has a length:
    len({"some", "set"})
    """
    numbers = {0, 1, 2, 3, 4, 5}
    expected_len = 6  # try: 5, 6, 7
    assert len(numbers) == expected_len
def test_operator_in():
    """
    Membership is tested with the ``in`` operator:
    element in {"some", "set"}
    """
    animals = {'cow', 'fox', 'cat'}
    candidate = 'cow'  # try: 'cow', 1, True
    assert candidate in animals
def test_union():
    """
    Sets can be united:
    set_ab = set_a | set_b
    """
    left = {1, 2, 3, 4, 5}
    right = {4, 5, 6, 7, 8}
    united = left | right
    assert united == {1, 2, 3, 4, 5, 6, 7, 8}
def test_intersection():
    """
    Intersection extracts the elements the sets have in common:
    set_ab = set_a & set_b
    """
    left = {1, 2, 3, 4, 5}
    right = {4, 5, 6, 7, 8}
    shared = left & right
    assert shared == {4, 5}
def test_difference():
    """
    Difference extracts the elements that are absent from the other set:
    set_a_minus_b = set_a - set_b
    """
    left = {1, 2, 3, 4, 5}
    right = {4, 5, 6, 7, 8}
    remainder = left - right
    assert remainder == {1, 2, 3}
def test_multi_difference():
    """
    Difference, union and intersection can be chained in one expression:
    result = set_a - set_b - set_c
    """
    first = {1, 2, 3, 4, 5}
    second = {4, 5, 6, 7, 8}
    third = {1, 2}
    remainder = first - second - third
    assert remainder == {3, }
def test_duplicate_removal():
    """
    Sets are often used to drop duplicates from a list via conversion:
    unique_items = list(set(items))

    Sort both lists with sorted() before comparing, because list equality
    is order sensitive:
    >>> [1, 2] == [1, 2]
    True
    >>> [1, 2] != [2, 1]
    True
    """
    duplicated = ['cow', 'cat', 'cat', 'dog', 'cat', 'cow']
    deduplicated = ['cow', 'cat', 'dog']  # remove the duplicates by hand
    assert sorted(deduplicated) == sorted(list(set(duplicated)))
def test_list_in_set():
    """
    A set stores unique elements; uniqueness relies on each type's
    __hash__ function (check a value with hash()). Types without
    __hash__ raise an exception when added to a set.
    """
    hashables = {1, 2, 3}  # try: {1, [1, 2, 3]}, {1, (1, 2, 3)}, {1, {'a': 1, 'b': 2}}
    assert isinstance(hashables, set)
917a60d92c3b84c1940d8b683142f325fa5baeba | 421 | py | Python | apps/security/migrations/0012_auto_20210202_1002.py | death-finger/get2unix | 1ff6f729f076040d6493251471cc0ee9cdcdc661 | [
"MIT"
] | null | null | null | apps/security/migrations/0012_auto_20210202_1002.py | death-finger/get2unix | 1ff6f729f076040d6493251471cc0ee9cdcdc661 | [
"MIT"
] | null | null | null | apps/security/migrations/0012_auto_20210202_1002.py | death-finger/get2unix | 1ff6f729f076040d6493251471cc0ee9cdcdc661 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.6 on 2021-02-02 02:02
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated by Django: alter CollectorTasks.id_code to a
    nullable, unique CharField(max_length=255)."""

    dependencies = [
        ('security', '0011_auto_20210202_0749'),
    ]

    operations = [
        migrations.AlterField(
            model_name='collectortasks',
            name='id_code',
            field=models.CharField(max_length=255, null=True, unique=True),
        ),
    ]
| 22.157895 | 75 | 0.617577 |
80f8ecc054729f5769f1865789e19964a05e28f0 | 1,876 | py | Python | scripts/plot_nr_of_boxes.py | mozewillo/ROCS | babc44e4486c69405764022c64b17c86f49b4a1a | [
"MIT"
] | null | null | null | scripts/plot_nr_of_boxes.py | mozewillo/ROCS | babc44e4486c69405764022c64b17c86f49b4a1a | [
"MIT"
] | null | null | null | scripts/plot_nr_of_boxes.py | mozewillo/ROCS | babc44e4486c69405764022c64b17c86f49b4a1a | [
"MIT"
] | 1 | 2022-01-28T12:33:59.000Z | 2022-01-28T12:33:59.000Z | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
sns.set_style("whitegrid")
if __name__ == '__main__':
    # Load per-image and per-study labels (Kaggle SIIM-style CSVs — paths are
    # relative to the scripts/ directory).
    train_image = pd.read_csv('../data/train_image_level.csv')
    train_study = pd.read_csv('../data/train_study_level.csv')
    # Strip the suffix after '_' so study ids match between the two tables.
    train_study['id'] = train_study['id'].str.split('_').str[0]
    # Count boxes per image: token count of `label` divided by 6
    # (assumes each box contributes 6 whitespace-separated tokens — TODO confirm
    # against the label format).
    train_image['nr_of_boxes'] = (train_image.label.str.split().str.len()/6).astype(np.uint8)
    # Images explicitly labeled 'none ...' have zero boxes.
    train_image.loc[train_image.label.str.startswith('none'), 'nr_of_boxes'] = 0
    # Derive a numeric class id from the one-hot study columns.
    classes = train_study.iloc[:,1:].idxmax(axis=1)
    classes[classes=='Negative for Pneumonia'] = 0
    classes[classes=='Typical Appearance'] = 1
    classes[classes=='Indeterminate Appearance'] = 2
    classes[classes=='Atypical Appearance'] = 3
    train_study['Class'] = classes
    # Index by study id so the join below maps Class onto each image row.
    train_study.index = train_study.id
    train_image = train_image.join(train_study['Class'], on='StudyInstanceUID')
    class_names = ['Negative for Pneumonia', 'Typical Appearance',
                   'Indeterminate Appearance', 'Atypical Appearance']
    # One subplot per positive class (class 0, Negative, is intentionally skipped).
    fig, axes = plt.subplots(1, 3, figsize=(15,5))
    for i in range(1, 4):
        # Histogram of box counts for images of class i.
        data = train_image[train_image.Class==i]['nr_of_boxes'].value_counts().sort_index()
        # NOTE: the comprehension variable shadows the loop's `i`; harmless in
        # Python 3 since comprehensions have their own scope.
        axes[i-1].bar(x=[str(i) for i in list(data.index)], height=data, width=0.9)
        axes[i-1].set_xlabel('Number of boxes', fontsize=12)
        axes[i-1].set_title(class_names[i])
        # Annotate each bar with its percentage of the class total.
        for rect in axes[i-1].patches:
            axes[i-1].text(rect.get_x() + rect.get_width() / 2, rect.get_height(),
                           "%.1f%%"% (rect.get_height()/data.sum()*100),
                           ha='center')
    axes[0].set_ylabel('Number of images', fontsize=12)
    plt.suptitle('Number of boxes on images from each class', size=18, y=1.05)
    plt.savefig('../plots/nr_of_boxes.png', dpi=150, bbox_inches='tight')
    plt.close()
bb35619da078cb96e92a487d924c54278506444c | 483 | py | Python | test_squarespace.py | doprdele/squarespace-python | 140c1b389b91d390f0316e250a9e61454bc32ade | [
"MIT"
] | 7 | 2017-12-31T03:37:22.000Z | 2021-03-07T22:37:58.000Z | test_squarespace.py | doprdele/squarespace-python | 140c1b389b91d390f0316e250a9e61454bc32ade | [
"MIT"
] | 2 | 2019-01-18T19:01:59.000Z | 2020-04-02T16:22:20.000Z | test_squarespace.py | doprdele/squarespace-python | 140c1b389b91d390f0316e250a9e61454bc32ade | [
"MIT"
] | 7 | 2017-02-10T15:50:47.000Z | 2022-01-17T17:09:03.000Z | # coding=UTF-8
from squarespace import Squarespace
def test_squarespace():
    """The constructor should store the API key and expose the default user agent."""
    client = Squarespace('test')
    assert client.api_key == 'test'
    assert client.useragent == 'Squarespace python API v0.0.3 by Zach White.'
def test_squarespace_useragent():
    """Assigning useragent should update the public value, the private cache,
    and the HTTP session's User-Agent header."""
    client = Squarespace('test')
    expected = 'Hello, World!'
    client.useragent = expected
    assert client.useragent == expected
    assert client._useragent == expected
    assert client.http.headers['User-Agent'] == expected
079066400055a448bd87fa01cc3ba885eec39265 | 2,112 | py | Python | swot_simulator/error/karin.py | CNES/swot_simulator | 92d0bb4a274ec9923265567968beea3be4283e61 | [
"BSD-3-Clause"
] | 17 | 2020-05-28T08:20:11.000Z | 2022-03-25T07:40:48.000Z | swot_simulator/error/karin.py | CNES/swot_simulator | 92d0bb4a274ec9923265567968beea3be4283e61 | [
"BSD-3-Clause"
] | 7 | 2021-07-21T02:15:52.000Z | 2021-11-14T10:46:41.000Z | swot_simulator/error/karin.py | CNES/swot_simulator | 92d0bb4a274ec9923265567968beea3be4283e61 | [
"BSD-3-Clause"
] | 8 | 2020-05-17T13:53:43.000Z | 2022-03-25T07:40:58.000Z | # Copyright (c) 2021 CNES/JPL
#
# All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""
Karin noise
-----------
"""
from typing import Dict
import numpy as np
from .. import random_signal
from .. import settings
class Karin:
    """Karin instrumental error computed from a random realization.

    Args:
        parameters (settings.Parameters): Simulation settings
    """
    def __init__(self, parameters: settings.Parameters) -> None:
        assert parameters.karin_noise is not None

        # Keep the generation parameters of the random signal loaded from file.
        self.hsdt, self.x_ac, self.swh = random_signal.read_file_karin(
            parameters.karin_noise)
        # Characteristic grid-cell size (geometric mean of the two spacings).
        cell_area = parameters.delta_ac * parameters.delta_al
        self.size_grid = cell_area ** 0.5
        if self.size_grid < 1:
            # Hack for unsmoothed products at high resolution.
            self.size_grid *= 8 / (40 ** .5)

    def generate(self, seed: int, x_al: np.ndarray, x_ac: np.ndarray,
                 swh: np.ndarray) -> Dict[str, np.ndarray]:
        """Generate the karin noise.

        Args:
            seed (int): Random seed used to initialize the pseudo-random
                number generator.
            x_al (numpy.ndarray): Along track distance
            x_ac (numpy.ndarray): Across track distance
            swh (numpy.ndarray): Significant wave height, used to modulate
                instrumental noise as a function of sea state.

        Returns:
            dict: variable name and simulated errors.
        """
        across_count = x_ac.shape[0]
        along_count = x_al.shape[0]

        # Random draws for the left and right halves of the mast.
        rng = np.random.default_rng(seed=seed)
        gaussian_field = rng.normal(0, 1, (along_count, across_count))

        # Karin noise magnitude as a function of x_ac (smile shape).
        noise_std = random_signal.interpolate_file_karin(
            swh, x_ac, self.hsdt, self.x_ac, self.swh) / self.size_grid

        # Scale the unit-variance field by the per-pixel standard deviation.
        return {"simulated_error_karin": noise_std * gaussian_field}
2d71b90695b6407690c7fbe64655405b9731f366 | 2,033 | py | Python | epytope/Data/pssms/smmpmbec/mat/A_68_02_8.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 7 | 2021-02-01T18:11:28.000Z | 2022-01-31T19:14:07.000Z | epytope/Data/pssms/smmpmbec/mat/A_68_02_8.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 22 | 2021-01-02T15:25:23.000Z | 2022-03-14T11:32:53.000Z | epytope/Data/pssms/smmpmbec/mat/A_68_02_8.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 4 | 2021-05-28T08:50:38.000Z | 2022-03-14T11:45:32.000Z | A_68_02_8 = {0: {'A': 0.256, 'C': 0.077, 'E': 0.093, 'D': 0.016, 'G': 0.176, 'F': 0.055, 'I': 0.076, 'H': -0.087, 'K': 0.066, 'M': -0.596, 'L': -0.088, 'N': -0.421, 'Q': -0.07, 'P': 0.341, 'S': 0.002, 'R': 0.184, 'T': 0.023, 'W': -0.04, 'V': 0.002, 'Y': -0.066}, 1: {'A': -0.003, 'C': 0.002, 'E': 0.003, 'D': 0.006, 'G': 0.012, 'F': -0.046, 'I': -0.066, 'H': 0.049, 'K': 0.069, 'M': 0.025, 'L': -0.002, 'N': 0.02, 'Q': 0.04, 'P': -0.031, 'S': 0.011, 'R': 0.121, 'T': -0.04, 'W': -0.031, 'V': -0.103, 'Y': -0.036}, 2: {'A': 0.045, 'C': 0.002, 'E': 0.022, 'D': 0.0, 'G': 0.055, 'F': -0.069, 'I': -0.092, 'H': 0.043, 'K': 0.074, 'M': -0.056, 'L': -0.038, 'N': 0.005, 'Q': 0.04, 'P': -0.038, 'S': 0.069, 'R': 0.094, 'T': 0.034, 'W': -0.055, 'V': -0.042, 'Y': -0.093}, 3: {'A': -0.007, 'C': 0.008, 'E': 0.012, 'D': 0.008, 'G': -0.049, 'F': 0.047, 'I': -0.045, 'H': 0.034, 'K': -0.019, 'M': 0.042, 'L': 0.013, 'N': 0.037, 'Q': 0.004, 'P': -0.138, 'S': -0.001, 'R': 0.013, 'T': -0.009, 'W': 0.019, 'V': -0.019, 'Y': 0.049}, 4: {'A': -0.187, 'C': 0.029, 'E': 0.074, 'D': 0.091, 'G': 0.052, 'F': 0.005, 'I': -0.252, 'H': 0.107, 'K': 0.06, 'M': -0.101, 'L': -0.037, 'N': 0.117, 'Q': 0.07, 'P': 0.064, 'S': 0.012, 'R': 0.091, 'T': -0.045, 'W': 0.069, 'V': -0.222, 'Y': 0.001}, 5: {'A': -0.246, 'C': -0.021, 'E': 0.021, 'D': 0.019, 'G': 0.034, 'F': -0.108, 'I': -0.187, 'H': 0.059, 'K': 0.048, 'M': 0.016, 'L': 0.024, 'N': 0.111, 'Q': 0.133, 'P': -0.018, 'S': 0.038, 'R': 0.07, 'T': 0.019, 'W': 0.052, 'V': -0.098, 'Y': 0.034}, 6: {'A': -0.012, 'C': 0.009, 'E': 0.023, 'D': 0.004, 'G': -0.014, 'F': -0.002, 'I': -0.005, 'H': 0.029, 'K': 0.016, 'M': 0.009, 'L': -0.047, 'N': 0.012, 'Q': 0.009, 'P': -0.088, 'S': 0.015, 'R': 0.059, 'T': 0.0, 'W': 0.019, 'V': -0.04, 'Y': 0.004}, 7: {'A': -0.152, 'C': 0.029, 'E': 0.02, 'D': 0.083, 'G': 0.064, 'F': 0.0, 'I': -0.127, 'H': 0.185, 'K': 0.15, 'M': 0.042, 'L': -0.104, 'N': 0.097, 'Q': 0.125, 'P': 0.004, 
'S': 0.084, 'R': 0.157, 'T': -0.041, 'W': 0.073, 'V': -0.666, 'Y': -0.024}, -1: {'con': 4.73857}} | 2,033 | 2,033 | 0.397934 |
af934ae1f4e890bb305eae238bd717859c33c286 | 26,624 | py | Python | easytext/trainer/trainer.py | cjopengler/easytext | 5561b9a51e7b0aa5d10b7af451374359b559e9b6 | [
"MIT"
] | 17 | 2020-06-19T12:12:13.000Z | 2022-01-28T02:07:01.000Z | easytext/trainer/trainer.py | cjopengler/easytext | 5561b9a51e7b0aa5d10b7af451374359b559e9b6 | [
"MIT"
] | 24 | 2020-06-08T08:51:36.000Z | 2022-02-08T03:30:19.000Z | easytext/trainer/trainer.py | cjopengler/easytext | 5561b9a51e7b0aa5d10b7af451374359b559e9b6 | [
"MIT"
] | 7 | 2020-07-20T06:40:00.000Z | 2022-01-28T03:52:49.000Z | #!/usr/bin/env python 3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 Baidu.com, Inc. All Rights Reserved
#
"""
训练器
Authors: panxu(panxu@baidu.com)
Date: 2020/05/16 00:34:00
"""
import os
import torch
import logging
from tqdm import tqdm
import shutil
from typing import Optional, Union, List
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.nn.parallel import DistributedDataParallel
from torch import distributed as TorchDist
from torch.distributed import ReduceOp
from easytext.model import Model
from easytext.loss import Loss
from easytext.optimizer import OptimizerFactory
from easytext.optimizer import LRSchedulerFactory
from easytext.data.model_collate import ModelInputs
from easytext.metrics import ModelMetricAdapter
from easytext.utils.json_util import json2str
from easytext.utils.nn import cuda_util
from easytext.utils.distributed.distributed_util import DistributedFuncWrapper
from easytext.utils.distributed import Sync
from easytext.trainer.metric_tracker import MetricTracker
from easytext.trainer.grad_rescaled import GradRescaled
from easytext.trainer import Record
from easytext.trainer.trainer_callback import TrainerCallback
from easytext.distributed import Distributed
from easytext.distributed import DistributedDataParallelParameter, ProcessGroupParameter
class NerModelOutputs:
    """
    Container for the outputs of a NER model.
    """

    def __init__(self, logits, mask, crf):
        """
        :param logits: raw logits produced by the model
        :param mask: the token mask
        :param crf: the model's CRF layer, exposed so the caller can compute
            the loss and run viterbi decoding
        """
        self.logits, self.mask, self.crf = logits, mask, crf
class Trainer(TrainerCallback, Distributed):
    """
    Model trainer.

    Runs the train/validate epoch loop, tracks metrics, saves and restores
    checkpoints, and (optionally) coordinates multi-GPU training through
    ``torch.distributed`` / ``DistributedDataParallel``.
    """

    # Phase markers used by _train_or_evaluate.
    _TRAIN = 0
    _EVALUATE = 1

    def __init__(self,
                 serialize_dir: str,
                 num_epoch: int,
                 model: Model,
                 loss: Loss,
                 metrics: ModelMetricAdapter,
                 optimizer_factory: OptimizerFactory,
                 device: torch.device,
                 is_distributed: bool,
                 lr_scheduler_factory: LRSchedulerFactory = None,
                 grad_rescaled: GradRescaled = None,
                 patient: int = None,
                 num_check_point_keep: int = None,
                 trainer_callback: Union[TrainerCallback, List[TrainerCallback], None] = None,
                 distributed_data_parallel_parameter: DistributedDataParallelParameter = None
                 ):
        """
        Initialize the trainer.

        :param serialize_dir: directory where checkpoints and metrics are stored
        :param num_epoch: number of epochs to train
        :param model: the model to train
        :param loss: the model's loss function
        :param metrics: metric computation adapter for the model
        :param optimizer_factory: factory that creates the optimizer. A factory
            (rather than an optimizer instance) is used because the optimizer
            depends on the model parameters: creating the optimizer outside the
            trainer could bind it to parameters before they are moved to the
            right device (a classic CUDA pitfall).
        :param device: device to train on
        :param is_distributed: whether this trainer runs in a multi-GPU setup
        :param lr_scheduler_factory: optional factory for the LR scheduler
        :param grad_rescaled: optional hook that rescales gradients after backward
        :param patient: early-stopping patience. ``None`` disables early stopping;
            otherwise training stops after the target metric fails to improve for
            ``patient`` epochs.
        :param num_check_point_keep: number of checkpoints to keep. ``None``
            keeps all of them.
        :param trainer_callback: callback(s) invoked during training. When the
            trainer is distributed, the callback's ``is_distributed`` must match.
        :param distributed_data_parallel_parameter: parameters forwarded to
            DistributedDataParallel (currently only ``find_unused_parameters``)
        """
        self._device = device
        self._loss = loss
        self._metrics = metrics
        self._optimizer_factory = optimizer_factory
        self._grad_rescaled = grad_rescaled
        self._serialize_dir = serialize_dir
        self._metric_tracker = MetricTracker(patient=patient)
        self._num_check_point_keep = num_check_point_keep
        self._num_epoch = num_epoch
        self._current_epoch: Optional[int] = None
        self._trainer_callback = trainer_callback
        self._is_distributed = is_distributed

        if self.is_distributed:
            # Only rank 0 performs primary-only work (logging, checkpointing).
            self._distributed_func_wrapper = DistributedFuncWrapper(dst_rank=0)
            self._ddp = distributed_data_parallel_parameter \
                or DistributedDataParallelParameter(find_unused_parameters=False)
        else:
            self._distributed_func_wrapper = DistributedFuncWrapper(dst_rank=None)

        self._model = model

        if self.is_distributed:
            assert self._device.type != "cpu", "多 GPU 训练, device 不能是 cpu"
            torch.cuda.set_device(self._device)
            self._model.cuda(self._device)
            # The optimizer must be created after the model is on the GPU so it
            # binds to the GPU parameters, before DDP wraps the model.
            self._optimizer = optimizer_factory.create(self._model)
            self._model = DistributedDataParallel(module=self._model,
                                                  device_ids=[self._device],
                                                  output_device=self._device,
                                                  find_unused_parameters=self._ddp.find_unused_parameters)
        else:
            self._model = self._model.to(self._device)
            self._optimizer = self._optimizer_factory.create(self._model)

        if lr_scheduler_factory is not None:
            self._lr_scheduler = lr_scheduler_factory.create(optimizer=self.optimizer,
                                                             model=self.model)
        else:
            self._lr_scheduler = None

        self._check_distributed()

    def _check_distributed(self):
        """
        Check that the callback's distributed mode matches the trainer's;
        raises AssertionError on mismatch.

        NOTE(review): the annotation allows a list of callbacks, but the plain
        attribute access here (and in the on_* hooks) assumes a single callback
        object — presumably a composite callback is expected; confirm.
        """
        if self._trainer_callback is not None:
            assert self._trainer_callback.is_distributed == self.is_distributed, \
                f"当前 trainer_callback is_distributed: {self._trainer_callback.is_distributed} " \
                f"与 trainer is_distributed:{self.is_distributed} 不相等"

    def _is_primary_process(self) -> bool:
        """
        Return True when this process should log / show progress.

        In non-distributed mode the single process is always primary; in
        distributed mode only the wrapper's destination rank is.
        """
        if not self.is_distributed:
            return True
        return (self._distributed_func_wrapper is not None
                and self._distributed_func_wrapper.dst_rank == TorchDist.get_rank())

    def _log_info(self, message: str) -> None:
        """Log *message* at INFO level on the primary process only."""
        if self._is_primary_process():
            logging.info(message)

    @property
    def is_distributed(self) -> bool:
        return self._is_distributed

    @property
    def model(self):
        return self._model

    @property
    def loss(self):
        return self._loss

    @property
    def metrics(self):
        return self._metrics

    @property
    def metric_tracker(self):
        return self._metric_tracker

    @property
    def optimizer(self):
        return self._optimizer

    @property
    def serialize_dir(self):
        return self._serialize_dir

    @property
    def num_epoch(self):
        return self._num_epoch

    @property
    def current_epoch(self):
        return self._current_epoch

    def save_checkpoint(self,
                        epoch: int) -> "Trainer":
        """
        Save a checkpoint (model, optimizer, lr scheduler, metrics) for *epoch*.

        Also refreshes the "best" snapshot when this epoch is the best so far,
        and prunes checkpoints beyond ``num_check_point_keep``.

        :param epoch: epoch being saved
        :return: self
        """
        # Create the checkpoint directory.
        saved_dir = os.path.join(self._serialize_dir, f"checkpoint_epoch_{epoch}")
        if not os.path.isdir(saved_dir):
            os.makedirs(saved_dir)

        # Model weights.
        model_file_path = os.path.join(saved_dir, "model.pt")
        torch.save(self._model.state_dict(), model_file_path)

        # Optimizer state.
        optimizer_file_path = os.path.join(saved_dir, "optimizer.pt")
        torch.save(self._optimizer.state_dict(), optimizer_file_path)

        # LR scheduler state; None is saved too so loading stays symmetric.
        lr_scheduler_file_path = os.path.join(saved_dir, "lr_scheduler.pt")
        if self._lr_scheduler is None:
            torch.save(None, lr_scheduler_file_path)
        else:
            torch.save(self._lr_scheduler.state_dict(), lr_scheduler_file_path)

        # Per-epoch metric; the complete tracker is saved separately below.
        metric_file_path = os.path.join(saved_dir, "metric.json")
        metric = self._metric_tracker[epoch]
        with open(metric_file_path, mode="w", encoding="utf-8") as f:
            f.write(f"{json2str(metric, indent=2)}\n")

        # Whole metric tracker.
        metric_tracker_file_path = os.path.join(self._serialize_dir, "metric_tracker.json")
        self._metric_tracker.save(metric_tracker_file_path)

        # Refresh the "best" snapshot when this epoch is the best so far.
        if self._metric_tracker.best().epoch == epoch:
            best_dir = os.path.join(self._serialize_dir, "best")
            if os.path.isdir(best_dir):
                # A previous best exists: keep one backup of it.
                best_bak_dir = os.path.join(self._serialize_dir, "best_bak")
                if os.path.isdir(best_bak_dir):
                    shutil.rmtree(best_bak_dir)
                shutil.move(best_dir, best_bak_dir)
            shutil.copytree(saved_dir, best_dir)

        # Prune checkpoints that fall outside the keep-last window.
        if self._num_check_point_keep is not None:
            for removed_epoch in range(1, epoch - self._num_check_point_keep + 1):
                removed_epoch_dir = os.path.join(self._serialize_dir, f"checkpoint_epoch_{removed_epoch}")
                if os.path.isdir(removed_epoch_dir):
                    shutil.rmtree(removed_epoch_dir)

        return self

    @staticmethod
    def _find_last_epoch(serialize_dir: str):
        """
        Find the most recent checkpointed epoch under *serialize_dir*.

        :param serialize_dir: serialize directory
        :return: the largest epoch number found, or None when no
            ``checkpoint_epoch_N`` directory exists
        """
        last_epoch = None

        for file_name in os.listdir(serialize_dir):
            dir_path = os.path.join(serialize_dir, file_name)
            if os.path.isdir(dir_path):
                parts = file_name.split("_")
                # Only directories named "checkpoint_epoch_<N>" count.
                if len(parts) == 3 and parts[0] == "checkpoint" and parts[1] == "epoch":
                    epoch = int(parts[2])

                    if last_epoch is None:
                        last_epoch = epoch
                    else:
                        if epoch > last_epoch:
                            last_epoch = epoch
        return last_epoch

    def load_checkpoint(self,
                        serialize_dir: str) -> "Trainer":
        """
        Load the most recent checkpoint from *serialize_dir*.

        :param serialize_dir: directory the checkpoints were saved to
        :return: self
        :raises RuntimeError: when no checkpoint is found
        """
        last_epoch = Trainer._find_last_epoch(serialize_dir=serialize_dir)

        if last_epoch is not None:
            self._current_epoch = last_epoch
            self._log_info(f"Load checkpoint, 当前 epoch: {last_epoch}")

            saved_dir = os.path.join(serialize_dir, f"checkpoint_epoch_{last_epoch}")

            model_file_path = os.path.join(saved_dir, "model.pt")
            self._model.load_state_dict(torch.load(model_file_path))
            self._log_info(f"last epoch{last_epoch}, loaded: {self._model.state_dict()}")

            optimizer_file_path = os.path.join(saved_dir, "optimizer.pt")
            self._optimizer.load_state_dict(torch.load(optimizer_file_path))

            lr_scheduler_file_path = os.path.join(saved_dir, "lr_scheduler.pt")
            lr_state_dict = torch.load(lr_scheduler_file_path)

            if lr_state_dict is None:
                self._lr_scheduler = None
            else:
                self._lr_scheduler.load_state_dict(lr_state_dict)

            metric_tracker_file_path = os.path.join(serialize_dir, "metric_tracker.json")
            self._metric_tracker = MetricTracker.from_file(metric_tracker_file_path)
        else:
            raise RuntimeError(f"最后保存的epoch数据没有在 {self._serialize_dir} 中找到!")

        return self

    def _train_or_evaluate(self,
                           phrase: int,
                           data_loader: DataLoader) -> float:
        """
        Run one epoch of training or evaluation over *data_loader*.

        :param phrase: Trainer._TRAIN or Trainer._EVALUATE
        :param data_loader: data loader to iterate
        :return: per-sample average loss for this worker
        """
        total_loss = 0.
        total_num = 0

        self._metrics.reset()

        if phrase == Trainer._TRAIN:
            self._model.train()
        elif phrase == Trainer._EVALUATE:
            self._model.eval()
        else:
            raise RuntimeError(f"phrase: {phrase} 应该是 {Trainer._TRAIN} 或 {Trainer._EVALUATE}")

        with torch.set_grad_enabled(phrase == Trainer._TRAIN):
            # Only the primary process shows the progress bar.
            tqdm_disable = not self._is_primary_process()

            for model_inputs in tqdm(data_loader, disable=tqdm_disable):
                model_inputs: ModelInputs = model_inputs
                batch_size, batch_inputs, labels \
                    = model_inputs.batch_size, model_inputs.model_inputs, model_inputs.labels

                # Move tensors to the GPU when needed; CPU is the default.
                if self._device.type == "cuda":
                    batch_inputs = cuda_util.cuda(batch_inputs, cuda_device=self._device)
                    labels = cuda_util.cuda(labels, cuda_device=self._device)

                outputs = self._model(**batch_inputs)
                batch_loss: torch.Tensor = self._loss(outputs, labels)

                if phrase == Trainer._TRAIN:
                    self._optimizer.zero_grad()
                    batch_loss.backward()

                    # Optional gradient-rescaling hook, applied after backward.
                    if self._grad_rescaled is not None:
                        self._grad_rescaled(self._model)

                    self._optimizer.step()

                total_loss += batch_loss.detach().item() * batch_size
                total_num += batch_size

                batch_metrics, target_metric = self._metrics(model_outputs=outputs, golden_labels=labels)
                # Message unified across distributed/non-distributed branches
                # (the distributed copy previously lacked a comma).
                self._log_info(f"Epoch: {self._current_epoch}, batch loss: {batch_loss},"
                               f"batch metrics: {json2str(batch_metrics)}, "
                               f"target metric: {json2str(target_metric)}")

        # Every worker sees the same total_num, so the local average is the
        # correct per-worker average; cross-worker reduction happens in _train.
        return total_loss / total_num

    def recovery_train(self,
                       train_data_loader: DataLoader,
                       validation_data_loader: DataLoader):
        """
        Resume training from the last saved checkpoint.

        :param train_data_loader: training set data loader
        :param validation_data_loader: validation set data loader
        """
        self.load_checkpoint(self.serialize_dir)

        if self.is_distributed:
            TorchDist.barrier()

        self._train(train_data_loader=train_data_loader,
                    validation_data_loader=validation_data_loader)

        if self.is_distributed:
            TorchDist.barrier()

    def evaluate(self,
                 validation_data_loader: DataLoader) -> float:
        """
        Evaluate the validation set.

        :param validation_data_loader: validation set data loader
        :return: the local (per-worker) average validation loss
        """
        loss = self._train_or_evaluate(phrase=Trainer._EVALUATE,
                                       data_loader=validation_data_loader)
        if self.is_distributed:
            TorchDist.barrier()
        return loss

    def _is_serialize_empty(self):
        """
        Check whether serialize_dir is empty, ignoring hidden files.

        :return: True when empty; False otherwise
        :raises RuntimeError: when serialize_dir is not a directory
        """
        if not os.path.isdir(self._serialize_dir):
            raise RuntimeError(f"保存路径是无效的路径: {self._serialize_dir} ")

        # Only names that do not start with "." make the directory non-empty.
        return all(name.startswith(".") for name in os.listdir(self._serialize_dir))

    def _check_data_loader_validity(self, data_loader: DataLoader):
        """
        Validate the data loader for the current mode; in distributed mode its
        sampler must be a DistributedSampler.

        :param data_loader: data loader to validate
        """
        if self.is_distributed:
            assert isinstance(data_loader.sampler, DistributedSampler), \
                "data_loader.sampler 必须是 DistributedSampler 实例"

    def train(self,
              train_data_loader: DataLoader,
              validation_data_loader: DataLoader) -> None:
        """
        Train from scratch; serialize_dir must be empty (hidden files ignored).

        :param train_data_loader: training set data loader
        :param validation_data_loader: validation set data loader
        :raises RuntimeError: when serialize_dir is not empty
        """
        if not self._is_serialize_empty():
            raise RuntimeError(f"新训练,请清空保存文件件: {self._serialize_dir}")

        if self.is_distributed:
            TorchDist.barrier()

        self._check_data_loader_validity(train_data_loader)
        self._check_data_loader_validity(validation_data_loader)

        self._train(train_data_loader=train_data_loader,
                    validation_data_loader=validation_data_loader)

        if self.is_distributed:
            TorchDist.barrier()

    def _train(self,
               train_data_loader: DataLoader,
               validation_data_loader: DataLoader) -> None:
        """
        Core epoch loop: train, evaluate, synchronize metrics, step the LR
        scheduler, checkpoint, and early-stop.

        :param train_data_loader: training set data loader
        :param validation_data_loader: validation set data loader
        """
        if self._current_epoch is None:
            start_epoch = 1
        else:
            # Resuming: continue from the loaded epoch.
            start_epoch = self._current_epoch

        record = Record()

        for epoch in range(start_epoch, self._num_epoch + 1):
            record.epoch = epoch
            self._current_epoch = epoch
            self._log_info(f"Start train epoch: {self._current_epoch}")

            self.on_train_epoch_start(trainer=self, record=record)
            train_loss = self._train_or_evaluate(phrase=Trainer._TRAIN, data_loader=train_data_loader)

            if self.is_distributed:
                train_loss = Sync.sync(train_loss, device=self._device, op=ReduceOp.SUM)

            record.epoch_train_loss = train_loss

            if self.is_distributed:
                # Merge metric state across workers before reading the metric.
                sync_data, op = self._metrics.to_synchronized_data()
                sync_data = Sync.sync(sync_data, device=self._device, op=op)
                self._metrics.from_synchronized_data(sync_data=sync_data, reduce_op=op)

            train_metric_dict, train_target_metric = self._metrics.metric
            record.train_metric = train_metric_dict
            record.train_target_metric = train_target_metric

            self._log_info(f"Train epoch: {epoch}, "
                           f"loss: {train_loss}, "
                           f"target metric: {train_target_metric.name}:{train_target_metric.value},"
                           f"metrics: {json2str(train_metric_dict)}")

            self.on_train_epoch_stop(trainer=self, record=record)

            self.on_evaluate_validation_epoch_start(trainer=self, record=record)
            validation_loss = self.evaluate(validation_data_loader=validation_data_loader)

            if self.is_distributed:
                validation_loss = Sync.sync(validation_loss, device=self._device, op=ReduceOp.SUM)

            record.epoch_validation_loss = validation_loss

            if self.is_distributed:
                sync_data, op = self._metrics.to_synchronized_data()
                sync_data = Sync.sync(sync_data, device=self._device, op=op)
                self._metrics.from_synchronized_data(sync_data=sync_data, reduce_op=op)

            validation_metric_dict, validation_target_metric = self._metrics.metric
            record.validation_metric = validation_metric_dict
            record.validation_target_metric = validation_target_metric

            self._metric_tracker.add_metric(epoch=epoch,
                                            train_metric=train_metric_dict,
                                            train_model_target_metric=train_target_metric,
                                            validation_metric=validation_metric_dict,
                                            validation_model_target_metric=validation_target_metric)

            self._log_info(f"Evaluate Valid epoch: {epoch}, loss: {validation_loss}, "
                           f"target metric: {validation_target_metric.name}:{validation_target_metric.value} "
                           f"metrics: {json2str(validation_metric_dict)}")

            self.on_evaluate_validation_epoch_stop(trainer=self, record=record)

            # Step the LR scheduler. ReduceLROnPlateau needs the metric value,
            # so it is special-cased with isinstance; note it does not share a
            # base class with the other schedulers (its base is object).
            if self._lr_scheduler is not None:
                if isinstance(self._lr_scheduler, ReduceLROnPlateau):
                    self._lr_scheduler.step(metrics=validation_loss, epoch=epoch)
                else:
                    self._lr_scheduler.step(epoch=epoch)

            # Checkpointing runs only on the wrapper's destination rank.
            self._distributed_func_wrapper(self.save_checkpoint, epoch=epoch)

            if self._metric_tracker.early_stopping(epoch):
                self._log_info(f"Epoch: {epoch}, early stopping!")
                break

        self.on_training_complete(trainer=self, record=record)

    def on_train_epoch_start(self, trainer: "Trainer", record: Record) -> None:
        self._log_info(f"on_train_epoch_start: {record.epoch}")
        if self._trainer_callback is not None:
            self._trainer_callback.on_train_epoch_start(trainer=trainer,
                                                        record=record)

    def on_train_epoch_stop(self, trainer: "Trainer", record: Record) -> None:
        self._log_info(f"on_train_epoch_stop: {record.epoch}")
        if self._trainer_callback is not None:
            self._trainer_callback.on_train_epoch_stop(trainer=trainer,
                                                       record=record)

    def on_evaluate_validation_epoch_start(self, trainer: "Trainer", record: Record) -> None:
        self._log_info(f"on_evaluate_epoch_start: {record.epoch}")
        if self._trainer_callback is not None:
            self._trainer_callback.on_evaluate_validation_epoch_start(trainer=trainer,
                                                                      record=record)

    def on_evaluate_validation_epoch_stop(self, trainer: "Trainer", record: Record) -> None:
        self._log_info(f"on_evaluate_epoch_stop: {record.epoch}")
        if self._trainer_callback is not None:
            self._trainer_callback.on_evaluate_validation_epoch_stop(trainer=trainer,
                                                                     record=record)

    def on_training_complete(self, trainer: "Trainer", record: Record) -> None:
        self._log_info(f"on_training_complete: {record.epoch}")
        if self._trainer_callback is not None:
            self._trainer_callback.on_training_complete(trainer=trainer,
                                                        record=record)
| 37.445851 | 116 | 0.604943 |
863bb8a9ac87e9b4c9f70d79100501c399c993bb | 629 | py | Python | sphinxpapyrus/docxbuilder/nodes/classifier.py | amarin/sphinxpapyrus-docxbuilder | 0fd00a0c5467554d0a2b5ad9cd93ab780511f1a3 | [
"MIT"
] | null | null | null | sphinxpapyrus/docxbuilder/nodes/classifier.py | amarin/sphinxpapyrus-docxbuilder | 0fd00a0c5467554d0a2b5ad9cd93ab780511f1a3 | [
"MIT"
] | null | null | null | sphinxpapyrus/docxbuilder/nodes/classifier.py | amarin/sphinxpapyrus-docxbuilder | 0fd00a0c5467554d0a2b5ad9cd93ab780511f1a3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Translate docutils node classifier formatting.
each classifier start will processed with visit() and finished with depart()
"""
from docutils.nodes import Node
from sphinxpapyrus.docxbuilder.translator import DocxTranslator
# docutils node type handled by this module.
node_name = "classifier"
def visit(visitor: DocxTranslator, node: Node):
    """Start processing classifier node.

    Currently a no-op apart from validating the argument types.
    """
    assert isinstance(visitor, DocxTranslator)
    assert isinstance(node, Node)
def depart(visitor: DocxTranslator, node: Node):
    """Finish processing classifier node.

    Currently a no-op apart from validating the argument types.
    """
    assert isinstance(visitor, DocxTranslator)
    assert isinstance(node, Node)
4bc334b2c4f79cd9436a6e4ca5fed86382f2d31b | 24,929 | py | Python | azure/mgmt/sql/operations/servers_operations.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1 | 2022-01-25T22:52:58.000Z | 2022-01-25T22:52:58.000Z | azure/mgmt/sql/operations/servers_operations.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | azure/mgmt/sql/operations/servers_operations.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class ServersOperations(object):
    """ServersOperations operations.

    Note: auto-generated by AutoRest; manual edits may be overwritten.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    def __init__(self, client, config, serializer, deserializer):
        # Keep references to the service-client plumbing used by every operation.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer

        self.config = config
def check_name_availability(
self, name, custom_headers=None, raw=False, **operation_config):
"""Determines whether a resource can be created with the specified name.
:param name: The name whose availability is to be checked.
:type name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: CheckNameAvailabilityResponse or ClientRawResponse if
raw=true
:rtype: ~azure.mgmt.sql.models.CheckNameAvailabilityResponse or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.CheckNameAvailabilityRequest(name=name)
api_version = "2014-04-01"
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Sql/checkNameAvailability'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'CheckNameAvailabilityRequest')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CheckNameAvailabilityResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list(
self, custom_headers=None, raw=False, **operation_config):
"""Gets a list of all servers in the subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Server
:rtype:
~azure.mgmt.sql.models.ServerPaged[~azure.mgmt.sql.models.Server]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2015-05-01-preview"
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Sql/servers'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.ServerPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ServerPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list_by_resource_group(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets a list of servers in a resource groups.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Server
:rtype:
~azure.mgmt.sql.models.ServerPaged[~azure.mgmt.sql.models.Server]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2015-05-01-preview"
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.ServerPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ServerPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def get(
self, resource_group_name, server_name, custom_headers=None, raw=False, **operation_config):
"""Gets a server.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Server or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.sql.models.Server or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2015-05-01-preview"
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Server', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, server_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a server.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param parameters: The requested server resource state.
:type parameters: ~azure.mgmt.sql.models.Server
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns Server or
ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.sql.models.Server]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2015-05-01-preview"
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'Server')
# Construct and send request
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Server', response)
if response.status_code == 201:
deserialized = self._deserialize('Server', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def delete(
self, resource_group_name, server_name, custom_headers=None, raw=False, **operation_config):
"""Deletes a server.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns None or
ClientRawResponse if raw=true
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2015-05-01-preview"
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.delete(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def update(
self, resource_group_name, server_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Updates a server.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param parameters: The requested server resource state.
:type parameters: ~azure.mgmt.sql.models.ServerUpdate
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns Server or
ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.sql.models.Server]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2015-05-01-preview"
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'ServerUpdate')
# Construct and send request
def long_running_send():
request = self._client.patch(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Server', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
| 43.430314 | 144 | 0.653135 |
75032570baae5b60b599cbf7f3e1a626494cd1ba | 502 | py | Python | app.py | melodily/wordle-with-friends | 2485c8b3127db3a921410463bab6a61f486aeb1d | [
"MIT"
] | null | null | null | app.py | melodily/wordle-with-friends | 2485c8b3127db3a921410463bab6a61f486aeb1d | [
"MIT"
] | null | null | null | app.py | melodily/wordle-with-friends | 2485c8b3127db3a921410463bab6a61f486aeb1d | [
"MIT"
] | null | null | null | from flask import Flask
import os
from flask_sqlalchemy import SQLAlchemy
from dotenv import load_dotenv
from flask_migrate import Migrate

# Load settings (e.g. DATABASE_URL) from a local .env file before the app
# configuration below reads them via os.getenv.
load_dotenv()

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.getenv('DATABASE_URL')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_ENGINE_OPTIONS'] = {
    # Ping connections before use so stale/dropped connections are recycled.
    "pool_pre_ping": True,
}
db = SQLAlchemy(app)
migrate = Migrate(app, db, compare_type=True)

# Imported after ``db`` is created — presumably to avoid a circular import
# (models likely imports ``db`` from this module); TODO confirm.
from models import game

if __name__ == '__main__':
    app.run()
e65a3832f9fd668cff746f356e49c4fe19875cbc | 24,560 | py | Python | src/sage/algebras/tensor_algebra.py | rekhabiswal/sage | e8633b09919542a65e7e990c8369fee30c7edefd | [
"BSL-1.0"
] | null | null | null | src/sage/algebras/tensor_algebra.py | rekhabiswal/sage | e8633b09919542a65e7e990c8369fee30c7edefd | [
"BSL-1.0"
] | null | null | null | src/sage/algebras/tensor_algebra.py | rekhabiswal/sage | e8633b09919542a65e7e990c8369fee30c7edefd | [
"BSL-1.0"
] | null | null | null | r"""
Tensor Algebras
AUTHORS:
- Travis Scrimshaw (2014-01-24): Initial version
.. TODO::
- Coerce to/from free algebra.
"""
#*****************************************************************************
# Copyright (C) 2014 Travis Scrimshaw <tscrim at ucdavis.edu>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.categories.algebras import Algebras
from sage.categories.pushout import ConstructionFunctor
from sage.categories.graded_hopf_algebras_with_basis import GradedHopfAlgebrasWithBasis
from sage.categories.homset import Hom
from sage.categories.morphism import Morphism
from sage.categories.modules import Modules
from sage.categories.tensor import tensor
from sage.combinat.free_module import CombinatorialFreeModule, CombinatorialFreeModule_Tensor
from sage.monoids.indexed_free_monoid import IndexedFreeMonoid
from sage.misc.cachefunc import cached_method
from sage.sets.family import Family
class TensorAlgebra(CombinatorialFreeModule):
r"""
The tensor algebra `T(M)` of a module `M`.
Let `\{ b_i \}_{i \in I}` be a basis of the `R`-module `M`. Then the
tensor algebra `T(M)` of `M` is an associative `R`-algebra, with a
basis consisting of all tensors of the form
`b_{i_1} \otimes b_{i_2} \otimes \cdots \otimes b_{i_n}` for
nonnegative integers `n` and `n`-tuples
`(i_1, i_2, \ldots, i_n) \in I^n`. The product of `T(M)` is given by
.. MATH::
(b_{i_1} \otimes \cdots \otimes b_{i_m}) \cdot (b_{j_1} \otimes
\cdots \otimes b_{j_n}) = b_{i_1} \otimes \cdots \otimes b_{i_m}
\otimes b_{j_1} \otimes \cdots \otimes b_{j_n}.
As an algebra, it is generated by the basis vectors `b_i` of `M`. It
is an `\NN`-graded `R`-algebra, with the degree of each `b_i` being
`1`.
It also has a Hopf algebra structure: The comultiplication is the
unique algebra morphism `\delta : T(M) \to T(M) \otimes T(M)` defined
by:
.. MATH::
\delta(b_i) = b_i \otimes 1 + 1 \otimes b_i
(where the `\otimes` symbol here forms tensors in
`T(M) \otimes T(M)`, not inside `T(M)` itself). The counit is the
unique algebra morphism `T(M) \to R` sending each `b_i` to `0`. Its
antipode `S` satisfies
.. MATH::
S(b_{i_1} \otimes \cdots \otimes b_{i_m}) = (-1)^m (b_{i_m} \otimes
\cdots \otimes b_{i_1}).
This is a connected graded cocommutative Hopf algebra.
REFERENCES:
- :wikipedia:`Tensor_algebra`
.. SEEALSO::
:class:`TensorAlgebra`
EXAMPLES::
sage: C = CombinatorialFreeModule(QQ, ['a','b','c'])
sage: TA = TensorAlgebra(C)
sage: TA.dimension()
+Infinity
sage: TA.base_ring()
Rational Field
sage: TA.algebra_generators()
Finite family {'a': B['a'], 'c': B['c'], 'b': B['b']}
"""
def __init__(self, M, prefix='T', category=None, **options):
    r"""
    Initialize ``self``.

    EXAMPLES::

        sage: C = CombinatorialFreeModule(QQ, ['a','b','c'])
        sage: TA = TensorAlgebra(C)
        sage: TestSuite(TA).run()

        sage: m = SymmetricFunctions(QQ).m()
        sage: Tm = TensorAlgebra(m)
        sage: TestSuite(Tm).run()
    """
    self._base_module = M
    R = M.base_ring()
    # Default category: graded Hopf algebras with basis over the category
    # of the base ring, unless the caller supplied a (sub)category.
    category = GradedHopfAlgebrasWithBasis(R.category()).or_subcategory(category)
    # The basis of T(M) is indexed by words in the free monoid on the
    # index set of the basis of M.
    CombinatorialFreeModule.__init__(self, R, IndexedFreeMonoid(M.indices()),
                                     prefix=prefix, category=category, **options)
    # the following is not the best option, but it's better than nothing.
    self._print_options['tensor_symbol'] = options.get('tensor_symbol', tensor.symbol)
def _repr_(self):
    r"""
    Return a string describing this tensor algebra.

    EXAMPLES::

        sage: C = CombinatorialFreeModule(QQ, ['a','b','c'])
        sage: TensorAlgebra(C)
        Tensor Algebra of Free module generated by {'a', 'b', 'c'} over Rational Field
    """
    return "Tensor Algebra of %s" % (self._base_module,)
def _repr_term(self, m):
    """
    Return the print representation of the basis element indexed by ``m``.

    TESTS::

        sage: C = CombinatorialFreeModule(QQ, ['a','b','c'])
        sage: TA = TensorAlgebra(C)
        sage: s = TA(['a','b','c']).leading_support()
        sage: TA._repr_term(s)
        "B['a'] # B['b'] # B['c']"
        sage: s = TA(['a']*3 + ['b']*2 + ['a','c','b']).leading_support()
        sage: TA._repr_term(s)
        "B['a'] # B['a'] # B['a'] # B['b'] # B['b'] # B['a'] # B['c'] # B['b']"
        sage: I = TA.indices()
        sage: TA._repr_term(I.one())
        '1'
    """
    # The empty word indexes the multiplicative identity.
    if len(m) == 0:
        return '1'
    sep = self._print_options['tensor_symbol']
    if sep is None:
        sep = tensor.symbol
    # Expand the run-length encoded word (key, multiplicity) into one
    # printed factor per tensor position.
    factors = []
    for key, mult in m._monomial:
        factors.extend([self._base_module._repr_term(key)] * mult)
    return sep.join(factors)
def _latex_term(self, m):
    r"""
    Return a latex representation of the basis element indexed by ``m``.

    TESTS::

        sage: C = CombinatorialFreeModule(QQ, ['a','b','c'])
        sage: TA = TensorAlgebra(C)
        sage: s = TA(['a','b','c']).leading_support()
        sage: TA._latex_term(s)
        'B_{a} \\otimes B_{b} \\otimes B_{c}'
        sage: I = TA.indices()
        sage: TA._latex_term(I.one())
        '1'
    """
    # The empty word indexes the identity, printed as '1'.
    if len(m) == 0:
        return '1'
    factors = []
    for key, mult in m._monomial:
        factors.extend([self._base_module._latex_term(key)] * mult)
    return " \\otimes ".join(factors)
def _ascii_art_term(self, m):
    """
    Return an ascii art representation of the basis element indexed
    by ``m``.

    TESTS::

        sage: C = CombinatorialFreeModule(QQ, Partitions())
        sage: TA = TensorAlgebra(C)
        sage: s = TA([Partition([3,2,2,1]), Partition([3])]).leading_support()
        sage: TA._ascii_art_term(s)
        B    # B
         ***    ***
         **
         **
         *
        sage: s = TA([Partition([3,2,2,1])]*2 + [Partition([3])]*3 + [Partition([1])]*2).leading_support()
        sage: TA._ascii_art_term(s)
        B    # B    # B    # B    # B    # B  # B
         ***    ***    ***    ***    ***    *    *
         **     **
         **     **
         *      *
        sage: I = TA.indices()
        sage: TA._ascii_art_term(I.one())
        '1'
    """
    if len(m) == 0:
        return '1'
    from sage.typeset.ascii_art import AsciiArt
    symb = self._print_options['tensor_symbol']
    if symb is None:
        symb = tensor.symbol
    M = self._base_module
    # Flatten the run-length encoded word into one key per tensor factor.
    keys = [k for k, e in m._monomial for _ in range(e)]
    art = M._ascii_art_term(keys[0])
    for k in keys[1:]:
        art += AsciiArt([symb], [len(symb)])
        art += M._ascii_art_term(k)
    return art
def _element_constructor_(self, x):
    """
    Construct an element of ``self`` from ``x``.

    EXAMPLES::

        sage: C = CombinatorialFreeModule(QQ, ['a','b','c'])
        sage: TA = TensorAlgebra(C)
        sage: TA(['a','b','c'])
        B['a'] # B['b'] # B['c']
        sage: TA(['a','b','b'])
        B['a'] # B['b'] # B['b']
        sage: TA(['a','b','c']) + TA(['a'])
        B['a'] + B['a'] # B['b'] # B['c']
        sage: TA(['a','b','c']) + TA(['a','b','a'])
        B['a'] # B['b'] # B['a'] + B['a'] # B['b'] # B['c']
        sage: TA(['a','b','c']) + TA(['a','b','c'])
        2*B['a'] # B['b'] # B['c']
        sage: TA(C.an_element())
        2*B['a'] + 2*B['b'] + 3*B['c']
    """
    mon = self._indices
    # A list/tuple of keys is read as an explicit simple tensor.
    if isinstance(x, (list, tuple)):
        word = mon.one()
        for key in x:
            word = word * mon.gen(key)
        return self.monomial(word)
    # A single basis key of the base module gives a degree-one generator.
    if x in mon._indices:
        return self.monomial(mon.gen(x))
    # An element of the base module embeds linearly in degree one.
    if x in self._base_module:
        return self.sum_of_terms((mon.gen(k), v) for k, v in x)
    return CombinatorialFreeModule._element_constructor_(self, x)
def _tensor_constructor_(self, elts):
    """
    Construct the element of ``self`` equal to the tensor product of
    the base module elements in the list ``elts``.

    An empty list yields the multiplicative identity.

    TESTS::

        sage: C = CombinatorialFreeModule(ZZ, ['a','b'])
        sage: TA = TensorAlgebra(C)
        sage: x = C.an_element(); x
        2*B['a'] + 2*B['b']
        sage: TA._tensor_constructor_([x, x])
        4*B['a'] # B['a'] + 4*B['a'] # B['b']
         + 4*B['b'] # B['a'] + 4*B['b'] # B['b']
        sage: y = C.monomial('b') + 3*C.monomial('a')
        sage: TA._tensor_constructor_([x, y])
        6*B['a'] # B['a'] + 2*B['a'] # B['b'] + 6*B['b'] # B['a']
         + 2*B['b'] # B['b']
        sage: TA._tensor_constructor_([y]) == y
        True
        sage: TA._tensor_constructor_([x]) == x
        True
        sage: TA._tensor_constructor_([]) == TA.one()
        True
    """
    if not elts:
        return self.one()
    zero = self.base_ring().zero()
    I = self._indices
    # Expand factor by factor: ``cur`` maps a word in the index monoid to
    # its coefficient in the partial product built so far.
    cur = {I.gen(k): v for k, v in elts[0]}
    for x in elts[1:]:
        # ``nxt`` (not ``next``: avoid shadowing the builtin) holds the
        # next partial product.
        nxt = {}
        for k, v in cur.items():
            for m, c in x:
                i = k * I.gen(m)
                # Accumulate into the dict being built.  (The previous
                # code read ``cur.get(i, zero)``, which only worked
                # because words in a free monoid never collide; this is
                # the correct accumulator.)
                nxt[i] = nxt.get(i, zero) + v * c
        cur = nxt
    return self._from_dict(cur)
def _coerce_map_from_(self, R):
    """
    Return ``True`` if there is a coercion from ``R`` into ``self`` and
    ``False`` otherwise. The things that coerce into ``self`` are:

    - Anything with a coercion into ``self.base_ring()``.

    - Anything with a coercion into the base module of ``self``.

    - A tensor algebra whose base module has a coercion into the base
      module of ``self``.

    - A tensor module whose factors have a coercion into the base
      module of ``self``.

    TESTS::

        sage: C = CombinatorialFreeModule(ZZ, Set([1,2]))
        sage: TAC = TensorAlgebra(C)
        sage: TAC.has_coerce_map_from(ZZ)
        True
        sage: TAC(1) == TAC.one()
        True
        sage: TAC.has_coerce_map_from(C)
        True
        sage: c = C.monomial(2)
        sage: TAC(c)
        B[2]
        sage: d = C.monomial(1)
        sage: TAC(c) * TAC(d)
        B[2] # B[1]
        sage: TAC(c-d) * TAC(c+d)
        -B[1] # B[1] - B[1] # B[2] + B[2] # B[1] + B[2] # B[2]
        sage: TCC = tensor((C,C))
        sage: TAC.has_coerce_map_from(TCC)
        True
        sage: TAC(tensor([c, d]))
        B[2] # B[1]

    ::

        sage: D = CombinatorialFreeModule(ZZ, Set([2,4]))
        sage: TAD = TensorAlgebra(D)
        sage: f = C.module_morphism(on_basis=lambda x: D.monomial(2*x), codomain=D)
        sage: f.register_as_coercion()
        sage: TCD = tensor((C,D))
        sage: TAD.has_coerce_map_from(TCC)
        True
        sage: TAD.has_coerce_map_from(TCD)
        True
        sage: TAC.has_coerce_map_from(TCD)
        False
        sage: TAD.has_coerce_map_from(TAC)
        True
        sage: TAD(3 * TAC([1, 2, 2, 1, 1]))
        3*B[2] # B[4] # B[4] # B[2] # B[2]
    """
    # NOTE: the cases below are checked in order; each returns either a
    # morphism implementing the coercion or defers to the generic code.
    # Base ring coercions
    self_base_ring = self.base_ring()
    if self_base_ring == R:
        return BaseRingLift(Hom(self_base_ring, self))
    if self_base_ring.has_coerce_map_from(R):
        # Coerce into the base ring first, then lift scalars into ``self``.
        return BaseRingLift(Hom(self_base_ring, self)) * self_base_ring.coerce_map_from(R)
    M = self._base_module
    # Base module coercions
    if R == M:
        return True
    if M.has_coerce_map_from(R):
        phi = M.coerce_map_from(R)
        return self.coerce_map_from(M) * phi
    # Tensor algebra coercions
    if isinstance(R, TensorAlgebra) and M.has_coerce_map_from(R._base_module):
        RM = R._base_module
        phi = M.coerce_map_from(RM)
        # Map each word letter-by-letter through the base-module coercion
        # and retensor the images.
        return R.module_morphism(lambda m: self._tensor_constructor_(
                        [phi(RM.monomial(k)) for k in m.to_word_list()]),
                                 codomain=self)
    # Coercions from tensor products
    if (R in Modules(self_base_ring).WithBasis().TensorProducts()
            and isinstance(R, CombinatorialFreeModule_Tensor)
            and all(M.has_coerce_map_from(RM) for RM in R._sets)):
        modules = R._sets
        vector_map = [M.coerce_map_from(RM) for RM in R._sets]
        # Coerce each tensor factor into M, then build the simple tensor.
        return R.module_morphism(lambda x: self._tensor_constructor_(
                    [vector_map[i](M.monomial(x[i]))
                     for i, M in enumerate(modules)]),
                                 codomain=self)
    return super(TensorAlgebra, self)._coerce_map_from_(R)
def construction(self):
    """
    Return the functorial construction of ``self`` as a pair
    (functor, base module).

    EXAMPLES::

        sage: C = CombinatorialFreeModule(ZZ, ['a','b','c'])
        sage: TA = TensorAlgebra(C)
        sage: f, M = TA.construction()
        sage: M == C
        True
        sage: f(M) == TA
        True
    """
    functor = TensorAlgebraFunctor(self.category().base())
    return (functor, self._base_module)
def degree_on_basis(self, m):
    """
    Return the degree of the simple tensor ``m``, which is its length
    (thought of as an element in the free monoid).

    EXAMPLES::

        sage: C = CombinatorialFreeModule(QQ, ['a','b','c'])
        sage: TA = TensorAlgebra(C)
        sage: s = TA(['a','b','c']).leading_support(); s
        F['a']*F['b']*F['c']
        sage: TA.degree_on_basis(s)
        3
    """
    # Each generator has degree 1, so the degree of a word is its length.
    return m.length()
def base_module(self):
    """
    Return the base module `M` of this tensor algebra `T(M)`.

    EXAMPLES::

        sage: C = CombinatorialFreeModule(QQ, ['a','b','c'])
        sage: TA = TensorAlgebra(C)
        sage: TA.base_module() is C
        True
    """
    return self._base_module
@cached_method
def one_basis(self):
    r"""
    Return the empty word, which indexes of `1` of this algebra.

    EXAMPLES::

        sage: C = CombinatorialFreeModule(QQ, ['a','b','c'])
        sage: TA = TensorAlgebra(C)
        sage: TA.one_basis()
        1
        sage: TA.one_basis().parent()
        Free monoid indexed by {'a', 'b', 'c'}

        sage: m = SymmetricFunctions(QQ).m()
        sage: Tm = TensorAlgebra(m)
        sage: Tm.one_basis()
        1
        sage: Tm.one_basis().parent()
        Free monoid indexed by Partitions
    """
    # The identity of T(M) is indexed by the identity of the free monoid.
    return self._indices.one()
@cached_method
def algebra_generators(self):
r"""
Return the generators of this algebra.
EXAMPLES::
sage: C = CombinatorialFreeModule(QQ, ['a','b','c'])
sage: TA = TensorAlgebra(C)
sage: TA.algebra_generators()
Finite family {'a': B['a'], 'c': B['c'], 'b': B['b']}
sage: m = SymmetricFunctions(QQ).m()
sage: Tm = TensorAlgebra(m)
sage: Tm.algebra_generators()
Lazy family (generator(i))_{i in Partitions}
"""
return Family(self._indices.indices(),
lambda i: self.monomial(self._indices.gen(i)),
name='generator')
gens = algebra_generators
def product_on_basis(self, a, b):
r"""
Return the product of the basis elements indexed by ``a`` and
``b``, as per
:meth:`AlgebrasWithBasis.ParentMethods.product_on_basis()`.
INPUT:
- ``a``, ``b`` -- basis indices
EXAMPLES::
sage: C = CombinatorialFreeModule(QQ, ['a','b','c'])
sage: TA = TensorAlgebra(C)
sage: I = TA.indices()
sage: g = I.gens()
sage: TA.product_on_basis(g['a']*g['b'], g['a']*g['c'])
B['a'] # B['b'] # B['a'] # B['c']
"""
return self.monomial(a * b)
def counit(self, x):
"""
Return the counit of ``x``.
INPUT:
- ``x`` -- an element of ``self``
EXAMPLES::
sage: C = CombinatorialFreeModule(QQ, ['a','b','c'])
sage: TA = TensorAlgebra(C)
sage: x = TA(['a','b','c'])
sage: TA.counit(x)
0
sage: TA.counit(x + 3)
3
"""
return x[self.one_basis()]
def antipode_on_basis(self, m):
"""
Return the antipode of the simple tensor indexed by ``m``.
EXAMPLES::
sage: C = CombinatorialFreeModule(QQ, ['a','b','c'])
sage: TA = TensorAlgebra(C)
sage: s = TA(['a','b','c']).leading_support()
sage: TA.antipode_on_basis(s)
-B['c'] # B['b'] # B['a']
sage: t = TA(['a', 'b', 'b', 'b']).leading_support()
sage: TA.antipode_on_basis(t)
B['b'] # B['b'] # B['b'] # B['a']
"""
m = self._indices(reversed(m._monomial))
R = self.base_ring()
if len(m) % 2 == 1:
return self.term(m, -R.one())
else:
return self.term(m, R.one())
def coproduct_on_basis(self, m):
r"""
Return the coproduct of the simple tensor indexed by ``m``.
EXAMPLES::
sage: C = CombinatorialFreeModule(QQ, ['a','b','c'])
sage: TA = TensorAlgebra(C, tensor_symbol="(X)")
sage: TA.coproduct_on_basis(TA.one_basis())
1 # 1
sage: I = TA.indices()
sage: ca = TA.coproduct_on_basis(I.gen('a')); ca
1 # B['a'] + B['a'] # 1
sage: s = TA(['a','b','c']).leading_support()
sage: cp = TA.coproduct_on_basis(s); cp
1 # B['a'](X)B['b'](X)B['c'] + B['a'] # B['b'](X)B['c']
+ B['a'](X)B['b'] # B['c'] + B['a'](X)B['b'](X)B['c'] # 1
+ B['a'](X)B['c'] # B['b'] + B['b'] # B['a'](X)B['c']
+ B['b'](X)B['c'] # B['a'] + B['c'] # B['a'](X)B['b']
We check that `\Delta(a \otimes b \otimes c) =
\Delta(a) \Delta(b) \Delta(c)`::
sage: cb = TA.coproduct_on_basis(I.gen('b'))
sage: cc = TA.coproduct_on_basis(I.gen('c'))
sage: cp == ca * cb * cc
True
"""
S = self.tensor_square()
if len(m) == 0:
return S.one()
if len(m) == 1:
ob = self.one_basis()
return S.sum_of_monomials([(m, ob), (ob, m)])
I = self._indices
m_word = [k for k,e in m._monomial for dummy in range(e)]
ob = self.one_basis()
return S.prod(S.sum_of_monomials([(I.gen(x), ob), (ob, I.gen(x))])
for x in m_word)
# TODO: Implement a coproduct using shuffles.
# This isn't quite right:
#from sage.combinat.words.word import Word
#k = len(m)
#return S.sum_of_monomials( (I.prod(I.gen(m_word[i]) for i in w[:p]),
# I.prod(I.gen(m_word[i]) for i in w[p:]))
# for p in range(k+1)
# for w in Word(range(p)).shuffle(range(p, k)) )
#####################################################################
## TensorAlgebra functor
class TensorAlgebraFunctor(ConstructionFunctor):
    r"""
    Construction functor for tensor algebras.

    For a unital ring `R`, this is the functor `T` from the category
    of `R`-modules to the category of `R`-algebras sending a module
    `M` to its tensor algebra `T(M)`. It is left-adjoint to the
    forgetful functor `F` from `R`-algebras back to `R`-modules.

    INPUT:

    - ``base`` -- the base `R`
    """
    # This functor must be applied after all of the module construction
    # functors, hence the larger-than-usual rank.
    rank = 20

    def __init__(self, base):
        """
        Initialize ``self``.

        EXAMPLES::

            sage: from sage.algebras.tensor_algebra import TensorAlgebraFunctor
            sage: F = TensorAlgebraFunctor(Rings())
            sage: TestSuite(F).run()
        """
        ConstructionFunctor.__init__(self, Modules(base), Algebras(base))

    def _repr_(self):
        """
        Return a string representation of ``self``.

        EXAMPLES::

            sage: from sage.algebras.tensor_algebra import TensorAlgebraFunctor
            sage: TensorAlgebraFunctor(Rings())
            Tensor algebra functor on modules over rings
            sage: TensorAlgebraFunctor(QQ)
            Tensor algebra functor on vector spaces over Rational Field
        """
        names = self.domain()._repr_object_names()
        return "Tensor algebra functor on {}".format(names)

    def _apply_functor(self, M):
        """
        Construct the tensor algebra `T(M)`.

        EXAMPLES::

            sage: from sage.algebras.tensor_algebra import TensorAlgebraFunctor
            sage: C = CombinatorialFreeModule(QQ, ['a','b','c'])
            sage: F = TensorAlgebraFunctor(QQ)
            sage: F._apply_functor(C)
            Tensor Algebra of Free module generated by {'a', 'b', 'c'} over Rational Field
        """
        # Only modules with a distinguished basis are supported.
        if M not in self.domain().WithBasis():
            raise NotImplementedError("currently only for modules with basis")
        return TensorAlgebra(M)

    def _apply_functor_to_morphism(self, f):
        """
        Apply ``self`` to a morphism ``f`` in the domain of ``self``.

        EXAMPLES::

            sage: from sage.algebras.tensor_algebra import TensorAlgebraFunctor
            sage: C = CombinatorialFreeModule(QQ, ['a','b','c'])
            sage: D = CombinatorialFreeModule(QQ, ['x','y'])
            sage: on_basis = lambda m: C.term('a', 2) + C.monomial('b') if m == 'x' else sum(C.basis())
            sage: phi = D.module_morphism(on_basis, codomain=C); phi
            Generic morphism:
              From: Free module generated by {'x', 'y'} over Rational Field
              To:   Free module generated by {'a', 'b', 'c'} over Rational Field
            sage: list(map(phi, D.basis()))
            [2*B['a'] + B['b'], B['a'] + B['b'] + B['c']]
            sage: F = TensorAlgebraFunctor(QQ)
            sage: Tphi = F._apply_functor_to_morphism(phi); Tphi
            Generic morphism:
              From: Tensor Algebra of Free module generated by {'x', 'y'} over Rational Field
              To:   Tensor Algebra of Free module generated by {'a', 'b', 'c'} over Rational Field
            sage: G = F(D).algebra_generators()
            sage: list(map(Tphi, G))
            [2*B['a'] + B['b'], B['a'] + B['b'] + B['c']]
            sage: Tphi(sum(G))
            3*B['a'] + 2*B['b'] + B['c']
            sage: Tphi(G['x'] * G['y'])
            2*B['a'] # B['a'] + 2*B['a'] # B['b'] + 2*B['a'] # B['c']
             + B['b'] # B['a'] + B['b'] # B['b'] + B['b'] # B['c']
        """
        DB = f.domain()
        dom = self(DB)
        cod = self(f.codomain())

        def on_basis(m):
            # Push each letter of the word through ``f`` and take the
            # tensor product of the images.
            return cod._tensor_constructor_([f(DB.monomial(k))
                                             for k in m.to_word_list()])

        return dom.module_morphism(on_basis, codomain=cod)
#####################################################################
## Lift map from the base ring
class BaseRingLift(Morphism):
    r"""
    The canonical inclusion `R \to T(M)` identifying the base ring `R`
    of a tensor algebra `T(M)` with its `0`-th graded part.
    """
    def _call_(self, x):
        """
        Return the image of ``x`` in the tensor algebra.

        TESTS::

            sage: C = CombinatorialFreeModule(QQ, Set([1,2]))
            sage: TA = TensorAlgebra(C)
            sage: TA(ZZ(2))
            2
        """
        T = self.codomain()
        # ``x`` becomes a scalar multiple of the empty word.
        coefficient = T.base_ring()(x)
        return T.term(T.indices().one(), coefficient)
| 34.446003 | 110 | 0.500122 |
59ece827e3ea489418d9fc7ccc971f72dfa8f070 | 1,061 | py | Python | extensions/interactions/NumericInput/NumericInput.py | cleophasmashiri/oppia | 443b960756501cffc2654fb7063a309691b9f4cf | [
"Apache-2.0"
] | 3 | 2015-03-17T01:34:14.000Z | 2015-04-11T10:35:53.000Z | extensions/interactions/NumericInput/NumericInput.py | cleophasmashiri/oppia | 443b960756501cffc2654fb7063a309691b9f4cf | [
"Apache-2.0"
] | null | null | null | extensions/interactions/NumericInput/NumericInput.py | cleophasmashiri/oppia | 443b960756501cffc2654fb7063a309691b9f4cf | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from extensions.interactions import base
class NumericInput(base.BaseInteraction):
    """Interaction for numeric input."""

    # Display name shown to exploration editors.
    name = 'Number'
    # Category under which this interaction is grouped in the editor.
    category = 'Mathematics'
    description = (
        'Allows learners to enter integers and floating point numbers.')
    # Rendered inline with the surrounding content, not as a block.
    display_mode = base.DISPLAY_MODE_INLINE
    # No extra frontend dependencies are needed for this interaction.
    _dependency_ids = []
    # Single submission handler; answers are typed as real numbers.
    _handlers = [{
        'name': 'submit', 'obj_type': 'Real'}]
    # No author-customizable arguments.
    _customization_arg_specs = []
| 32.151515 | 74 | 0.721018 |
2bcd64affcf51abc8e68396258c6117323888b6c | 7,887 | py | Python | ingenico/connect/sdk/domain/payment/definitions/order.py | festicket/connect-sdk-python3 | c399c6443789dd978f319c89e1ebd387c812a77b | [
"MIT"
] | 12 | 2016-09-26T21:46:31.000Z | 2020-12-23T18:44:54.000Z | ingenico/connect/sdk/domain/payment/definitions/order.py | festicket/connect-sdk-python3 | c399c6443789dd978f319c89e1ebd387c812a77b | [
"MIT"
] | 3 | 2020-05-02T16:53:02.000Z | 2020-06-02T12:49:51.000Z | ingenico/connect/sdk/domain/payment/definitions/order.py | festicket/connect-sdk-python3 | c399c6443789dd978f319c89e1ebd387c812a77b | [
"MIT"
] | 11 | 2017-07-16T00:55:28.000Z | 2021-09-24T17:00:49.000Z | # -*- coding: utf-8 -*-
#
# This class was auto-generated from the API references found at
# https://epayments-api.developer-ingenico.com/s2sapi/v1/
#
from ingenico.connect.sdk.data_object import DataObject
from ingenico.connect.sdk.domain.definitions.amount_of_money import AmountOfMoney
from ingenico.connect.sdk.domain.payment.definitions.additional_order_input import AdditionalOrderInput
from ingenico.connect.sdk.domain.payment.definitions.customer import Customer
from ingenico.connect.sdk.domain.payment.definitions.line_item import LineItem
from ingenico.connect.sdk.domain.payment.definitions.order_references import OrderReferences
from ingenico.connect.sdk.domain.payment.definitions.seller import Seller
from ingenico.connect.sdk.domain.payment.definitions.shipping import Shipping
from ingenico.connect.sdk.domain.payment.definitions.shopping_cart import ShoppingCart
class Order(DataObject):
    """
    Order data for a payment: the amount, the customer, the shopping
    cart contents, shipping information and merchant references.

    All fields are optional; :meth:`to_dictionary` and
    :meth:`from_dictionary` convert between this object and its
    wire (JSON-style) dictionary form.
    """

    __additional_input = None
    __amount_of_money = None
    __customer = None
    __items = None
    __references = None
    __seller = None
    __shipping = None
    __shopping_cart = None

    # Serialization table driving to_dictionary/from_dictionary:
    # (python attribute, wire key, element class, value is a list).
    __FIELD_SPECS = (
        ('additional_input', 'additionalInput', AdditionalOrderInput, False),
        ('amount_of_money', 'amountOfMoney', AmountOfMoney, False),
        ('customer', 'customer', Customer, False),
        ('items', 'items', LineItem, True),
        ('references', 'references', OrderReferences, False),
        ('seller', 'seller', Seller, False),
        ('shipping', 'shipping', Shipping, False),
        ('shopping_cart', 'shoppingCart', ShoppingCart, False),
    )

    @property
    def additional_input(self):
        """
        | Object containing additional input on the order

        Type: :class:`ingenico.connect.sdk.domain.payment.definitions.additional_order_input.AdditionalOrderInput`
        """
        return self.__additional_input

    @additional_input.setter
    def additional_input(self, value):
        self.__additional_input = value

    @property
    def amount_of_money(self):
        """
        | Object containing amount and ISO currency code attributes

        Type: :class:`ingenico.connect.sdk.domain.definitions.amount_of_money.AmountOfMoney`
        """
        return self.__amount_of_money

    @amount_of_money.setter
    def amount_of_money(self, value):
        self.__amount_of_money = value

    @property
    def customer(self):
        """
        | Object containing the details of the customer

        Type: :class:`ingenico.connect.sdk.domain.payment.definitions.customer.Customer`
        """
        return self.__customer

    @customer.setter
    def customer(self, value):
        self.__customer = value

    @property
    def items(self):
        """
        | Shopping cart data

        Type: list[:class:`ingenico.connect.sdk.domain.payment.definitions.line_item.LineItem`]

        Deprecated; Use shoppingCart.items instead
        """
        return self.__items

    @items.setter
    def items(self, value):
        self.__items = value

    @property
    def references(self):
        """
        | Object that holds all reference properties that are linked to this transaction

        Type: :class:`ingenico.connect.sdk.domain.payment.definitions.order_references.OrderReferences`
        """
        return self.__references

    @references.setter
    def references(self, value):
        self.__references = value

    @property
    def seller(self):
        """
        | Object containing seller details

        Type: :class:`ingenico.connect.sdk.domain.payment.definitions.seller.Seller`

        Deprecated; Use Merchant.seller instead
        """
        return self.__seller

    @seller.setter
    def seller(self, value):
        self.__seller = value

    @property
    def shipping(self):
        """
        | Object containing information regarding shipping / delivery

        Type: :class:`ingenico.connect.sdk.domain.payment.definitions.shipping.Shipping`
        """
        return self.__shipping

    @shipping.setter
    def shipping(self, value):
        self.__shipping = value

    @property
    def shopping_cart(self):
        """
        | Shopping cart data, including items and specific amounts.

        Type: :class:`ingenico.connect.sdk.domain.payment.definitions.shopping_cart.ShoppingCart`
        """
        return self.__shopping_cart

    @shopping_cart.setter
    def shopping_cart(self, value):
        self.__shopping_cart = value

    def to_dictionary(self):
        """
        Serialize ``self`` and all of its nested objects into a
        dictionary suitable for the wire format.
        """
        dictionary = super(Order, self).to_dictionary()
        for attribute, key, _element_class, is_list in self.__FIELD_SPECS:
            value = getattr(self, attribute)
            if value is None:
                continue
            if is_list:
                # ``None`` entries in the list are silently skipped.
                dictionary[key] = [element.to_dictionary()
                                   for element in value
                                   if element is not None]
            else:
                dictionary[key] = value.to_dictionary()
        return dictionary

    def from_dictionary(self, dictionary):
        """
        Populate ``self`` from the wire-format ``dictionary`` and
        return ``self``.

        Raises ``TypeError`` when a present value does not have the
        expected container type (a dict, or a list for ``items``).
        """
        super(Order, self).from_dictionary(dictionary)
        for attribute, key, element_class, is_list in self.__FIELD_SPECS:
            if key not in dictionary:
                continue
            raw = dictionary[key]
            if is_list:
                if not isinstance(raw, list):
                    raise TypeError('value \'{}\' is not a list'.format(raw))
                setattr(self, attribute,
                        [element_class().from_dictionary(element)
                         for element in raw])
            else:
                if not isinstance(raw, dict):
                    raise TypeError('value \'{}\' is not a dictionary'.format(raw))
                setattr(self, attribute, element_class().from_dictionary(raw))
        return self
| 38.661765 | 114 | 0.648789 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.