blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 4
721
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 5
91
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 321
values | visit_date
timestamp[ns]date 2016-08-12 09:31:09
2023-09-06 10:45:07
| revision_date
timestamp[ns]date 2010-09-28 14:01:40
2023-09-06 06:22:19
| committer_date
timestamp[ns]date 2010-09-28 14:01:40
2023-09-06 06:22:19
| github_id
int64 426
681M
| star_events_count
int64 101
243k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[ns]date 2012-06-28 18:51:49
2023-09-14 21:59:16
⌀ | gha_created_at
timestamp[ns]date 2008-02-11 22:55:26
2023-08-10 11:14:58
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 26
values | language
stringclasses 2
values | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 6
10.2M
| extension
stringclasses 115
values | filename
stringlengths 3
113
| content
stringlengths 6
10.2M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
baccf41159024249a36b19ceaa64725f2fbe75f5
|
6f2fef1b207299681f8d67d3831c400bb91de04b
|
/data_collection/gazette/spiders/pa_associacao_municipios.py
|
2eae49b519d8e95d99aa57223fd6aa3d567b0ee1
|
[
"MIT"
] |
permissive
|
okfn-brasil/querido-diario
|
76177747aa5ad47e99514f38402e6bc747b9a715
|
548a9b1b2718dc78ba8ccb06b36cf337543ad71d
|
refs/heads/main
| 2023-08-22T04:26:30.798196
| 2023-08-18T14:12:37
| 2023-08-18T14:12:37
| 127,598,755
| 402
| 233
|
MIT
| 2023-09-14T18:56:02
| 2018-04-01T05:01:21
|
Python
|
UTF-8
|
Python
| false
| false
| 248
|
py
|
pa_associacao_municipios.py
|
from gazette.spiders.base.sigpub import SigpubGazetteSpider
class PaAssociacaoMunicipiosSpider(SigpubGazetteSpider):
    """Gazette spider for the Sigpub calendar at diariomunicipal.com.br/famep.

    All scraping logic lives in SigpubGazetteSpider; this subclass only
    supplies the territory and calendar configuration.
    """

    name = "pa_associacao_municipios"
    # Territory code the scraped gazettes are tagged with. Presumably the
    # state-wide IBGE code for Pará (the "pa_" prefix) — confirm with project docs.
    TERRITORY_ID = "1500000"
    # Sigpub calendar page listing the gazette editions.
    CALENDAR_URL = "https://www.diariomunicipal.com.br/famep"
|
f5ad78a4254bcbac1ed3aa7e75ae22e849d6ecc5
|
fa6e1299ef52ca2d4a13b3788d2a4d0540728f81
|
/plugins/slicer/MONAILabelReviewer/MONAILabelReviewerLib/ImageDataExtractor.py
|
ba15b7e224f22398aea5ef205d2e89dddf382691
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
Project-MONAI/MONAILabel
|
c3abd164255a50279fc5aa6a87f4336fff4d6833
|
c90f42c0730554e3a05af93645ae84ccdcb5e14b
|
refs/heads/main
| 2023-09-01T21:44:42.465238
| 2023-08-31T17:17:08
| 2023-08-31T17:17:08
| 351,826,770
| 448
| 167
|
Apache-2.0
| 2023-09-14T12:06:28
| 2021-03-26T15:25:10
|
Python
|
UTF-8
|
Python
| false
| false
| 15,024
|
py
|
ImageDataExtractor.py
|
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
from typing import Dict, List
from MONAILabelReviewerLib.ImageData import ImageData
from MONAILabelReviewerLib.MONAILabelReviewerEnum import Level
"""
ImageDataExtractor gets dictionary (mapping from id to ImageData from JsonParser) and caches
Mapping:
- imageIds TO ImageData,
- client TO list of imageIds
List:
- imageIds of all images which are not segemented yet
- imageIds of all images which are approved
- all reviewers
Each modification during review process will be stored in corresponding ImageData
ImageDataExtractor provides the meta data across all ImageData-Containers when the user selects the filter option
"""
class ImageDataExtractor:
    """
    Caches the mapping {imageId: ImageData} (produced by JsonParser) and builds
    secondary indices over it:
      - clientToImageIds: client (annotator) id -> imageIds segmented by that client
      - idsOfNotSegmented: imageIds of images not segmented yet
      - reviewers: distinct reviewer names found on segmented images
    It also provides filtered and aggregated views over all ImageData
    containers (by client, reviewer, approval/flag status, difficulty level).
    Call init() once after construction to populate the indices.
    """

    def __init__(self, nameToImageData: dict):
        self.LEVEL = Level()
        self.nameToImageData: Dict[str, ImageData] = nameToImageData
        self.clientToImageIds: Dict[str, list] = {}
        self.idsOfNotSegmented: List[str] = []
        self.idsOfApprovedSementations: List[str] = []
        self.reviewers: List[str] = []

    def init(self):
        # Build all secondary indices; must run before the getters that rely on
        # clientToImageIds/idsOfNotSegmented/reviewers.
        self.groupImageDataByClientId()
        self.extractAllReviewers()
        self.extractNotSegmentedImageIds()

    def getCurrentTime(self) -> datetime.datetime:
        # Annotation fixed: bare `datetime` names the module, not the type.
        return datetime.datetime.now()

    def groupImageDataByClientId(self):
        # Index imageIds of segmented images by the annotating client id.
        # Images without a client id are intentionally left out.
        for imageId, imageData in self.nameToImageData.items():
            if imageData.isSegemented():
                clientId = imageData.getClientId()
                if clientId:
                    if clientId not in self.clientToImageIds:
                        self.clientToImageIds[clientId] = []
                    self.clientToImageIds[clientId].append(imageId)

    def extractAllReviewers(self):
        # Collect distinct, non-empty reviewer names of segmented images.
        for imageData in self.nameToImageData.values():
            if imageData.isSegemented():
                reviewer = imageData.getApprovedBy()
                if reviewer not in self.reviewers and reviewer != "":
                    self.reviewers.append(reviewer)

    def extractNotSegmentedImageIds(self):
        for imageId, imageData in self.nameToImageData.items():
            if imageData.isSegemented() is False:
                self.idsOfNotSegmented.append(imageId)

    def getTotalNumImages(self) -> int:
        return len(self.nameToImageData)

    def getImageDataIds(self) -> List[str]:
        return [*self.nameToImageData.keys()]

    def getClientIds(self) -> List[str]:
        return [*self.clientToImageIds.keys()]

    def getReviewers(self) -> List[str]:
        return self.reviewers

    def getImageDataNotsegmented(self) -> List[ImageData]:
        """
        returns list of ImageData of the corresponding image studies which have
        not been segmented yet
        """
        notSegmented = []
        for imageId in self.idsOfNotSegmented:
            imageData = self.nameToImageData[imageId]
            notSegmented.append(imageData)
        return notSegmented

    def getNumOfNotSegmented(self) -> int:
        return len(self.idsOfNotSegmented)

    def getNumOfSegmented(self) -> int:
        # Sum of the per-client index lists; segmented images without a client
        # id are not counted here (see groupImageDataByClientId).
        count = 0
        for idList in self.clientToImageIds.values():
            count += len(idList)
        return count

    def getSegmentationProgessInPercentage(self) -> int:
        """
        returns percentage of already segmented images out of all available images
        """
        segmentedCount = self.getNumOfSegmented()
        float_Num = segmentedCount / self.getTotalNumImages()
        return int(float_Num * 100)

    def getSegmentationVsTotalStr(self) -> str:
        """
        returns '<num segmented>/<total num images>' as a display string
        """
        segmentedCount = self.getNumOfSegmented()
        idxTotalSegmented = f"{segmentedCount}/{self.getTotalNumImages()}"
        return idxTotalSegmented

    def getApprovalProgressInPercentage(self) -> int:
        """
        returns percentage of already approved imageData out of all available imageData
        """
        approvalCount = self.getNumApprovedSegmentation()
        fraction = approvalCount / self.getTotalNumImages()
        return int(fraction * 100)

    def getApprovalVsTotal(self) -> str:
        approvalCount = self.getNumApprovedSegmentation()
        idxTotalApproved = f"{approvalCount}/{self.getTotalNumImages()}"
        return idxTotalApproved

    def invalidFilterCombination(self, segmented: bool, notSegmented: bool, approved: bool, flagged: bool) -> bool:
        # 'notSegmented' is mutually exclusive with every other filter, and an
        # image cannot be both approved and flagged.
        return (
            (notSegmented is True and segmented is True)
            or (approved is True and flagged is True)
            or (notSegmented is True and approved is True)
            or (notSegmented is True and flagged is True)
        )

    def getAllImageData(self, segmented=False, notSegmented=False, approved=False, flagged=False) -> List[ImageData]:
        """
        returns a filtered list of imageData according to the given filter flags;
        returns None for an invalid filter combination, and the full list when
        no filter is selected
        """
        if self.invalidFilterCombination(segmented, notSegmented, approved, flagged):
            logging.warning(
                "{}: Selected filter options are not valid: segmented='{}' | notSegmented='{}' | approved='{}' | flagged='{}')".format(
                    self.getCurrentTime(), segmented, notSegmented, approved, flagged
                )
            )
            return None
        if notSegmented is False and segmented is False and approved is False and flagged is False:
            return [*self.nameToImageData.values()]
        selectedImageData = []
        for imagedata in self.nameToImageData.values():
            if notSegmented is True and segmented is False and imagedata.isSegemented() is False:
                selectedImageData.append(imagedata)
                continue
            if imagedata.isSegemented() is segmented and imagedata.isApproved() is True and approved is True:
                selectedImageData.append(imagedata)
                continue
            if (
                imagedata.isSegemented() is segmented
                # and imagedata.isApproved() is approved
                and imagedata.isFlagged() is True
                and flagged is True
            ):
                selectedImageData.append(imagedata)
                continue
        return selectedImageData

    def getImageDataByClientId(self, clientId: str, approved=False, flagged=False) -> List[ImageData]:
        """
        returns a filtered list of imageData filtered by client (=annotator) and
        the approved/flagged parameters; returns None for a blank clientId or an
        invalid filter combination
        """
        if clientId == "":
            return None
        if approved and flagged:
            logging.warning(
                "{}: Selected filter options are not valid: approved='{}' and flagged='{}')".format(
                    self.getCurrentTime(), approved, flagged
                )
            )
            return None
        imageIds = self.clientToImageIds[clientId]
        if approved is False and flagged is False:
            return self.extractImageDataByIds(imageIds)
        else:
            return self.extractImageDataByApprovedAndFlaggedStatus(clientId, approved, flagged, imageIds)

    def extractImageDataByIds(self, imageIds: List[str]) -> List[ImageData]:
        imageDataList = []
        for imageId in imageIds:
            imageData = self.nameToImageData[imageId]
            imageDataList.append(imageData)
        return imageDataList

    def extractImageDataByApprovedAndFlaggedStatus(
        self, clientId: str, approved: bool, flagged: bool, imageIds: List[str]
    ) -> List[ImageData]:
        imageDataList = []
        for imageId in imageIds:
            if imageId not in self.nameToImageData:
                logging.error(
                    "{}: Image data [id = {}] not found for [clientId = {}] ".format(
                        self.getCurrentTime(), imageId, clientId
                    )
                )
                continue
            imageData = self.nameToImageData[imageId]
            if imageData.hasSegmentationMeta() is False:
                continue
            if approved and imageData.isApproved() is False:
                continue
            if flagged and imageData.isFlagged() is False:
                continue
            imageDataList.append(imageData)
        return imageDataList

    def getImageDataByClientAndReviewer(
        self, clientId: str, reviewerId: str, approved=False, flagged=False
    ) -> List[ImageData]:
        """
        returns a filtered list of imageData filtered by client (=annotator),
        reviewer and the approved/flagged parameters
        """
        imageDatas = self.getImageDataByClientId(clientId, approved, flagged)
        filteredByRewiewer = list(filter(lambda imageData: (imageData.getApprovedBy() == reviewerId), imageDatas))
        return filteredByRewiewer

    def getImageDataByReviewer(self, reviewerId: str, approved=False, flagged=False) -> List[ImageData]:
        if reviewerId == "":
            return None
        if approved and flagged:
            logging.warning(
                "{}: Selected filter options are not valid: approved='{}' and flagged='{}')".format(
                    self.getCurrentTime(), approved, flagged
                )
            )
            return None
        filteredImageDataList = []
        for imageData in self.nameToImageData.values():
            if imageData.isSegemented() is False:
                continue
            if approved and imageData.isApproved() is False:
                continue
            if flagged and imageData.isFlagged() is False:
                continue
            if imageData.getApprovedBy() == reviewerId:
                filteredImageDataList.append(imageData)
        return filteredImageDataList

    def getImageDataByLevel(self, isEasy: bool, isMedium: bool, isHard: bool) -> Dict[str, ImageData]:
        """
        returns a filtered dict of imageData filtered by the level of difficulty
        (regarding segmentation): easy, medium, hard
        """
        filteredImageData = {}
        for imageId, imagedata in self.nameToImageData.items():
            if imagedata is None:
                continue
            if imagedata.isSegemented() is False:
                continue
            if isEasy and imagedata.getLevel() == self.LEVEL.EASY:
                filteredImageData[imageId] = imagedata
                continue
            if isMedium and imagedata.getLevel() == self.LEVEL.MEDIUM:
                filteredImageData[imageId] = imagedata
                continue
            if isHard and imagedata.getLevel() == self.LEVEL.HARD:
                filteredImageData[imageId] = imagedata
        return filteredImageData

    def getSingleImageDataById(self, imageId: str) -> ImageData:
        """
        returns the imageData for the given imageId, or None if blank/unknown
        """
        if self.isBlank(imageId):
            return None
        if imageId not in self.nameToImageData:
            logging.warning(f"{self.getCurrentTime()}: Image data for requested id [{imageId}] not found")
            return None
        return self.nameToImageData[imageId]

    def getMultImageDataByIds(self, ids: List[str]) -> Dict[str, ImageData]:
        """
        returns multiple imageData, keyed by imageData name, for the given list
        of imageIds; unknown ids are skipped
        """
        idToimageData: Dict[str, ImageData] = {}
        if len(ids) == 0:
            logging.warning(f"{self.getCurrentTime()}: Given id list is empty.")
            return {}
        for imageId in ids:
            imageData = self.getSingleImageDataById(imageId)
            if imageData is None:
                continue
            idToimageData[imageData.getName()] = imageData
        return idToimageData

    def getNumApprovedSegmentation(self) -> int:
        """
        returns the total number of imageData which are approved
        """
        count = self.countApprovedSegmentation(self.nameToImageData.values())
        return count

    def countApprovedSegmentation(self, imageDatas: List[ImageData]) -> int:
        if imageDatas is None:
            return 0
        approvedCount = 0
        for imageData in imageDatas:
            if imageData is None:
                continue
            if imageData.isApproved():
                approvedCount += 1
        return approvedCount

    def getPercentageApproved(self, clientId: str):
        """
        returns the percentage of images already approved for the given client
        (=annotator) and the display string:
        (number approved for that client)/(number segmented by that client);
        returns 0 when there are no images for the client
        """
        listImageData = self.getImageDataByClientId(clientId=clientId)
        # getImageDataByClientId returns None for a blank clientId; guard before
        # len() and before dividing (the original crashed on len(None) and only
        # checked for an empty list after computing the count).
        if not listImageData:
            logging.warning(f"{self.getCurrentTime()}: There are no images")
            return 0
        approvedCount = self.countApprovedSegmentation(listImageData)
        fraction = approvedCount / len(listImageData)
        percentage = int(fraction * 100)
        idxApprovedOfClient: str = f"{approvedCount}/{len(listImageData)}"
        return percentage, idxApprovedOfClient

    def getPercentageSemgmentedByClient(self, clientId: str):
        """
        returns the percentage of images already segmented by the given client
        (=annotator) and the display string:
        (number segmented by that client)/(total number of imageData)
        """
        # NOTE(review): an unknown clientId raises KeyError here, matching the
        # original behavior; callers are expected to pass a known client id.
        numSegementedByClient = len(self.clientToImageIds[clientId])
        fraction = numSegementedByClient / self.getTotalNumImages()
        percentage = int(fraction * 100)
        idxSegmentedByClient: str = f"{numSegementedByClient}/{self.getTotalNumImages()}"
        return percentage, idxSegmentedByClient

    def getApprovedSegmentationIds(self) -> List[str]:
        """
        returns the list of ids of all approved imageData
        """
        idsOfApprovedSementations = []
        for imageId, imageData in self.nameToImageData.items():
            if imageData.isApproved():
                idsOfApprovedSementations.append(imageId)
        return idsOfApprovedSementations

    def getSegmentedImageIds(self) -> List[str]:
        """
        returns the list of ids of all segmented imageData
        """
        idsOfSegmented = []
        for imageId, imageData in self.nameToImageData.items():
            if imageData.isSegemented():
                idsOfSegmented.append(imageId)
        return idsOfSegmented

    def isBlank(self, string) -> bool:
        # True for None, "" and whitespace-only strings.
        return not (string and string.strip())
|
3cec52ba2ef0b178a274f1fde490dd79180f6197
|
6180ebca3ef7f42c66fef007fb221e340816b47e
|
/pyro4/pyro_service.py
|
7082ae8b42d36c69aedf9ba999071f1b38157154
|
[
"MIT"
] |
permissive
|
openquantumhardware/qick
|
3af30c30cffb8f3fb67b0cf4d3cab9731e6930d6
|
3d91844e34de86bc0c05f973583ac29e64c00e1a
|
refs/heads/main
| 2023-09-04T15:21:27.126805
| 2023-08-24T22:12:38
| 2023-08-24T22:12:38
| 398,393,723
| 126
| 48
|
MIT
| 2023-09-13T21:36:17
| 2021-08-20T20:31:23
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 808
|
py
|
pyro_service.py
|
#!/usr/bin/env python3
"""This file starts a pyro nameserver and the proxying server."""

from pathlib import Path
import subprocess
import time

from qick.pyro import start_server

# Directory containing this script; used to resolve the relative bitfile path.
HERE = Path(__file__).parent

############
# parameters
############
# FPGA bitfile loaded by the proxy server, relative to this script.
bitfile = '../qick_lib/qick/qick_4x2.bit'
# Name the proxy object is registered under in the Pyro nameserver.
proxy_name ='rfsoc'
# Port the Pyro nameserver listens on.
ns_port = 8000
# set to 0.0.0.0 to allow access from outside systems
ns_host = 'localhost'

############

# start the nameserver process
# Environment variables enable pickle serialization for pyro4; shell=True is
# needed so the VAR=value prefixes are interpreted by the shell.
ns_proc = subprocess.Popen(
    [f'PYRO_SERIALIZERS_ACCEPTED=pickle PYRO_PICKLE_PROTOCOL_VERSION=4 pyro4-ns -n {ns_host} -p {ns_port}'],
    shell=True
)

# wait for the nameserver to start up
time.sleep(5)

# start the qick proxy server
start_server(
    bitfile=str(HERE / bitfile),
    proxy_name=proxy_name,
    # NOTE(review): hardcoded 'localhost' instead of the configurable ns_host
    # above — presumably intentional because the server runs on the same host
    # as the nameserver, but confirm; external access is governed by ns_host.
    ns_host='localhost',
    ns_port=ns_port
)
|
f2ac7e5e96e1a0c946409307df871fa78031a74e
|
40d8473719d5c1fabdd9e488edd4c482aed2f5da
|
/prob_mbrl/models/core.py
|
1cfa815fd0f283f6984499bd9628fe60103a78a4
|
[
"MIT"
] |
permissive
|
mcgillmrl/prob_mbrl
|
2ac0309b221a45e7feda99b6cdb24d45330755f2
|
0bfb70b31675310539f241f62bba8427d940d551
|
refs/heads/master
| 2021-06-03T14:10:10.130320
| 2021-04-14T16:45:04
| 2021-04-14T16:45:04
| 139,202,209
| 113
| 15
|
MIT
| 2020-09-25T19:49:29
| 2018-06-29T22:43:43
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 10,701
|
py
|
core.py
|
import copy
import inspect
import multiprocessing
import numpy as np
import torch
from .modules import BDropout, BSequential, SpectralNorm
from collections import OrderedDict, Iterable
from functools import partial
from ..utils.angles import to_complex
from prob_mbrl.models import activations
from ..utils.core import sin_squashing_fn
def mlp(input_dims,
        output_dims,
        hidden_dims=[200, 200],
        nonlin=torch.nn.ReLU,
        output_nonlin=None,
        weights_initializer=partial(torch.nn.init.xavier_normal_,
                                    gain=torch.nn.init.calculate_gain('relu')),
        biases_initializer=partial(torch.nn.init.uniform_, a=-1e-1, b=1e-1),
        hidden_biases=True,
        output_biases=True,
        dropout_layers=BDropout,
        input_dropout=None,
        spectral_norm=False,
        spectral_norm_output=False,
        layer_norm=False):
    '''
    Utility function for creating multilayer perceptrons of varying depth.

    Args:
        input_dims (int): size of the input layer.
        output_dims (int): size of the output layer.
        hidden_dims (list of int): one entry per hidden layer.
            NOTE(review): mutable default, but it is only read here, never
            mutated, so the usual shared-default pitfall does not apply.
        nonlin: activation class, or an iterable with one class per
            hidden layer.
        output_nonlin: optional activation class applied after the output
            layer.
        weights_initializer / biases_initializer: callables applied to the
            weights/biases of every layer (LayerNorm weights excluded).
        hidden_biases / output_biases (bool): whether the linear layers use
            bias terms.
        dropout_layers: dropout class or instance, or an iterable with one
            entry per hidden layer; classes are instantiated with a name.
        input_dropout: optional dropout class/instance applied to the input.
        spectral_norm / spectral_norm_output (bool): wrap hidden/output
            linear layers in SpectralNorm.
        layer_norm (bool): insert a LayerNorm before each activation.

    Returns:
        BSequential: the assembled network, float()-converted, with weights
        initialized by calling the initializers once.
    '''
    # Layer sizes: input followed by each hidden layer.
    dims = [input_dims] + hidden_dims
    # Broadcast a single dropout class/instance or activation class to all
    # hidden layers.
    if not isinstance(dropout_layers, Iterable):
        dropout_layers = [copy.deepcopy(dropout_layers)] * (len(hidden_dims))
    if not isinstance(nonlin, Iterable):
        nonlin = [nonlin] * (len(hidden_dims))
    # Ordered module dict preserves layer insertion order for BSequential.
    modules = OrderedDict()
    # add input dropout
    if inspect.isclass(input_dropout):
        input_dropout = input_dropout(name='drop_input')
    if input_dropout is not None:
        modules['drop_input'] = input_dropout
    # add hidden layers
    for i, (din, dout) in enumerate(zip(dims[:-1], dims[1:])):
        drop_i = dropout_layers[i]
        if inspect.isclass(drop_i):
            drop_i = drop_i(name='drop%d' % i)
        # fully connected layer
        fc = torch.nn.Linear(din, dout, bias=hidden_biases)
        if spectral_norm:
            fc = SpectralNorm(fc)
        modules['fc%d' % i] = fc
        # layer norm pre_activations
        if layer_norm:
            modules['ln%d' % i] = torch.nn.LayerNorm(dout)
        # activation
        if callable(nonlin[i]):
            modules['nonlin%d' % i] = nonlin[i]()
        # dropout (regularizes next layer)
        if drop_i is not None:
            modules['drop%d' % i] = drop_i
    # project to output dimensions
    fc_out = torch.nn.Linear(dims[-1], output_dims, bias=output_biases)
    if spectral_norm_output:
        fc_out = SpectralNorm(fc_out)
    modules['fc_out'] = fc_out
    # add output activation, if specified
    if callable(output_nonlin):
        modules['fc_nonlin'] = output_nonlin()
    # build module
    net = BSequential(modules)

    # initialize weights
    def reset_fn():
        # Apply the weight initializer to every module with a weight tensor,
        # except LayerNorm (its affine parameters should stay at their
        # defaults).
        if callable(weights_initializer):
            def fn(module):
                if hasattr(module, 'weight') and not isinstance(
                        module, torch.nn.LayerNorm):
                    weights_initializer(module.weight)
            net.apply(fn)
        if callable(biases_initializer):
            def fn(module):
                if hasattr(module, 'bias') and module.bias is not None:
                    biases_initializer(module.bias)
            net.apply(fn)

    reset_fn()
    net.float()
    return net
class ModelEnsemble(torch.nn.Module):
    """Ensemble of N deep copies of a model, registered as submodules
    ``model_0`` .. ``model_{N-1}`` and evaluated via a process pool.

    Args:
        model (torch.nn.Module): template module; each member is a deepcopy.
        N_ensemble (int): number of ensemble members.
    """

    def __init__(self, model, N_ensemble=5):
        super(ModelEnsemble, self).__init__()
        self.N_ensemble = N_ensemble
        for i in range(N_ensemble):
            setattr(self, 'model_%d' % i, copy.deepcopy(model))

    def f(self, args):
        # Unpack (input, member index, positional args, keyword args) and
        # evaluate the i-th ensemble member on the input.
        x, i, args, kwargs = args
        model = getattr(self, 'model_%d' % i)
        return model(x, *args, **kwargs)

    def forward(self, x, *args, **kwargs):
        """Evaluate every ensemble member on the same input; returns the list
        of per-member outputs.

        Fixes over the original: ``pool.map(self.f)`` was called without an
        iterable (TypeError), and ``pool.close`` was referenced but never
        called, leaking worker processes.
        """
        pool = multiprocessing.Pool(processes=3)
        try:
            ret = pool.map(self.f,
                           [(x, i, args, kwargs)
                            for i in range(self.N_ensemble)])
        finally:
            pool.close()
            pool.join()
        return ret
class Regressor(torch.nn.Module):
    """Wraps a regression model with input/output normalization, optional
    angle-to-complex input encoding, and an optional output density.

    Buffers hold the dataset (X, Y) and the normalization statistics:
    mx/Sx/iSx for inputs, my/Sy/iSy for outputs (S* are 4x the std; iS* are
    their reciprocals).

    Args:
        model (torch.nn.Module): the underlying regression network.
        output_density: optional callable mapping raw model outputs to a
            predictive distribution; receives ``scaling_params=(my, Sy)``.
        angle_dims (list of int): input dimensions to encode via to_complex.
            (Mutable default is safe here: it is only converted to a tensor.)
    """

    def __init__(self, model, output_density=None, angle_dims=[]):
        super(Regressor, self).__init__()
        self.model = model
        self.output_density = output_density
        self.register_buffer('angle_dims', torch.tensor(angle_dims).long())
        self.register_buffer('X', torch.ones([1, 1]))
        self.register_buffer('Y', torch.ones([1, 1]))
        self.register_buffer('mx', torch.zeros([1, 1]))
        self.register_buffer('Sx', torch.ones([1, 1]))
        self.register_buffer('iSx', torch.ones([1, 1]))
        self.register_buffer('my', torch.zeros([1, 1]))
        self.register_buffer('Sy', torch.ones([1, 1]))
        self.register_buffer('iSy', torch.ones([1, 1]))

    def set_dataset(self, X, Y, N_ensemble=-1, p=0.5):
        """Store the dataset and recompute normalization statistics.

        When N_ensemble > 1, draws Bernoulli(p) sample masks of shape
        (len(X), N_ensemble) into the ``masks`` buffer.
        """
        if len(self.angle_dims):
            self.X.data = to_complex(X, self.angle_dims)
        else:
            self.X.data = X
        self.Y.data = Y
        self.mx.data = self.X.mean(0, keepdim=True)
        self.Sx.data = 4.0 * self.X.std(0, keepdim=True)
        # Avoid division by zero for constant input dimensions.
        self.Sx.data[self.Sx == 0] = 4.0
        self.iSx.data = self.Sx.reciprocal()
        self.my.data = self.Y.mean(0, keepdim=True)
        self.Sy.data = 4.0 * self.Y.std(0, keepdim=True)
        self.Sy.data[self.Sy == 0] = 4.0
        self.iSy.data = self.Sy.reciprocal()
        if N_ensemble > 1:
            # Fix: no 'masks' buffer was ever registered, so the original
            # `self.masks.data = ...` raised AttributeError; register the
            # buffer on first use instead.
            masks = torch.bernoulli(p * torch.ones(X.shape[0], N_ensemble))
            if hasattr(self, 'masks'):
                self.masks.data = masks
            else:
                self.register_buffer('masks', masks)

    def load(self, state_dict):
        # Copy matching parameters/buffers from state_dict; extra keys are
        # silently ignored.
        params = dict(self.named_parameters())
        params.update(self.named_buffers())
        for k, v in state_dict.items():
            if k in params:
                params[k].data = v.data.clone()

    def regularization_loss(self):
        return self.model.regularization_loss()

    def resample(self, *args, **kwargs):
        self.model.resample(*args, **kwargs)
        if self.output_density is not None:
            self.output_density.resample(*args, **kwargs)

    def forward(self, x, normalize=True, **kwargs):
        ''' This assumes that the network outputs the parameters for
            an isotropic Gaussian predictive distribution, for each
            batch sample.

            When ``normalize`` is True, inputs are standardized with
            (mx, iSx) and outputs are rescaled with (my, Sy) — either by
            the output_density (via scaling_params) or directly.
        '''
        if len(self.angle_dims) > 0:
            x = to_complex(x, self.angle_dims)
        # scale and center inputs
        if normalize:
            x = (x - self.mx) * self.iSx
        outs = self.model(x, **kwargs)
        if callable(self.output_density):
            scaling_params = (self.my, self.Sy) if normalize else None
            outs = self.output_density(outs,
                                       scaling_params=scaling_params,
                                       **kwargs)
        else:
            outs = outs * self.Sy + self.my
        return outs
class Policy(torch.nn.Module):
    """Policy wrapper that squashes raw model outputs into the control range
    [minU, maxU] via tanh, with optional angle-to-complex input encoding and
    transparent numpy-in / numpy-out handling.
    """

    def __init__(
            self,
            model,
            maxU=1.0,
            minU=None,
            angle_dims=[],
    ):
        super(Policy, self).__init__()
        self.model = model
        self.register_buffer('angle_dims', torch.tensor(angle_dims).long())
        # Default to a symmetric control range: minU = -maxU.
        if minU is None:
            minU = -maxU
        # u = scale * tanh(raw) + bias maps (-1, 1) onto (minU, maxU).
        scale = 0.5 * (maxU - minU)
        bias = 0.5 * (maxU + minU)
        self.register_buffer('scale', torch.tensor(scale).squeeze())
        self.register_buffer('bias', torch.tensor(bias).squeeze())

    def regularization_loss(self):
        return self.model.regularization_loss()

    def resample(self, *args, **kwargs):
        self.model.resample(*args, **kwargs)

    def load(self, state_dict):
        # Copy matching parameters/buffers from state_dict; extra keys are
        # silently ignored.
        params = dict(self.named_parameters())
        params.update(self.named_buffers())
        for k, v in state_dict.items():
            if k in params:
                params[k].data = v.data.clone()

    def forward(self, x, **kwargs):
        """Compute a saturated control for state ``x``; returns a numpy array
        when ``x`` is one, else a tensor."""
        return_numpy = isinstance(x, np.ndarray)
        # Default kwargs forwarded to the model (only set if absent).
        kwargs['resample'] = kwargs.get('resample', True)
        kwargs['return_samples'] = kwargs.get('return_samples', True)
        if return_numpy:
            x = torch.tensor(x,
                             dtype=self.scale.dtype,
                             device=self.scale.device)
        else:
            x = x.to(dtype=self.scale.dtype, device=self.scale.device)
        if x.dim() == 1:
            # Promote a single state vector to a batch of one.
            x = x[None, :]
        if len(self.angle_dims) > 0:
            x = to_complex(x, self.angle_dims)
        u = self.model(x, **kwargs)
        if isinstance(u, tuple):
            # Presumably (output, noise sample) — combined additively;
            # confirm against the model implementations.
            u, unoise = u
            u = u + unoise
        # saturate output
        # u = self.scale * sin_squashing_fn(u * 2 / 3.0) + self.bias
        u = self.scale * u.tanh() + self.bias
        if return_numpy:
            return u.detach().cpu().numpy()
        else:
            return u
class DynamicsModel(Regressor):
    """Regressor specialized for dynamics models: inputs are (state, action)
    pairs and outputs are state deltas, optionally with a learned reward in
    the last output column (used when no external reward_func is given).
    """

    def __init__(self, model, reward_func=None, predict_done=False, **kwargs):
        super(DynamicsModel, self).__init__(model, **kwargs)
        # Range of rewards observed in the dataset (filled by set_dataset).
        self.register_buffer('maxR', torch.ones([1, 1]))
        self.register_buffer('minR', torch.ones([1, 1]))
        # NOTE(review): predict_done is accepted but never stored or used in
        # this class — confirm whether it is consumed elsewhere.
        self.reward_func = reward_func

    def set_dataset(self, X, Y):
        super(DynamicsModel, self).set_dataset(X, Y)
        # Assumes the last column of Y holds the reward signal; track its
        # observed range.
        D = self.Y.shape[-1] - 1
        R = self.Y.index_select(-1, torch.tensor(D, device=self.Y.device))
        self.maxR.data = R.max()
        self.minR.data = R.min()

    def forward(self, inputs, separate_outputs=False, deltas=True, **kwargs):
        """Predict next-state (or state delta) and reward.

        ``inputs`` may be a (prev_states, actions) tuple/list or a single
        concatenated tensor. With ``deltas=True`` the state part of the
        output is the raw delta; otherwise prev_states + delta.
        """
        inputs_as_tuple = isinstance(inputs, tuple) or isinstance(inputs, list)
        if inputs_as_tuple:
            prev_states, actions = inputs[0], inputs[1]
            inputs = torch.cat([prev_states, actions], -1)

        # forward pass on model
        outs = super(DynamicsModel, self).forward(inputs, **kwargs)

        # if not returning samples, outs will be a tuple consisting of the
        # parameters of the output distribution
        return_samples = kwargs.get("return_samples", False)
        if not return_samples:
            return outs

        # if we are returning samples, the output will be either:
        # 1) a tensor whose last dimension is of size D+1, when the reward
        # function is being learned, or of size D, when an external reward
        # function is available.
        # 2) a tuple where the first element is the tensor described above and
        # the a corresponding tuple of measurement noise samples
        if not inputs_as_tuple:
            # Recover prev_states from the concatenated input; assumes the
            # first D input columns are the state — TODO confirm.
            D = outs.shape[-1]
            prev_states, actions = inputs.split(D, -1)
        if callable(self.reward_func):
            # if we have a known reward function
            dstates = outs
            rewards = self.reward_func(prev_states + dstates, actions)
        else:
            # assume rewards come from the last dimension of the output
            dstates, rewards = outs.split(D, -1)

        states = dstates if deltas else prev_states + dstates
        if separate_outputs:
            return states, rewards
        return torch.cat([states, rewards], -1)
|
6d903dee6549cd10df040e35af937fc469810020
|
380face90c8169eeca9e61af566ab1ba7590c05b
|
/vimdoc/args.py
|
12fcb204e1b509bb727c4bf4048637b07538b9f0
|
[
"Apache-2.0"
] |
permissive
|
google/vimdoc
|
778d41b742e611aeea7e6e5645a360395f9c8bbe
|
ed17321a17a8cf4c21d2f7f77352fc9b0fb42e66
|
refs/heads/main
| 2023-08-26T05:16:55.444522
| 2022-11-26T18:56:21
| 2022-11-26T18:56:21
| 17,838,265
| 250
| 37
|
Apache-2.0
| 2023-04-10T00:01:12
| 2014-03-17T18:12:29
|
Python
|
UTF-8
|
Python
| false
| false
| 806
|
py
|
args.py
|
import argparse
import os
import vimdoc
try:
import shtab
except ImportError:
from . import _shtab as shtab
def Source(path):
  """Argparse type function: an existing, readable directory path."""
  checks = (
      (os.path.isdir(path), '{} not found'),
      (os.access(path, os.R_OK), 'Cannot access {}'),
  )
  for ok, message in checks:
    if not ok:
      raise argparse.ArgumentTypeError(message.format(path))
  return path
parser = argparse.ArgumentParser(
'vimdoc',
formatter_class=argparse.RawTextHelpFormatter,
description='''\
Generate vim helpfiles
Basic usage:
%(prog)s vim-someplugin/
(or %(prog)s .)''')
shtab.add_argument_to(parser)
parser.add_argument(
'plugin', type=Source, metavar='PLUGIN',
help='a vim plugin directory').complete = shtab.DIR
parser.add_argument('--version', action='version',
version='%(prog)s ' + vimdoc.__version__)
|
5f975472428eedee10faaba92c1b0d561dcc4e86
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/PhysicsTools/PatAlgos/python/selectionLayer1/electronCountFilter_cfi.py
|
80af141c59547eefb55cd53baaca26468ed67b02
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 262
|
py
|
electronCountFilter_cfi.py
|
import FWCore.ParameterSet.Config as cms

# module to filter on the number of Electrons
# Counts candidates from the "cleanPatElectrons" collection; with the
# defaults (minNumber=0, maxNumber=999999) every event passes — presumably
# meant to be cloned and tightened in concrete configurations.
countPatElectrons = cms.EDFilter("PATCandViewCountFilter",
    minNumber = cms.uint32(0),
    maxNumber = cms.uint32(999999),
    src = cms.InputTag("cleanPatElectrons")
)
|
cab60a580c195f5cee977051a0d2f59d671c89ff
|
cc8988324ad4b0e161f4b12d16aea4dada52de55
|
/elektronn3/logger.py
|
6412703fc3b73bdc602e829b1cd285aa99d494ce
|
[
"MIT"
] |
permissive
|
ELEKTRONN/elektronn3
|
54e06dc7909a90718adc1844b1925842fae79a1a
|
a8b8fa0b68735a106cc4d947bdb0d6647e991fb3
|
refs/heads/master
| 2023-07-12T06:54:53.627926
| 2023-06-21T15:39:54
| 2023-06-21T15:39:54
| 113,795,977
| 167
| 34
|
MIT
| 2022-05-20T10:58:43
| 2017-12-11T00:53:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,559
|
py
|
logger.py
|
# -*- coding: utf-8 -*-
# ELEKTRONN3 - Neural Network Toolkit
#
# Copyright (c) 2017 - now
# Max Planck Institute of Neurobiology, Munich, Germany
# Authors: Martin Drawitsch, Philipp Schubert, Marius Killinger
import logging
import os
import getpass
import sys
import uuid
import colorlog
def logger_setup():
    """Configure the shared 'elektronn3log' logger.

    Installs two handlers (idempotent — skipped if handlers already exist):
      - a DEBUG-level FileHandler writing to a per-user, per-run temp file
        (/ptmp/<user>/ if available, else /tmp/),
      - an INFO-level colorized stream handler on stdout.
    """
    # Formats for colorlog.LevelFormatter
    log_level_formats = {'DEBUG': '%(log_color)s%(msg)s (%(module)s:%(lineno)d)',
                         'INFO': '%(log_color)s%(msg)s',
                         'WARNING': '%(log_color)sWARNING: %(msg)s (%(module)s:%(lineno)d)',
                         'ERROR': '%(log_color)sERROR: %(msg)s (%(module)s:%(lineno)d)',
                         'CRITICAL': '%(log_color)sCRITICAL: %(msg)s (%(module)s:%(lineno)d)',}
    log_colors = {'DEBUG': 'blue', 'INFO': 'cyan', 'WARNING': 'bold_yellow',
                  'ERROR': 'red', 'CRITICAL': 'red,bg_white'}
    # Initialize logger that can be used
    user_name = getpass.getuser()
    logger = logging.getLogger('elektronn3log')
    # Only set up the logger if it hasn't already been initialised before:
    if not len(logger.handlers) > 0:
        # File handler logs everything (DEBUG and up).
        logger.setLevel(logging.DEBUG)
        lfile_formatter = logging.Formatter(
            '[%(asctime)s] [%(levelname)s]\t%(message)s',
            datefmt='%Y-%m-%d %H:%M:%S')
        # Random uuid makes each run's log file unique.
        uu = uuid.uuid4()
        # Temporary log file path, to be changed later.
        if os.path.isdir(f'/ptmp/{user_name}'):
            lfile_path = os.path.abspath(f'/ptmp/{user_name}/{uu}_elektronn3.log')
        else:
            lfile_path = os.path.abspath(f'/tmp/{user_name}_{uu}_elektronn3.log')
        lfile_level = logging.DEBUG
        lfile_handler = logging.FileHandler(lfile_path)
        lfile_handler.setLevel(lfile_level)
        lfile_handler.setFormatter(lfile_formatter)
        logger.addHandler(lfile_handler)
        lstream_handler = colorlog.StreamHandler(sys.stdout)
        lstream_handler.setFormatter(
            colorlog.LevelFormatter(fmt=log_level_formats,
                                    log_colors=log_colors))
        # set this to logging.DEBUG to enable output for logger.debug() calls
        lstream_level = logging.INFO
        lstream_handler.setLevel(lstream_level)
        logger.addHandler(lstream_handler)
        # Don't also pass records to the root logger's handlers.
        logger.propagate = False
    # Dead manual-test code: flip to True to print one message per level.
    if False:  # Test log levels:
        logger.critical('== critical')
        logger.error('== error')
        logger.warning('== warn')
        logger.info('== info')
        logger.debug('== debug')
|
f3a47dbd5a1afe1f8d3ea9e04bfae79ee7a9e67f
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/Rasterio_osgeo_shapely_PIL_pyproj_numpy/source/rasterio/rio/convert.py
|
90d6041e99c97727241e586a216db4cec43577e9
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 3,282
|
py
|
convert.py
|
"""File translation command"""
import logging
import click
from cligj import format_opt
import numpy as np
from .helpers import resolve_inout
from . import options
import rasterio
@click.command(short_help="Copy and convert raster dataset.")
@click.argument(
    'files',
    nargs=-1,
    type=click.Path(resolve_path=True),
    required=True,
    metavar="INPUT OUTPUT")
@options.output_opt
@format_opt
@options.dtype_opt
@click.option('--scale-ratio', type=float, default=None,
              help="Source to destination scaling ratio.")
@click.option('--scale-offset', type=float, default=None,
              help="Source to destination scaling offset.")
@options.rgb_opt
@options.creation_options
@click.pass_context
def convert(
        ctx, files, output, driver, dtype, scale_ratio, scale_offset,
        photometric, creation_options):
    """Copy and convert raster datasets to other data types and formats.

    Data values may be linearly scaled when copying by using the
    --scale-ratio and --scale-offset options. Destination raster values
    are calculated as

        dst = scale_ratio * src + scale_offset

    For example, to scale uint16 data with an actual range of 0-4095 to
    0-255 as uint8:

        $ rio convert in16.tif out8.tif --dtype uint8 --scale-ratio 0.0625

    Format specific creation options may also be passed using --co. To
    tile a new GeoTIFF output file, do the following.

        --co tiled=true --co blockxsize=256 --co blockysize=256

    To compress it using the LZW method, add

        --co compress=LZW
    """
    # Verbosity is optionally carried on the click context by the parent group.
    verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1
    with rasterio.Env(CPL_DEBUG=verbosity > 2):
        outputfile, files = resolve_inout(files=files, output=output)
        inputfile = files[0]
        with rasterio.open(inputfile) as src:
            # Use the input file's profile, updated by CLI
            # options, as the profile for the output file.
            profile = src.profile
            # Older profiles used the 'affine' key; normalize to 'transform'.
            if 'affine' in profile:
                profile['transform'] = profile.pop('affine')
            if driver:
                profile['driver'] = driver
            if dtype:
                profile['dtype'] = dtype
            dst_dtype = profile['dtype']
            if photometric:
                creation_options['photometric'] = photometric
            profile.update(**creation_options)
            with rasterio.open(outputfile, 'w', **profile) as dst:
                # Read all bands at once; scaling is applied in-place below.
                data = src.read()
                if scale_ratio:
                    # Cast to float64 before multiplying.
                    data = data.astype('float64', casting='unsafe', copy=False)
                    np.multiply(
                        data, scale_ratio, out=data, casting='unsafe')
                if scale_offset:
                    # My understanding of copy=False is that this is a
                    # no-op if the array was cast for multiplication.
                    data = data.astype('float64', casting='unsafe', copy=False)
                    np.add(
                        data, scale_offset, out=data, casting='unsafe')
                # Cast to the output dtype and write.
                result = data.astype(dst_dtype, casting='unsafe', copy=False)
                dst.write(result)
|
c7752659d8135808218a63090edb2feaeab69cc9
|
9205772e83dc18e0489296f683892ff40373e48e
|
/locust_plugins/transaction_manager.py
|
e524495f643183379959db7391e8698a45d95e5c
|
[
"Apache-2.0"
] |
permissive
|
SvenskaSpel/locust-plugins
|
e50657bca6469b56232b6439c0fbc8b30c05c6ba
|
b39d600a4c33964e7da32c6cebf0635ad071ccaa
|
refs/heads/master
| 2023-08-19T03:38:53.585422
| 2023-08-10T13:28:29
| 2023-08-10T13:28:29
| 209,288,596
| 426
| 140
|
Apache-2.0
| 2023-09-13T10:14:08
| 2019-09-18T11:10:44
|
Python
|
UTF-8
|
Python
| false
| false
| 10,293
|
py
|
transaction_manager.py
|
from locust import events
from time import time
from datetime import datetime
from csv import writer as csv_writer
from io import StringIO
import locust.stats
from locust.runners import WorkerRunner
class TransactionManager:
    """
    Transaction Manager allows transactions spanning multiple tasks to be logged
    using start_transaction and end_transaction methods

    Stats are written to file when using --log-transactions-in-file
    otherwise, two web endpoints are available to collect full and summary stats:
    /stats/transactions/csv and /stats/transactions/all/csv

    when running in master/worker mode, all data is sent to the master during execution
    """

    # Class-level state: shared across every instance by design so that the
    # master/standalone process can aggregate, serve, and flush from one place.
    transactions_filename = ""
    transactions_summary_filename = ""
    flush_size = 100  # number of buffered rows before a file flush
    field_delimiter = ","
    row_delimiter = "\n"
    timestamp_format = "%Y-%m-%d %H:%M:%S"
    transactions = []  # pending CSV rows not yet flushed/served
    completed_transactions = {}  # transaction name -> list of finished dicts
    user_count = 0  # NOTE(review): appears unused in this class — confirm
    csv_headers = [
        "start_time",
        "duration",
        "transaction_name",
        "user_count",
        "success",
        "failure_message",
    ]

    def __init__(self):
        # Per-instance state: transactions currently in flight, keyed by name.
        self.transaction_count = 0
        self.inprogress_transactions = {}

    def start_transaction(self, transaction_name):
        """Begin a named transaction; records start time and user count."""
        now = time()
        transaction = {}
        transaction["transaction_name"] = transaction_name
        transaction["start_time"] = now
        transaction["user_count"] = self.runner.user_count if self.runner else 0
        self.inprogress_transactions[transaction_name] = transaction
        return transaction_name

    def end_transaction(self, transaction_name, success=True, failure_message=""):
        """Finish a previously started transaction and queue it for reporting."""
        now = time()
        t = self.inprogress_transactions[transaction_name]
        t["end_time"] = now
        start_time = t["start_time"]
        # Duration is stored in milliseconds.
        t["duration"] = round((now - start_time) * 1000)
        t["success"] = success
        t["failure_message"] = failure_message
        # Flat CSV row appended to the full log buffer.
        self.transactions.append(
            [
                datetime.fromtimestamp(t["start_time"]).strftime(self.timestamp_format),
                round(t["duration"]),
                t["transaction_name"],
                t["user_count"],
                t["success"],
                t["failure_message"],
            ]
        )
        if t["transaction_name"] not in self.completed_transactions:
            self.completed_transactions[t["transaction_name"]] = []
        self.completed_transactions[t["transaction_name"]].append(t)
        del self.inprogress_transactions[transaction_name]
        # Flush only when this process owns the file (i.e. not a worker).
        if (
            len(self.transactions) >= self.flush_size
            and self.log_transactions_in_file
            and not isinstance(self.env.runner, WorkerRunner)
        ):
            self._flush_to_log()

    @classmethod
    def _command_line_parser(cls, parser):
        # Reuse locust's existing "Request statistics options" group if present,
        # otherwise create it, then register our flag there.
        group = None
        if parser._action_groups:
            group = next((x for x in parser._action_groups if x.title == "Request statistics options"), None)
        if not group:
            group = parser.add_argument_group(title="Request statistics options")
        group.add_argument(
            "--log-transactions-in-file",
            help="Log transactions in a file rather than using the web ui (added by locust-plugins)",
            action="store_true",
            default=False,
        )

    @classmethod
    def _create_results_log(cls):
        # Open the results CSV and write the header row immediately.
        cls.results_file = open(cls.transactions_filename, "w")
        cls.results_file_writer = cls._create_csv_writer(cls.results_file)
        cls.results_file_writer.writerow(cls.csv_headers)
        cls.results_file.flush()

    @classmethod
    def _create_csv_writer(cls, buffer):
        # Central place for the CSV dialect (delimiter/line terminator).
        return csv_writer(buffer, delimiter=cls.field_delimiter, lineterminator=cls.row_delimiter)

    @classmethod
    def _flush_to_log(cls):
        # Drain the buffered rows to disk and reset the buffer.
        cls.results_file_writer.writerows(cls.transactions)
        cls.results_file.flush()
        cls.transactions = []

    @classmethod
    def _write_final_log(cls, **kwargs):
        """test_stop hook: write remaining rows plus the summary file."""
        if not isinstance(cls.env.runner, WorkerRunner):
            if cls.log_transactions_in_file and not cls.results_file.closed:
                cls.results_file_writer.writerows(cls.transactions)
                cls.results_file.close()
                # also write summary file in stats.py style
                with open(cls.transactions_summary_filename, "w") as f:
                    writer = cls._create_csv_writer(f)
                    writer.writerows(cls._get_transactions_summary())

    @classmethod
    def _init_filenames(cls):
        # determine whether to output to file, (if options parsed)
        if cls.env.parsed_options:
            cls.log_transactions_in_file = cls.env.parsed_options.log_transactions_in_file
            if cls.env.parsed_options.csv_prefix:
                cls.transactions_filename = f"{cls.env.parsed_options.csv_prefix}_transactions.csv"
                cls.transactions_summary_filename = f"{cls.env.parsed_options.csv_prefix}_transactions_summary.csv"
        else:
            cls.log_transactions_in_file = False
        # Fall back to timestamped filenames when no csv prefix was given.
        if cls.log_transactions_in_file and not cls.transactions_filename:
            timestamp = datetime.fromtimestamp(time()).strftime("%Y_%m_%d_%H_%M_%S")
            cls.transactions_filename = f"transactions_{timestamp}.csv"
            cls.transactions_summary_filename = f"transactions_summary_{timestamp}.csv"

    @classmethod
    def on_locust_init(cls, environment, runner, **kwargs):
        """init hook: capture env/runner, open files, register web routes."""
        cls.env = environment
        cls.runner = runner
        cls._init_filenames()
        if cls.log_transactions_in_file and not isinstance(cls.env.runner, WorkerRunner):
            cls._create_results_log()
        if cls.env.web_ui:

            # this route available if a csv isn't being written to (--log-transactions-in-file is omitted)
            @cls.env.web_ui.app.route("/stats/transactions/all/csv")
            def _transactions_results_page():
                headers = {}
                headers["Content-type"] = "text/csv"
                headers["Content-disposition"] = f"attachment;filename={cls.transactions_filename}"
                with StringIO() as buffer:
                    writer = cls._create_csv_writer(buffer)
                    writer.writerows([cls.csv_headers] + cls.transactions)
                    response = buffer.getvalue()
                return cls.env.web_ui.app.response_class(
                    response=response,
                    headers=headers,
                    status=200,
                    mimetype="text/csv",
                )

            # provides summary stats like requests endpoint
            @cls.env.web_ui.app.route("/stats/transactions/csv")
            def _transactions_summary_page():
                headers = {}
                headers["Content-type"] = "text/csv"
                headers["Content-disposition"] = f"attachment;filename={cls.transactions_summary_filename}"
                with StringIO() as buffer:
                    writer = cls._create_csv_writer(buffer)
                    writer.writerows(cls._get_transactions_summary())
                    response = buffer.getvalue()
                return cls.env.web_ui.app.response_class(
                    response=response,
                    headers=headers,
                    status=200,
                    mimetype="text/csv",
                )

    @classmethod
    def _get_transactions_summary(cls):
        # create a summary in the same format as used for requests in stats.py
        summary = []
        summary.append(
            [
                "Type",
                "Name",
                "Request Count",
                "Failure Count",
                "Median Response Time",
                "Average Response Time",
                "Min Response Time",
                "Max Response Time",
            ]
            + locust.stats.get_readable_percentiles(locust.stats.PERCENTILES_TO_REPORT)
        )
        for tname in cls.completed_transactions:  # pylint: disable=consider-using-dict-items
            fields = []
            # fill the field that holds request method
            fields.append("Transaction")
            fields.append(tname)
            durations = [sub["duration"] for sub in cls.completed_transactions[tname]]
            fields.append(str(len(durations)))
            successes = [sub["success"] for sub in cls.completed_transactions[tname]]
            failure_count = successes.count(False)
            fields.append(str(failure_count))
            sorted_durations = sorted(durations)
            median = sorted_durations[int(0.5 * len(sorted_durations))]
            fields.append(str(median))
            avg = round(sum(sorted_durations) / len(sorted_durations))
            fields.append(str(avg))
            fields.append(str(round(sorted_durations[0])))
            fields.append(str(round(sorted_durations[-1])))
            # loop through the other metrics set out in stats.py
            for p in locust.stats.PERCENTILES_TO_REPORT:
                fields.append(str(sorted_durations[int(p * len(sorted_durations)) - 1]))
            summary.append(fields)
        return summary

    @classmethod
    def _report_to_master(cls, data, **kwargs):
        """Worker-side hook: hand pending data to the master and reset buffers."""
        data["transactions"] = cls.transactions
        cls.transactions = []
        data["completed_transactions"] = cls.completed_transactions
        cls.completed_transactions = {}

    @classmethod
    def _worker_report(cls, data, **kwargs):
        """Master-side hook: merge data reported by a worker."""
        if "transactions" in data:
            transactions = data["transactions"]
            cls.transactions += transactions
            completed_transactions = data["completed_transactions"]
            for t in completed_transactions:
                if t not in cls.completed_transactions:
                    cls.completed_transactions[t] = []
                cls.completed_transactions[t] += completed_transactions[t]
# Wire the manager into locust's lifecycle and master/worker messaging hooks.
events.init.add_listener(TransactionManager.on_locust_init)
events.init_command_line_parser.add_listener(TransactionManager._command_line_parser)
events.report_to_master.add_listener(TransactionManager._report_to_master)
events.worker_report.add_listener(TransactionManager._worker_report)
events.test_stop.add_listener(TransactionManager._write_final_log)
|
003f310d0f7b1d5e1396e3bab96617c327c83e06
|
d1f0538cd9110a587dce9279a2e3ba4a9137e5e4
|
/fill_skillset.py
|
e13e2a70bff660f317d82d8e9b66e3db3de99c10
|
[
"MIT"
] |
permissive
|
umihico/PortfolioHub
|
79176e64dc3e5f9f5c8d6e02fc9a3c6af7903e0a
|
aabd4b32129df5f6cff526f9440d0509d3fc6db5
|
refs/heads/master
| 2023-04-03T02:11:52.813428
| 2023-01-03T23:53:19
| 2023-01-03T23:53:19
| 157,170,634
| 186
| 20
| null | 2023-03-15T02:59:02
| 2018-11-12T07:06:26
|
PHP
|
UTF-8
|
Python
| false
| false
| 3,453
|
py
|
fill_skillset.py
|
from api import get
from db import get_db
import time
import json
from pprint import pprint
def fill_skillset():
    """Pick one stale user, recompute their language skill set, persist it.

    Returns the literal string 'zero' when no user needs refreshing,
    otherwise a dict with the processed username and its skill mapping.
    """
    connection = get_db()
    cursor = connection.cursor()
    candidates = select_targets(cursor)
    if not candidates:
        return 'zero'
    target = candidates[0]
    skills = username_to_skills(target, cursor)
    print(target, skills)
    update_database(target, skills, cursor)
    return {'username': target, 'skill_set_dict': skills}
def select_targets(cur):
    """Return a de-duplicated list of usernames whose skill data is stale.

    Stale = has a gif and skills never computed or older than 10 days,
    ordered by how far the repository update outruns the skills update.
    """
    cur.execute("select user from portfolios where gif is not null and ( skills_updated_at<date_sub(curdate(), interval 10 day) or skills_updated_at is null) order by datediff(repository_updated_at, ifnull(skills_updated_at, STR_TO_DATE('1900,1,1','%Y,%m,%d'))) desc limit 100")
    unique_users = {row[0] for row in cur.fetchall()}
    return list(unique_users)
def username_to_skills(username, cur):
    """Fetch a user's repositories from the GitHub API and compute skills.

    On HTTP 404 the user no longer exists upstream, so their portfolio row
    is deleted (the 'Not Found' payload then yields an empty skill set in
    calc_skillset). Any other HTTP error is logged and re-raised.

    Returns the {language: percentage} dict produced by calc_skillset.
    """
    url = f'https://api.github.com/users/{username}/repos?per_page=100&page=1&sort=pushed'
    # Fetch outside the try block: previously a transport failure in get()
    # left `res` unbound, so the except handler crashed with an
    # UnboundLocalError that masked the real error. Now it propagates as-is.
    res = get(url)
    try:
        res.raise_for_status()
    except Exception:
        if res.status_code == 404:
            # User gone upstream: purge them from our table.
            cur.execute(
                "DELETE FROM portfolios WHERE user = %(user)s",
                {'user': username}
            )
        else:
            pprint(res.json())
            print('status_code', res.status_code)
            raise
    return calc_skillset(res.json())
class RepositoryScore():
    """Per-repository scoring record used by calc_skillset.

    Stores the raw repository attributes plus three score slots that are
    filled in during skill computation; all score slots start as None.
    """

    def __init__(self, name, language, size):
        self.name = name
        self.language = language
        self.size = size
        self.date_point = None
        self.size_point = None
        self.total_point = None


def calc_skillset(raw_repositories):
    """Turn a GitHub repos payload into a {language: percentage} mapping.

    Each non-fork repository with a known language is scored by the product
    of a recency rank (index order is treated as recency: earlier entries
    score higher) and a size rank (bigger repos rank higher). Scores are
    summed per language and normalised to truncated integer percentages;
    languages that come out as 0% are dropped.
    """
    # An API error payload ({'message': 'Not Found'}) means no data.
    if 'message' in raw_repositories and raw_repositories['message'] == 'Not Found':
        return {}
    scored = [RepositoryScore(r['name'], r['language'], r['size'])
              for r in raw_repositories
              if not r['fork'] and r['language'] is not None]
    # Recency rank: the last (oldest) repo scores 1, the first scores len(scored).
    for rank, repo in zip(range(len(scored), 0, -1), scored):
        repo.date_point = rank
    # Size rank: 1-based position in ascending size order; repos with equal
    # sizes all receive the highest rank among the ties.
    rank_by_size = {size: rank
                    for rank, size in enumerate(sorted(r.size for r in scored), start=1)}
    for repo in scored:
        repo.size_point = rank_by_size[repo.size]
        repo.total_point = repo.size_point * repo.date_point
    per_language = {}
    for repo in scored:
        per_language[repo.language] = per_language.get(repo.language, 0) + repo.total_point
    total = sum(per_language.values())
    if total == 0:
        return {}
    percentages = {lang: int(points * 100 / total) for lang, points in per_language.items()}
    return {lang: pct for lang, pct in percentages.items() if pct > 0}
def update_database(username, skill_set_dict, cur):
    """Persist the computed skill set (as JSON) and stamp the refresh time."""
    params = {
        "skills": json.dumps(skill_set_dict),
        "skills_updated_at": time.strftime('%Y-%m-%d %H:%M:%S'),
        "user": username,
    }
    cur.execute(
        'UPDATE portfolios SET skills=%(skills)s, skills_updated_at=%(skills_updated_at)s WHERE user=%(user)s', params)
# Script entry point: refreshes a single stale user per invocation.
if __name__ == '__main__':
    fill_skillset()
|
58572af8bcfc2544adef3f0611deb0db5fb77b16
|
cd4be8b6bee2964d063b332c0c8784ab6c89c8e5
|
/opacus/grad_sample/utils.py
|
8b5e8ff50b05a9373bba9d7f48e79be19476aca3
|
[
"Apache-2.0"
] |
permissive
|
pytorch/opacus
|
d55f9c3627943a3c067528849401663cfaf7d622
|
79bdfac28afb526430a938d38513c46936f8670a
|
refs/heads/main
| 2023-09-04T01:03:50.533043
| 2023-08-01T19:37:56
| 2023-08-01T19:37:56
| 226,441,159
| 1,358
| 291
|
Apache-2.0
| 2023-09-11T13:29:37
| 2019-12-07T01:58:09
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,795
|
py
|
utils.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Sequence, Type, Union
import torch.nn as nn
from .grad_sample_module import GradSampleModule
from .gsm_base import AbstractGradSampleModule
from .gsm_exp_weights import GradSampleModuleExpandedWeights
from .gsm_no_op import GradSampleModuleNoOp
def register_grad_sampler(
    target_class_or_classes: Union[Type[nn.Module], Sequence[Type[nn.Module]]]
):
    """
    Registers the decorated function as the ``grad_sampler`` of ``target_class_or_classes``, which is
    the function that will be invoked every time you want to compute a per-sample gradient
    of ``target_class_or_classes``. The signature of every grad_sampler is always the same:

    >>> @register_grad_sampler(MyCustomModel)
    ... def compute_grad_sample(module, activations, backprops):
    ...     pass

    It may help you to take a look at the existing grad_samplers inside Opacus, under ``opacus.grad_sample.``
    """

    def decorator(f):
        # Accept either a single module class or a sequence of them.
        if isinstance(target_class_or_classes, Sequence):
            registrands = target_class_or_classes
        else:
            registrands = [target_class_or_classes]
        for registrand in registrands:
            GradSampleModule.GRAD_SAMPLERS[registrand] = f
        # Return the function unchanged so it stays usable/stackable.
        return f

    return decorator
def wrap_model(model: nn.Module, grad_sample_mode: str, *args, **kwargs):
    """Wrap ``model`` in the grad-sample module class matching ``grad_sample_mode``."""
    module_cls = get_gsm_class(grad_sample_mode)
    # "functorch" shares GradSampleModule but must be forced into functorch mode.
    if grad_sample_mode == "functorch":
        kwargs["force_functorch"] = True
    return module_cls(model, *args, **kwargs)
def get_gsm_class(grad_sample_mode: str) -> Type[AbstractGradSampleModule]:
    """
    Returns AbstractGradSampleModule subclass corresponding to the input mode.

    See README for detailed comparison between grad sample modes.

    :param grad_sample_mode: one of "hooks", "functorch", "ew" or "no_op"
    :return: the matching AbstractGradSampleModule subclass
    :raises ValueError: if ``grad_sample_mode`` is not a recognised mode
    """
    if grad_sample_mode in ["hooks", "functorch"]:
        return GradSampleModule
    elif grad_sample_mode == "ew":
        return GradSampleModuleExpandedWeights
    elif grad_sample_mode == "no_op":
        return GradSampleModuleNoOp
    else:
        # Keep this message in sync with the branches above; it previously
        # omitted the supported "functorch" and "no_op" modes.
        raise ValueError(
            f"Unexpected grad_sample_mode: {grad_sample_mode}. "
            f"Allowed values: hooks, functorch, ew, no_op"
        )
|
ec13c6bafc99e594b9c4501e03a77b6ad3fa77e3
|
5bed30209083d1dce7f8fb0488ae44e3ef79d190
|
/test_model_Projection.py
|
232506f62294bb5bb7e6dca283dd6abb3e753b3f
|
[
"Apache-2.0"
] |
permissive
|
IGLICT/DeepFaceEditing-Jittor
|
bdaf69e127e5ca99982405a93fb59610fe6b401a
|
85e307504dff41e1902937035d1f37862d78e63e
|
refs/heads/main
| 2023-01-24T23:08:20.642926
| 2022-10-20T14:13:24
| 2022-10-20T14:13:24
| 364,292,696
| 269
| 37
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,056
|
py
|
test_model_Projection.py
|
import jittor as jt
from jittor import Module
from jittor import nn
import numpy as np
import jittor.transform as transform
from PIL import Image
from combine_model import Combine_Model_Projection
import networks
from argparse import ArgumentParser
# All inputs are resized to 512x512 and normalised from [0, 255] to [-1, 1].
img_size = 512
transform_image = transform.Compose([
    transform.Resize(size = img_size),
    transform.ImageNormalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
def read_img(path):
    """Load an image file as a normalised jittor tensor with a batch axis."""
    pil_image = Image.open(path).convert('RGB')
    tensor = jt.array(transform_image(pil_image))
    # Add the leading batch dimension expected by the model.
    return tensor.unsqueeze(0)
def save_img(image, path):
    """Write a batched CHW tensor in [-1, 1] to disk as an 8-bit RGB image."""
    array = image.squeeze(0).detach().numpy()
    # CHW -> HWC, then map [-1, 1] back to [0, 255].
    hwc = (np.transpose(array, (1, 2, 0)) + 1) / 2.0 * 255.0
    pixels = np.clip(hwc, 0, 255).astype(np.uint8)
    Image.fromarray(pixels).save(path)
if __name__ == '__main__':
    # CLI: combine the geometry of one image with the appearance of another.
    parser = ArgumentParser()
    parser.add_argument("--geo", type=str, default = "./images/CoarseSketch.jpg", help = "the path of geometry image")
    parser.add_argument("--appear", type=str, default = "./images/29042.jpg", help = "the path of appearance image")
    parser.add_argument("--output", type=str, default = "./results/sketch_gen.png", help = "the path of output image")
    parser.add_argument("--gender", type=int, default = 0, help = "gender of images: 0, female, 1, man")
    parser.add_argument("--cuda", type=int, default = 1, help = "use cuda or cpu: 0 , cpu; 1 , gpu")
    args = parser.parse_args()
    jt.flags.use_cuda = args.cuda

    geo_img = read_img(args.geo)
    appear_img = read_img(args.appear)
    # The geometry branch only consumes the first (sketch) channel.
    geo_img = geo_img[:,0:1,:,:]
    model = Combine_Model_Projection()
    model.initialize()
    gender = args.gender
    # Uniform blending weight for each facial part.
    part_weights = {'bg': 1.0,
                    'eye1': 1.0,
                    'eye2': 1.0,
                    'nose': 1.0,
                    'mouth': 1.0}
    image_result = model.inference(geo_img, appear_img, gender, part_weights)
    save_img(image_result, args.output)
|
a47d865cd779e9e9056b158c94d9d38c9bbe27b0
|
307d3837d31f9e3728af2b62ca51ebf63fe6ec6b
|
/hall_of_fame/kimdonghun/[BOJ]2447_DrawingStar.py
|
75597d0a247084e8515e3e32d48b869807d41b74
|
[] |
no_license
|
ellynhan/challenge100-codingtest-study
|
905043497d154b8a7333ca536e536d013f6e7454
|
bcdc6d04f13b12ba80b42e066f9d244d7c2cc698
|
refs/heads/master
| 2023-09-01T14:10:13.481013
| 2023-08-27T14:38:52
| 2023-08-27T14:38:52
| 401,561,230
| 162
| 176
| null | 2023-09-09T14:56:25
| 2021-08-31T03:30:36
|
C++
|
UTF-8
|
Python
| false
| false
| 458
|
py
|
[BOJ]2447_DrawingStar.py
|
import sys
def draw_star(n, l):
    """Recursively grow the fractal star pattern `l` until it is `n` wide.

    Each step triples the pattern: top and bottom thirds repeat every row
    three times side by side, the middle third leaves a blank square
    between two copies of each row. `n` must be a power of 3.
    """
    if n == 3:
        return l
    size = len(l)
    grown = [row * 3 for row in l]
    grown += [row + ' ' * size + row for row in l]
    grown += [row * 3 for row in l]
    return draw_star(n // 3, grown)
# N (a power of 3) comes from stdin; seed the recursion with the 3x3 base.
N = int(sys.stdin.readline())
base = ['***', '* *', '***']
res = draw_star(N, base)
for i in res:
    print(i)
|
ddc3e71773db3cfaa0bf9793900363a2f80a4862
|
5095200e9ca55cd3a37af34ed44448c02e2a1bb5
|
/paddlehub/datasets/pascalvoc.py
|
9efed98d888cd1a0cda882071f4a92f3dcc3eb84
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PaddleHub
|
8712603ef486c45e83eb0bc5725b0b3ed3ddbbde
|
b402610a6f0b382a978e82473b541ea1fc6cf09a
|
refs/heads/develop
| 2023-07-24T06:03:13.172978
| 2023-03-28T11:49:55
| 2023-03-28T11:49:55
| 162,672,577
| 12,914
| 2,239
|
Apache-2.0
| 2023-07-06T21:38:19
| 2018-12-21T06:00:48
|
Python
|
UTF-8
|
Python
| false
| false
| 6,958
|
py
|
pascalvoc.py
|
# coding:utf-8
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import copy
from typing import Callable
import paddle
import numpy as np
from paddlehub.env import DATA_HOME
from pycocotools.coco import COCO
class DetectCatagory:
    """Load label name, id and map from detection dataset.

    Args:
        attrbox(Callable): Method to get detection attributes of images.
        data_dir(str): Image dataset path.

    Returns:
        label_names(List(str)): The dataset label names.
        label_ids(List(int)): The dataset label ids.
        category_to_id_map(dict): Mapping relations of category and id for images.
    """

    def __init__(self, attrbox: Callable, data_dir: str):
        self.attrbox = attrbox
        self.img_dir = data_dir

    def __call__(self):
        # Fetch every category record from the annotation index.
        self.categories = self.attrbox.loadCats(self.attrbox.getCatIds())
        self.num_category = len(self.categories)
        label_names = [category['name'] for category in self.categories]
        label_ids = [int(category['id']) for category in self.categories]
        # Dense re-indexing: raw category id -> contiguous 0-based label index.
        category_to_id_map = {v: i for i, v in enumerate(label_ids)}
        return label_names, label_ids, category_to_id_map
class ParseImages:
    """Prepare images for detection.

    Args:
        attrbox(Callable): Method to get detection attributes of images.
        data_dir(str): Image dataset path.
        category_to_id_map(dict): Mapping relations of category and id for images.

    Returns:
        imgs(dict): The input for detection model, it is a dict.
    """

    def __init__(self, attrbox: Callable, data_dir: str, category_to_id_map: dict):
        self.attrbox = attrbox
        self.img_dir = data_dir
        self.category_to_id_map = category_to_id_map
        # Delegate ground-truth box/label filling to a dedicated parser.
        self.parse_gt_annotations = GTAnotations(self.attrbox, self.category_to_id_map)

    def __call__(self):
        # Sort ids so the record order is deterministic across runs.
        image_ids = self.attrbox.getImgIds()
        image_ids.sort()
        # deepcopy so the annotation index's own records are not mutated below.
        imgs = copy.deepcopy(self.attrbox.loadImgs(image_ids))
        for img in imgs:
            img['image'] = os.path.join(self.img_dir, img['file_name'])
            assert os.path.exists(img['image']), "image {} not found.".format(img['image'])
            # Pre-allocate fixed-capacity gt arrays (at most 50 boxes per image).
            box_num = 50
            img['gt_boxes'] = np.zeros((box_num, 4), dtype=np.float32)
            img['gt_labels'] = np.zeros((box_num), dtype=np.int32)
            img = self.parse_gt_annotations(img)
        return imgs
class GTAnotations:
    """Set gt boxes and gt labels for train.

    Args:
        attrbox(Callable): Method for get detection attributes for images.
        category_to_id_map(dict): Mapping relations of category and id for images.
        img(dict): Input for detection model.

    Returns:
        img(dict): Set specific value on the attributes of 'gt boxes' and 'gt labels' for input.
    """

    def __init__(self, attrbox: Callable, category_to_id_map: dict):
        self.attrbox = attrbox
        self.category_to_id_map = category_to_id_map

    def box_to_center_relative(self, box: list, img_height: int, img_width: int) -> np.ndarray:
        """
        Convert COCO annotations box with format [x1, y1, w, h] to
        center mode [center_x, center_y, w, h] and divide image width
        and height to get relative value in range[0, 1]
        """
        assert len(box) == 4, "box should be a len(4) list or tuple"
        x, y, w, h = box
        # Clamp the box to the image bounds before converting to center form.
        x1 = max(x, 0)
        x2 = min(x + w - 1, img_width - 1)
        y1 = max(y, 0)
        y2 = min(y + h - 1, img_height - 1)
        x = (x1 + x2) / 2 / img_width
        y = (y1 + y2) / 2 / img_height
        w = (x2 - x1) / img_width
        h = (y2 - y1) / img_height
        return np.array([x, y, w, h])

    def __call__(self, img: dict):
        img_height = img['height']
        img_width = img['width']
        anno = self.attrbox.loadAnns(self.attrbox.getAnnIds(imgIds=img['id'], iscrowd=None))
        gt_index = 0
        for target in anno:
            # NOTE(review): `< -1` never skips anything for non-negative COCO
            # areas; a minimum-area filter was probably intended — confirm.
            if target['area'] < -1:
                continue
            if 'ignore' in target and target['ignore']:
                continue
            box = self.box_to_center_relative(target['bbox'], img_height, img_width)
            # NOTE(review): `and` only skips boxes where BOTH sides are
            # degenerate; `or` (either side empty) may have been intended.
            if box[2] <= 0 and box[3] <= 0:
                continue
            img['gt_boxes'][gt_index] = box
            img['gt_labels'][gt_index] = \
                self.category_to_id_map[target['category_id']]
            gt_index += 1
            # Arrays were pre-allocated for 50 boxes; any extras are dropped.
            if gt_index >= 50:
                break
        return img
class DetectionData(paddle.io.Dataset):
    """
    Dataset for image detection.

    Args:
        transform(callmethod) : The method of preprocess images.
        mode(str): The mode for preparing dataset.

    Returns:
        DataSet: An iterable object for data iterating
    """

    def __init__(self, transform: Callable, size: int = 416, mode: str = 'train'):
        self.mode = mode
        self.transform = transform
        self.size = size
        # NOTE(review): despite the module name, both splits load COCO-style
        # annotation files/dirs under DATA_HOME/'voc' — confirm the layout.
        if self.mode == 'train':
            train_file_list = 'annotations/instances_train2017.json'
            train_data_dir = 'train2017'
            self.train_file_list = os.path.join(DATA_HOME, 'voc', train_file_list)
            self.train_data_dir = os.path.join(DATA_HOME, 'voc', train_data_dir)
            self.COCO = COCO(self.train_file_list)
            self.img_dir = self.train_data_dir
        elif self.mode == 'test':
            val_file_list = 'annotations/instances_val2017.json'
            val_data_dir = 'val2017'
            self.val_file_list = os.path.join(DATA_HOME, 'voc', val_file_list)
            self.val_data_dir = os.path.join(DATA_HOME, 'voc', val_data_dir)
            self.COCO = COCO(self.val_file_list)
            self.img_dir = self.val_data_dir
        # Build the label vocabulary, then parse all image records up front.
        parse_dataset_catagory = DetectCatagory(self.COCO, self.img_dir)
        self.label_names, self.label_ids, self.category_to_id_map = parse_dataset_catagory()
        parse_images = ParseImages(self.COCO, self.img_dir, self.category_to_id_map)
        self.data = parse_images()

    def __getitem__(self, idx: int):
        img = self.data[idx]
        # transform returns the preprocessed image plus the augmented record.
        im, data = self.transform(img)
        out_img, gt_boxes, gt_labels, gt_scores = im, data['gt_boxes'], data['gt_labels'], data['gt_scores']
        return out_img, gt_boxes, gt_labels, gt_scores

    def __len__(self):
        return len(self.data)
|
25c7b30efc09f5b02afc2461df0e2b17784dff21
|
fd659f8f10e08ba0a744c62388b46bdf4b0ef6bd
|
/common/imp/__init__.py
|
fbba64a2569e41564831b85baa1ec69720efc5f0
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
doosan-robotics/doosan-robot
|
a2f0e90ea00f599b9469e0d534650e67908dad25
|
560fcb8631726543124a410aba948cbd2b2cefb9
|
refs/heads/noetic-devel
| 2023-05-25T18:15:51.467579
| 2023-05-22T01:47:25
| 2023-05-22T01:47:25
| 167,874,506
| 123
| 56
|
BSD-3-Clause
| 2023-05-22T01:47:38
| 2019-01-28T00:22:06
|
C++
|
UTF-8
|
Python
| false
| false
| 44
|
py
|
__init__.py
|
import sys
sys.dont_write_bytecode = True
|
5e0db789233f0cc40f7fc476a9a0a956406fe5d8
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/tests/components/nest/test_init.py
|
ecfe412bdbf7c0deaf7d80884bfe7bac31953623
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 9,749
|
py
|
test_init.py
|
"""Test for setup methods for the SDM API.
The tests fake out the subscriber/devicemanager and simulate setup behavior
and failure modes.
By default all tests use test fixtures that run in each possible configuration
mode (e.g. yaml, ConfigEntry, etc) however some tests override and just run in
relevant modes.
"""
import logging
from typing import Any
from unittest.mock import patch
from google_nest_sdm.exceptions import (
ApiException,
AuthException,
ConfigurationException,
SubscriberException,
)
import pytest
from homeassistant.components.nest import DOMAIN
from homeassistant.config_entries import ConfigEntryState
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from .common import (
PROJECT_ID,
SUBSCRIBER_ID,
TEST_CONFIG_ENTRY_LEGACY,
TEST_CONFIG_LEGACY,
TEST_CONFIGFLOW_APP_CREDS,
FakeSubscriber,
YieldFixture,
)
from tests.common import MockConfigEntry
PLATFORM = "sensor"
@pytest.fixture
def platforms() -> list[str]:
    """Fixture to setup the platforms to test."""
    # Only the sensor platform is loaded for these init tests.
    return ["sensor"]
@pytest.fixture
def error_caplog(caplog):
    """Fixture to capture nest init error messages."""
    # Restrict capture to ERROR records from the nest integration only.
    with caplog.at_level(logging.ERROR, logger="homeassistant.components.nest"):
        yield caplog
@pytest.fixture
def warning_caplog(caplog):
    """Fixture to capture nest init warning messages."""
    # Restrict capture to WARNING records from the nest integration only.
    with caplog.at_level(logging.WARNING, logger="homeassistant.components.nest"):
        yield caplog
@pytest.fixture
def subscriber_side_effect() -> None:
    """Fixture to inject failures into FakeSubscriber start."""
    # Default: no failure; individual tests override via parametrize.
    return None
@pytest.fixture
def failing_subscriber(subscriber_side_effect: Any) -> YieldFixture[FakeSubscriber]:
    """Fixture overriding default subscriber behavior to allow failure injection."""
    subscriber = FakeSubscriber()
    # Patch start_async so the parametrized side effect fires during setup.
    with patch(
        "homeassistant.components.nest.api.GoogleNestSubscriber.start_async",
        side_effect=subscriber_side_effect,
    ):
        yield subscriber
async def test_setup_success(hass: HomeAssistant, error_caplog, setup_platform) -> None:
    """Test successful setup."""
    await setup_platform()
    # A clean setup logs no errors and leaves a single loaded entry.
    assert not error_caplog.records

    entries = hass.config_entries.async_entries(DOMAIN)
    assert len(entries) == 1
    assert entries[0].state is ConfigEntryState.LOADED
@pytest.mark.parametrize("subscriber_id", [("invalid-subscriber-format")])
async def test_setup_configuration_failure(
    hass: HomeAssistant,
    caplog: pytest.LogCaptureFixture,
    subscriber_id,
    setup_base_platform,
) -> None:
    """Test that a malformed subscriber id surfaces as a setup error."""
    await setup_base_platform()

    entries = hass.config_entries.async_entries(DOMAIN)
    assert len(entries) == 1
    assert entries[0].state is ConfigEntryState.SETUP_ERROR

    # This error comes from the python google-nest-sdm library, as a check added
    # to prevent common misconfigurations (e.g. confusing topic and subscriber)
    assert "Subscription misconfigured. Expected subscriber_id" in caplog.text
@pytest.mark.parametrize("subscriber_side_effect", [SubscriberException()])
async def test_setup_susbcriber_failure(
    hass: HomeAssistant, warning_caplog, failing_subscriber, setup_base_platform
) -> None:
    """Test that a transient subscriber error results in a setup retry."""
    await setup_base_platform()
    assert "Subscriber error:" in warning_caplog.text

    entries = hass.config_entries.async_entries(DOMAIN)
    assert len(entries) == 1
    # Transient errors schedule a retry rather than failing permanently.
    assert entries[0].state is ConfigEntryState.SETUP_RETRY
async def test_setup_device_manager_failure(
    hass: HomeAssistant, warning_caplog, setup_base_platform
) -> None:
    """Test device manager api failure."""
    # Let the subscriber start, but fail when the device list is fetched.
    with patch(
        "homeassistant.components.nest.api.GoogleNestSubscriber.start_async"
    ), patch(
        "homeassistant.components.nest.api.GoogleNestSubscriber.async_get_device_manager",
        side_effect=ApiException(),
    ):
        await setup_base_platform()

    assert "Device manager error:" in warning_caplog.text

    entries = hass.config_entries.async_entries(DOMAIN)
    assert len(entries) == 1
    assert entries[0].state is ConfigEntryState.SETUP_RETRY
@pytest.mark.parametrize("subscriber_side_effect", [AuthException()])
async def test_subscriber_auth_failure(
    hass: HomeAssistant,
    caplog: pytest.LogCaptureFixture,
    setup_base_platform,
    failing_subscriber,
) -> None:
    """Test subscriber throws an authentication error."""
    await setup_base_platform()

    entries = hass.config_entries.async_entries(DOMAIN)
    assert len(entries) == 1
    assert entries[0].state is ConfigEntryState.SETUP_ERROR

    # An auth failure must kick off the reauth config flow for the user.
    flows = hass.config_entries.flow.async_progress()
    assert len(flows) == 1
    assert flows[0]["step_id"] == "reauth_confirm"
@pytest.mark.parametrize("subscriber_id", [(None)])
async def test_setup_missing_subscriber_id(
    hass: HomeAssistant, warning_caplog, setup_base_platform
) -> None:
    """A missing subscriber id in configuration is a non-recoverable error."""
    await setup_base_platform()
    assert "Configuration option" in warning_caplog.text
    config_entries = hass.config_entries.async_entries(DOMAIN)
    assert len(config_entries) == 1
    assert config_entries[0].state is ConfigEntryState.SETUP_ERROR
@pytest.mark.parametrize("subscriber_side_effect", [(ConfigurationException())])
async def test_subscriber_configuration_failure(
    hass: HomeAssistant, error_caplog, setup_base_platform, failing_subscriber
) -> None:
    """A subscriber ConfigurationException is a non-recoverable setup error."""
    await setup_base_platform()
    config_entries = hass.config_entries.async_entries(DOMAIN)
    assert len(config_entries) == 1
    assert config_entries[0].state is ConfigEntryState.SETUP_ERROR
    assert "Configuration error: " in error_caplog.text
@pytest.mark.parametrize("nest_test_config", [TEST_CONFIGFLOW_APP_CREDS])
async def test_empty_config(
    hass: HomeAssistant, error_caplog, config, setup_platform
) -> None:
    """Setup is a no-op when there is no nest configuration present."""
    await setup_platform()
    assert not error_caplog.records
    # No config entries should have been created.
    assert not hass.config_entries.async_entries(DOMAIN)
async def test_unload_entry(hass: HomeAssistant, setup_platform) -> None:
    """Test successful unload of a ConfigEntry."""
    await setup_platform()
    entries = hass.config_entries.async_entries(DOMAIN)
    assert len(entries) == 1
    entry = entries[0]
    assert entry.state is ConfigEntryState.LOADED
    assert await hass.config_entries.async_unload(entry.entry_id)
    # Consistency fix: compare the enum state with "is" like every other
    # assertion in this module (identity is the idiomatic check for enums);
    # the original used "==" only on this line.
    assert entry.state is ConfigEntryState.NOT_LOADED
async def test_remove_entry(
    hass: HomeAssistant,
    setup_base_platform,
) -> None:
    """Test removing a ConfigEntry also deletes its pub/sub subscription."""
    with patch(
        "homeassistant.components.nest.api.GoogleNestSubscriber",
        return_value=FakeSubscriber(),
    ):
        await setup_base_platform()
    entries = hass.config_entries.async_entries(DOMAIN)
    assert len(entries) == 1
    entry = entries[0]
    assert entry.state is ConfigEntryState.LOADED
    # Assert entry was imported if from configuration.yaml
    assert entry.data.get("subscriber_id") == SUBSCRIBER_ID
    assert entry.data.get("project_id") == PROJECT_ID
    with patch(
        "homeassistant.components.nest.api.GoogleNestSubscriber.subscriber_id"
    ), patch(
        "homeassistant.components.nest.api.GoogleNestSubscriber.delete_subscription",
    ) as delete:
        assert await hass.config_entries.async_remove(entry.entry_id)
        # Removing the entry must also delete the cloud-side subscription.
        assert delete.called
    entries = hass.config_entries.async_entries(DOMAIN)
    assert not entries
async def test_remove_entry_delete_subscriber_failure(
    hass: HomeAssistant, setup_base_platform
) -> None:
    """Test a failure when deleting the subscription."""
    with patch(
        "homeassistant.components.nest.api.GoogleNestSubscriber",
        return_value=FakeSubscriber(),
    ):
        await setup_base_platform()
    entries = hass.config_entries.async_entries(DOMAIN)
    assert len(entries) == 1
    entry = entries[0]
    assert entry.state is ConfigEntryState.LOADED
    with patch(
        "homeassistant.components.nest.api.GoogleNestSubscriber.delete_subscription",
        side_effect=SubscriberException(),
    ) as delete:
        assert await hass.config_entries.async_remove(entry.entry_id)
        # Entry removal still succeeds even though the subscription delete
        # call failed (best-effort cleanup).
        assert delete.called
    entries = hass.config_entries.async_entries(DOMAIN)
    assert not entries
@pytest.mark.parametrize("config_entry_unique_id", [DOMAIN, None])
async def test_migrate_unique_id(
    hass: HomeAssistant,
    error_caplog,
    setup_platform,
    config_entry,
    config_entry_unique_id,
) -> None:
    """Test that legacy config entry unique ids are migrated to the project id."""
    assert config_entry.state is ConfigEntryState.NOT_LOADED
    assert config_entry.unique_id == config_entry_unique_id
    await setup_platform()
    assert config_entry.state is ConfigEntryState.LOADED
    # Setup rewrites the unique id (DOMAIN or None) to the SDM project id.
    assert config_entry.unique_id == PROJECT_ID
@pytest.mark.parametrize("nest_test_config", [TEST_CONFIG_LEGACY])
async def test_legacy_works_with_nest_yaml(
    hass: HomeAssistant,
    config: dict[str, Any],
    config_entry: MockConfigEntry,
) -> None:
    """The integration refuses to start with legacy works-with-nest yaml."""
    config_entry.add_to_hass(hass)
    assert await async_setup_component(hass, DOMAIN, config) is False
    await hass.async_block_till_done()
@pytest.mark.parametrize("nest_test_config", [TEST_CONFIG_ENTRY_LEGACY])
async def test_legacy_works_with_nest_cleanup(
    hass: HomeAssistant, setup_platform
) -> None:
    """Test legacy works with nest config entries are silently removed once yaml is removed."""
    await setup_platform()
    # The orphaned legacy entry should be gone after setup.
    assert not hass.config_entries.async_entries(DOMAIN)
|
eab87f1c42d5b2f4e7e7f179ab8df24dc1732963
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/shared/view_helpers/UsersInfoController.py
|
f0a0ff6cd704047a6bcbc2206bb2f3e3459ac7bf
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 3,466
|
py
|
UsersInfoController.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/shared/view_helpers/UsersInfoController.py
import BigWorld
from AccountCommands import isCodeValid
from debug_utils import LOG_WARNING
from shared_utils import CONST_CONTAINER
from gui.shared.utils.decorators import ReprInjector
from gui.shared.utils.requesters import RequestCtx
from gui.shared.utils.requesters.RequestsController import RequestsController
from messenger.storage import storage_getter
from messenger.proto import proto_getter, PROTO_TYPE
_GR_MAX_CHUNK_SIZE = 20
_NAMES_MAX_CHUNK_SIZE = 50
class USER_INFO_RQ_TYPE(CONST_CONTAINER):
    # Request type identifiers used by UsersInfoController to route a request
    # context to its handler (see _getHandlerByRequestType).
    GET_GLOBAL_RATINGS = 1
    GET_NICKNAMES = 2
@ReprInjector.withParent(('getDbIDs', 'dbIDs'))
class _GetGlobalRatingCtx(RequestCtx):
    """Request context holding the account DB IDs whose global ratings are fetched."""

    def __init__(self, dbIDs, waitingID=''):
        super(_GetGlobalRatingCtx, self).__init__(waitingID=waitingID)
        self.__dbIDs = dbIDs

    def getDbIDs(self):
        # Account database IDs this request asks ratings for.
        return self.__dbIDs

    def getRequestType(self):
        return USER_INFO_RQ_TYPE.GET_GLOBAL_RATINGS

    def getCooldown(self):
        # NOTE(review): decompiled body is a bare "pass" (returns None); the
        # original source likely returned a cooldown constant — verify.
        pass
@ReprInjector.withParent(('getDbIDs', 'dbIDs'))
class _GetNicknamesCtx(RequestCtx):
    """Request context holding the account DB IDs whose nicknames are resolved."""

    def __init__(self, dbIDs, waitingID=''):
        super(_GetNicknamesCtx, self).__init__(waitingID=waitingID)
        self.__dbIDs = dbIDs

    def getDbIDs(self):
        # Account database IDs this request asks nicknames for.
        return self.__dbIDs

    def getRequestType(self):
        return USER_INFO_RQ_TYPE.GET_NICKNAMES

    def getCooldown(self):
        # NOTE(review): decompiled body is a bare "pass" (returns None); the
        # original source likely returned a cooldown constant — verify.
        pass
class UsersInfoController(RequestsController):
    """Batches requests for other players' public info (global rating,
    nicknames) into fixed-size chunks and routes them to per-type handlers."""

    def __init__(self):
        super(UsersInfoController, self).__init__(None)
        # Request type -> handler method; consulted by _getHandlerByRequestType.
        self.__handlers = {USER_INFO_RQ_TYPE.GET_GLOBAL_RATINGS: self._getGlobalRatings,
         USER_INFO_RQ_TYPE.GET_NICKNAMES: self._getNicknames}
        return

    def fini(self):
        self.__handlers.clear()
        super(UsersInfoController, self).fini()

    @proto_getter(PROTO_TYPE.XMPP)
    def proto(self):
        # Injected by @proto_getter at runtime; this body is never executed.
        return None

    @storage_getter('users')
    def users(self):
        # Injected by @storage_getter at runtime; this body is never executed.
        return None

    def requestNicknames(self, accountDbIDs, callback):
        # NOTE: consumes (mutates) the caller's list, splitting it into
        # requests of at most _NAMES_MAX_CHUNK_SIZE ids each.
        while accountDbIDs:
            self.request(_GetNicknamesCtx(accountDbIDs[:_NAMES_MAX_CHUNK_SIZE]), callback, allowDelay=True)
            del accountDbIDs[:_NAMES_MAX_CHUNK_SIZE]

    def requestGlobalRatings(self, accountDbIDs, callback):
        # Same chunked, list-consuming pattern as requestNicknames,
        # but with the smaller _GR_MAX_CHUNK_SIZE batch size.
        while accountDbIDs:
            self.request(_GetGlobalRatingCtx(accountDbIDs[:_GR_MAX_CHUNK_SIZE]), callback, allowDelay=True)
            del accountDbIDs[:_GR_MAX_CHUNK_SIZE]

    def _getGlobalRatings(self, ctx, callback=None):
        getter = self.users.getUser

        def _ratingsCallback(code, errStr, ratings):
            if isCodeValid(code):
                # Push the freshly fetched ratings into the users storage.
                for userDbID, rating in (ratings or {}).iteritems():
                    user = getter(userDbID)
                    if user:
                        user.update(globalRating=rating)
            else:
                LOG_WARNING('Error occurred while getting global ratings from server', code, errStr, ratings)
            # The caller's callback always runs, even on server error.
            callback(ratings or {})

        BigWorld.player().requestPlayersGlobalRating(ctx.getDbIDs(), _ratingsCallback)
        return True

    def _getNicknames(self, ctx, callback=None):
        # Nickname resolution is delegated to the XMPP messenger protocol.
        return self.proto.nicknames.resolve(ctx.getDbIDs(), callback)

    def _getHandlerByRequestType(self, requestTypeID):
        return self.__handlers.get(requestTypeID)

    def _getRequestTimeOut(self):
        # NOTE(review): decompiled body is a bare "pass" (returns None); the
        # original source likely returned a timeout constant — verify.
        pass
|
92c6b06160e04a44294966c19ee62784f3909b66
|
0e0ddc095823c54877c143adacbfcdd6355261de
|
/libqtile/widget/khal_calendar.py
|
9e4824dc0507870b74190e561616024886201839
|
[
"MIT"
] |
permissive
|
qtile/qtile
|
b19108ca632871104a0783a4afbe7350a17b97db
|
3f8a00082ad880042d396477d9445954e8d29cf2
|
refs/heads/master
| 2023-09-01T19:31:09.419767
| 2023-09-01T19:10:00
| 2023-09-01T19:10:00
| 47,476
| 4,203
| 986
|
MIT
| 2023-09-11T21:21:56
| 2008-08-30T00:16:40
|
Python
|
UTF-8
|
Python
| false
| false
| 4,795
|
py
|
khal_calendar.py
|
# -*- coding: utf-8 -*-
###################################################################
# This widget will display the next appointment on your calendar in
# the qtile status bar. Appointments within the "reminder" time will be
# highlighted. Authentication credentials are stored on disk.
#
# This widget uses the khal command line calendar utility available at
# https://github.com/geier/khal
#
# This widget also requires the dateutil.parser module.
# If you get a strange "AttributeError: 'module' object has no attribute
# GoogleCalendar" error, you are probably missing a module. Check
# carefully.
#
# Thanks to the creator of the YahooWeather widget (dmpayton). This code
# borrows liberally from that one.
#
# Copyright (c) 2016 by David R. Andersen <k0rx@RXcomm.net>
# New khal output format adjustment, 2016 Christoph Lassner
# Licensed under the Gnu Public License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###################################################################
import datetime
import string
import subprocess
import dateutil.parser
from libqtile.widget import base
class KhalCalendar(base.ThreadPoolText):
    """Khal calendar widget

    This widget will display the next appointment on your Khal calendar in the
    qtile status bar. Appointments within the "reminder" time will be
    highlighted.

    Widget requirements: dateutil_.

    .. _dateutil: https://pypi.org/project/python-dateutil/
    """

    defaults = [
        ("reminder_color", "FF0000", "color of calendar entries during reminder time"),
        ("foreground", "FFFF33", "default foreground color"),
        ("remindertime", 10, "reminder time in minutes"),
        ("lookahead", 7, "days to look ahead in the calendar"),
    ]

    def __init__(self, **config):
        base.ThreadPoolText.__init__(self, "", **config)
        self.add_defaults(KhalCalendar.defaults)
        self.text = "Calendar not initialized."
        # Remember the configured colour so poll() can restore it after a
        # reminder highlight ends.
        self.default_foreground = self.foreground

    def poll(self):
        # get today and tomorrow
        now = datetime.datetime.now()
        # get reminder time in datetime format
        remtime = datetime.timedelta(minutes=self.remindertime)
        # parse khal output for the next seven days
        # and get the next event
        args = ["khal", "list", "now", str(self.lookahead) + "d"]
        cal = subprocess.Popen(args, stdout=subprocess.PIPE)
        output = cal.communicate()[0].decode("utf-8")
        if output == "No events\n":
            return "No appointments in next " + str(self.lookahead) + " days"
        output = output.split("\n")
        date = "unknown"
        starttime = None
        endtime = None
        # output[0] = 'Friday, 15/04/1976'
        outputsplitted = output[0].split(" ")
        date = outputsplitted[1]
        # output[1] = '[ ][12:00-13:00] dentist'
        # NOTE(review): the slicing below assumes khal's two-line list format
        # shown in the examples above; other khal versions/formats would make
        # these indices wrong — verify against the installed khal.
        try:
            output_nb = output[1].strip(" ")
            starttime = dateutil.parser.parse(date + " " + output_nb[:5], ignoretz=True)
            endtime = dateutil.parser.parse(date + " " + output_nb[6:11], ignoretz=True)
        except ValueError:
            # all day event output contains no start nor end time.
            starttime = dateutil.parser.parse(date + " 00:00", ignoretz=True)
            endtime = starttime + datetime.timedelta(hours=23, minutes=59)
        data = output[0].replace(",", "") + " " + output[1]
        # get rid of any garbage in appointment added by khal
        data = "".join(filter(lambda x: x in string.printable, data))
        # colorize the event if it is within reminder time
        if (starttime - remtime <= now) and (endtime > now):
            self.foreground = self.reminder_color
        else:
            self.foreground = self.default_foreground
        return data
|
7f67d331c4655e951fc5b23c40cb66fa266a8403
|
eb222db3a5a7155ecd1e4515eb111183cfdae2bc
|
/ch08/fetch_prediction_requests.py
|
206bb909dad023702e94505657aa3ad2b97ef427
|
[
"MIT"
] |
permissive
|
rjurney/Agile_Data_Code_2
|
1335140504d59f4a63920f3b2db203d4222decab
|
51a2f0bb60552a2a821cd2d2571ea0dc680a5460
|
refs/heads/master
| 2023-07-05T04:37:07.770591
| 2023-03-22T01:31:12
| 2023-03-22T01:31:12
| 54,056,503
| 483
| 337
|
MIT
| 2023-03-22T01:31:14
| 2016-03-16T18:25:28
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,044
|
py
|
fetch_prediction_requests.py
|
#!/usr/bin/env python
import sys, os, re
import json
import datetime, iso8601
# Save to Mongo
from bson import json_util
import pymongo_spark
pymongo_spark.activate()
# Pass date and base path to main() from airflow
def main(iso_date, base_path):
    """Export one day's prediction requests from MongoDB to a dated JSON
    directory under ``{base_path}/data/prediction_tasks_daily.json/``.

    iso_date: ISO-8601 date string selecting the day to export.
    base_path: filesystem root for the output directory structure.
    """
    APP_NAME = "fetch_prediction_requests.py"

    # If there is no SparkSession, create the environment
    # (the bare names raise NameError when run as a plain script, so this
    # always builds a fresh context outside a pyspark shell).
    try:
        sc and spark
    except NameError as e:
        import findspark
        findspark.init()
        import pyspark
        import pyspark.sql
        sc = pyspark.SparkContext()
        spark = pyspark.sql.SparkSession(sc).builder.appName(APP_NAME).getOrCreate()

    # Get today and tomorrow's dates as iso strings to scope query
    today_dt = iso8601.parse_date(iso_date)
    rounded_today = today_dt.date()
    iso_today = rounded_today.isoformat()
    rounded_tomorrow_dt = rounded_today + datetime.timedelta(days=1)
    iso_tomorrow = rounded_tomorrow_dt.isoformat()

    # Create mongo query string for today's data
    # ({{ / }} are literal braces escaped for str.format).
    mongo_query_string = """{{
    "Timestamp": {{
      "$gte": "{iso_today}",
      "$lte": "{iso_tomorrow}"
      }}
    }}""".format(
        iso_today=iso_today,
        iso_tomorrow=iso_tomorrow
    )
    # The connector expects a single-line query string.
    mongo_query_string = mongo_query_string.replace('\n', '')

    # Create the config object with the query string
    mongo_query_config = dict()
    mongo_query_config["mongo.input.query"] = mongo_query_string

    # Load the day's requests using pymongo_spark
    prediction_requests = sc.mongoRDD(
        'mongodb://localhost:27017/agile_data_science.prediction_tasks',
        config=mongo_query_config
    )

    # Build the day's output path: a date based primary key directory structure
    today_output_path = "{}/data/prediction_tasks_daily.json/{}".format(
        base_path,
        iso_today
    )

    # Generate json records
    prediction_requests_json = prediction_requests.map(json_util.dumps)

    # Write/replace today's output path
    # NOTE(review): shells out to rm -rf with an externally supplied path;
    # acceptable for trusted Airflow parameters, never for untrusted input.
    os.system("rm -rf {}".format(today_output_path))
    prediction_requests_json.saveAsTextFile(today_output_path)
if __name__ == "__main__":
main(sys.argv[1], sys.argv[2])
|
77d73b253efaaab7f9eb19b138890c3868fc7e86
|
bc6e87f8e9a3f6c35f8080718ac409801dab3b24
|
/server/workers/api/src/apis/export.py
|
203abc382a94d95d5ac7dc3b262cdabaed839527
|
[
"MIT"
] |
permissive
|
OpenKnowledgeMaps/Headstart
|
b7f56d8562d044e8d96a08f9f7ae0bc6de1076cd
|
94dcc248e1892de7b603d5a4dad175f5d8a128db
|
refs/heads/master
| 2023-08-31T20:06:34.485558
| 2023-08-25T17:34:03
| 2023-08-25T17:34:03
| 15,936,466
| 132
| 36
|
MIT
| 2023-08-25T17:34:05
| 2014-01-15T13:52:50
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 9,951
|
py
|
export.py
|
import os
from flask import Blueprint, request, make_response, jsonify, abort
from flask_restx import Namespace, Resource, fields
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bibdatabase import BibDatabase
import dateutil.parser as parser
import pytz
import re
export_ns = Namespace("export", description="metadata export API operations")
def parse_date(date):
    """Parse a date string into BibTeX date parts (all values as strings).

    Always returns a "year" key; "month" and "day" are added only when the
    input is more precise than a bare year (heuristic: longer than 4 chars).
    The parsed datetime is normalised to UTC first.
    """
    dt = parser.parse(date).astimezone(pytz.utc)
    parsed = {"year": str(dt.year)}
    if len(date) > 4:
        parsed["month"] = str(dt.month)
        parsed["day"] = str(dt.day)
    return parsed
def create_authorfield(authors_objects):
    """Build a BibTeX author field ("Last, First and Last2, First2 ...").

    Each entry is a dict that may contain "lastName" and "firstName".
    Entries without a "lastName" are skipped.

    BUGFIX: the original appended `author` unconditionally, so an entry
    without "lastName" either re-appended the previous author (duplicate)
    or raised NameError when it was the first entry.
    """
    authors = []
    for ao in authors_objects:
        if "lastName" in ao and "firstName" in ao:
            authors.append("{lastName}, {firstName}".format(**ao))
        elif "lastName" in ao:
            authors.append(ao["lastName"])
    return " and ".join(authors)
def transform2bibtex(metadata):
    """Convert a metadata dict to a BibTeX export.

    Returns {"format": "bibtex", "export": <bibtex string>}.

    The entry type is derived from the "resulttype" list; "misc" entries that
    still name a venue are promoted to "article".
    """
    # TODO: add mapping from resulttype to ARTICLETYPE
    # possible published_in parser
    # field renaming, e.g. paper_abstract to abstract
    # choose correct fields, e.g. author_string for author
    # use different field for ID
    title = metadata.get("title", "")
    try:
        author = create_authorfield(metadata.get("authors_objects", []))
    except Exception:
        # Fall back to the preformatted author string.
        author = metadata.get("authors", "")
    doi = metadata.get("doi", "")
    try:
        # Strip URL prefixes so only the bare DOI remains.
        doi = re.sub("https://|http://|dx.doi.org/|doi.org/", "", doi)
    except Exception:
        pass
    entry_id = metadata.get("id", "")  # renamed local: `id` shadowed a builtin
    published_in = metadata.get("published_in", "")
    url = metadata.get("list_link", {}).get("address", "")
    # BUGFIX: default to [] — .get("resulttype") returned None when the key
    # was missing, making the comprehension raise TypeError.
    bibtextypes = [get_bibtextype(r) for r in metadata.get("resulttype", [])]
    if len(bibtextypes) == 0 or set(bibtextypes) == {"misc"}:
        entrytype = "misc"
    else:
        # First non-"misc" type wins.
        entrytype = list(filter(lambda x: x != "misc", bibtextypes))[0]
    fields = {
        "title": title,
        "author": author,
        "ID": entry_id
    }
    if "year" in metadata:
        try:
            parsed_date = parse_date(metadata.get("year", ""))
            for k, v in parsed_date.items():
                fields[k] = v
        except Exception:
            fields["year"] = ""
    else:
        fields["year"] = ""
    if doi != "":
        fields["doi"] = doi
    if url != "":
        fields["url"] = url
    # A "misc" entry that still names a venue is promoted to an article.
    if entrytype == "misc" and published_in != "":
        entrytype = "article"
    # Which BibTeX field carries the venue depends on the entry type.
    venue_field_by_type = {
        "article": "journal",
        "book": "publisher",
        "inbook": "publisher",
        "inproceedings": "booktitle",
        "phdthesis": "school",
        "mastersthesis": "school",
        "conference": "booktitle",
        "video": "publisher",
        "software": "publisher",
        "thesis": "school",
    }
    # Consistency fix: only emit a venue field when a venue is present
    # (the original wrote empty publisher/school/booktitle fields for some
    # entry types while checking != "" only for articles).
    if published_in != "" and entrytype in venue_field_by_type:
        fields[venue_field_by_type[entrytype]] = published_in
    fields["ENTRYTYPE"] = entrytype
    db = BibDatabase()
    writer = BibTexWriter()
    db.entries.append(fields)
    export = writer.write(db)
    return {
        "format": "bibtex",
        "export": export
    }
def parse_published_in(published_in):
    # Unimplemented stub (currently unused). Presumably intended to
    # normalise the free-text "published_in" venue string — verify before use.
    pass
def transform2ris(metadata):
    # Unimplemented stub: returns None, so the /export/ris endpoint currently
    # responds with JSON null and HTTP 200. NOTE(review): consider a 501.
    pass
@export_ns.route('/<format>')
class exportMetadata(Resource):
    """Export posted metadata in a citation format ("bibtex" or "ris")."""

    def post(self, format):
        """Transform the posted JSON metadata into the requested format.

        Returns 200 with the export, 400 for an unknown format, and 500
        with an error payload if the transformation raises.
        """
        try:
            metadata = request.json
            export_ns.logger.debug(metadata)
            if format == "bibtex":
                result = transform2bibtex(metadata)
                code = 200
            elif format == "ris":
                result = transform2ris(metadata)
                code = 200
            else:
                result = {"status": "error",
                          "reason": "output format not recognized, must be either bibtex or ris"}
                code = 400
            export_ns.logger.debug(result)
            # BUGFIX: the header key was 'ContentType'; the real HTTP header
            # is 'Content-Type' (the sibling endpoints already use it).
            # The earlier per-branch 'application/text' headers were dead
            # code — they were unconditionally overwritten — and are removed.
            headers = {'Content-Type': 'application/json'}
            return make_response(jsonify(result),
                                 code,
                                 headers)
        except Exception as e:
            export_ns.logger.error(e)
            result = {'success': False, 'reason': str(e)}
            headers = {'Content-Type': 'application/json'}
            return make_response(jsonify(result),
                                 500,
                                 headers)
@export_ns.route('/service_version')
class ServiceVersion(Resource):
    """Expose the deployed service version for monitoring."""

    def get(self):
        body = {"service_version": os.getenv("SERVICE_VERSION")}
        headers = {"Content-Type": "application/json"}
        return make_response(body, 200, headers)
@export_ns.route('/healthcheck')
class Healthcheck(Resource):
    """Liveness probe endpoint."""

    def get(self):
        return make_response(
            {"status": "I'm good"}, 200, {"Content-Type": "application/json"}
        )
# Mapping from repository result types to BibTeX entry types. Hoisted to
# module level so the large dict is built once instead of on every call.
# Duplicate keys from the original ("Dataset", "Lecture", "Manuscript",
# "Review" — each mapped to the same value twice) have been deduplicated.
_BIBTEX_TYPE_BY_RESULTTYPE = {
    "Audio": "audio",
    "Book": "book",
    "Book part": "inbook",
    "Conference object": "inproceedings",
    "Course material": "misc",
    "Dataset": "misc",
    "Image/video": "misc",
    "Journal/newspaper": "misc",
    "Journal/newspaper article": "article",
    "Journal/newspaper other content": "misc",
    "Lecture": "misc",
    "Manuscript": "unpublished",
    "Map": "misc",
    "Moving image/video": "video",
    "Musical notation": "misc",
    "Other/Unknown material": "misc",
    "Patent": "patent",
    "Report": "techreport",
    "Review": "misc",
    "Software": "software",
    "Still image": "misc",
    "Text": "misc",
    "Thesis": "thesis",
    "Thesis: bachelor": "thesis",
    "Thesis: doctoral and postdoctoral": "phdthesis",
    "Thesis: master": "mastersthesis",
    "Adaptive Clinical Trial": "misc",
    "Address": "misc",
    "Autobiography": "book",
    "Bibliography": "misc",
    "Biography": "book",
    "Book Illustrations": "misc",
    "Case Reports": "misc",
    "Classical Article": "article",
    "Clinical Conference": "conference",
    "Clinical Study": "misc",
    "Clinical Trial": "misc",
    "Clinical Trial Protocol": "misc",
    "Clinical Trial, Phase I": "misc",
    "Clinical Trial, Phase II": "misc",
    "Clinical Trial, Phase III": "misc",
    "Clinical Trial, Phase IV": "misc",
    "Clinical Trial, Veterinary": "misc",
    "Collected Work": "misc",
    "Collected Works": "misc",
    "Comment": "misc",
    "Comparative Study": "misc",
    "Congress": "conference",
    "Consensus Development Conference": "conference",
    "Consensus Development Conference, NIH": "conference",
    "Controlled Clinical Trial": "misc",
    "Corrected and Republished Article": "article",
    "Dictionary": "misc",
    "Directory": "misc",
    "Duplicate Publication": "misc",
    "Editorial": "article",
    "Electronic Supplementary Materials": "electronic",
    "English Abstract": "misc",
    "Ephemera": "misc",
    "Equivalence Trial": "misc",
    "Evaluation Studies": "misc",
    "Evaluation Study": "misc",
    "Expression of Concern": "misc",
    "Festschrift": "misc",
    "Government Publication": "misc",
    "Guideline": "misc",
    "Historical Article": "article",
    "Interactive Tutorial": "misc",
    "Interview": "misc",
    "Introductory Journal Article": "article",
    "Journal Article": "article",
    "Legal Case": "misc",
    "Legislation": "misc",
    "Letter": "misc",
    "Meta-Analysis": "misc",
    "Multicenter Study": "misc",
    "News": "misc",
    "Newspaper Article": "article",
    "Observational Study": "misc",
    "Observational Study, Veterinary": "misc",
    "Overall": "misc",
    "Patient Education Handout": "misc",
    "Periodical Index": "misc",
    "Personal Narrative": "misc",
    "Pictorial Work": "misc",
    "Popular Work": "misc",
    "Portrait": "misc",
    "Practice Guideline": "misc",
    "Pragmatic Clinical Trial": "misc",
    "Preprint": "misc",
    "Publication Components": "misc",
    "Publication Formats": "misc",
    "Publication Type Category": "misc",
    "Published Erratum": "misc",
    "Randomized Controlled Trial": "misc",
    "Randomized Controlled Trial, Veterinary": "misc",
    "Research Support, American Recovery and Reinvestment Act": "misc",
    "Research Support, N.I.H., Extramural": "misc",
    "Research Support, N.I.H., Intramural": "misc",
    "Research Support, Non-U.S. Gov't": "misc",
    "Research Support, U.S. Gov't, Non-P.H.S.": "misc",
    "Research Support, U.S. Gov't, P.H.S.": "misc",
    "Retracted Publication": "misc",
    "Retraction of Publication": "misc",
    "Scientific Integrity Review": "misc",
    "Study Characteristics": "misc",
    "Support of Research": "misc",
    "Systematic Review": "misc",
    "Technical Report": "techreport",
    "Twin Study": "misc",
    "Validation Study": "misc",
    "Video Audio Media": "video",
    "Webcasts": "misc"
}


def get_bibtextype(resulttype):
    """Return the BibTeX entry type for a repository result type.

    Unknown result types fall back to "article" (the original behavior).
    """
    return _BIBTEX_TYPE_BY_RESULTTYPE.get(resulttype, "article")
|
8ae1e9a11b651ac7c712054da85485f6688d4a62
|
fdbb74a95924e2677466614f6ab6e2bb13b2a95a
|
/third_party/python/Lib/test/test_unittest.py
|
bfc3ded6f128da498ad170680264e3cef11fdc42
|
[
"ISC",
"Python-2.0",
"GPL-1.0-or-later",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-copyleft"
] |
permissive
|
jart/cosmopolitan
|
fb11b5658939023977060a7c6c71a74093d9cb44
|
0d748ad58e1063dd1f8560f18a0c75293b9415b7
|
refs/heads/master
| 2023-09-06T09:17:29.303607
| 2023-09-02T03:49:13
| 2023-09-02T03:50:18
| 272,457,606
| 11,887
| 435
|
ISC
| 2023-09-14T17:47:58
| 2020-06-15T14:16:13
|
C
|
UTF-8
|
Python
| false
| false
| 286
|
py
|
test_unittest.py
|
import unittest.test
from test import support
def test_main():
    """Entry point used by regrtest: run the unittest package's own suite."""
    # used by regrtest
    support.run_unittest(unittest.test.suite())
    # Clean up any child processes the suite may have spawned.
    support.reap_children()
def load_tests(*_):
    """Standard ``load_tests`` protocol hook; arguments are ignored."""
    # used by unittest
    return unittest.test.suite()


if __name__ == "__main__":
    test_main()
|
bf0644adafae5ad13ab5a93a3c53649d714fbac7
|
385b36f7cf1e545b4518bafd2b3097f2931693b7
|
/setup.py
|
99c510ddb4c532aab48c171d7e6ef28fa1f47769
|
[
"MIT"
] |
permissive
|
machine-intelligence-laboratory/TopicNet
|
8a90b9362a24f91ea15bda4b4a6d7fd162cf09e1
|
88963c16c65b90789739419ec1697843c9a97129
|
refs/heads/master
| 2023-04-10T14:31:19.071893
| 2022-11-18T10:26:54
| 2022-11-18T10:26:54
| 206,595,209
| 141
| 18
|
MIT
| 2022-11-18T10:26:55
| 2019-09-05T15:20:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,602
|
py
|
setup.py
|
# PEP 632: distutils is deprecated since Python 3.10 and removed in 3.12.
# setuptools.setup() is the drop-in, backward-compatible replacement.
from setuptools import setup

setup(
    name='topicnet',
    packages=[
        'topicnet',
        'topicnet.cooking_machine',
        'topicnet.cooking_machine.cubes',
        'topicnet.cooking_machine.models',
        'topicnet.cooking_machine.recipes',
        'topicnet.dataset_manager',
        'topicnet.viewers',
    ],
    version='0.8.0',
    license='MIT',
    description='TopicNet is a module for topic modelling using ARTM algorithm',
    author='Machine Intelligence Laboratory',
    author_email='alex.goncharov@phystech.edu',
    url='https://github.com/machine-intelligence-laboratory/TopicNet',
    download_url='https://github.com/machine-intelligence-laboratory/TopicNet/archive/v0.8.0.tar.gz',
    keywords=[
        'ARTM',
        'topic modeling',
        'regularization',
        'multimodal learning',
        'document vector representation',
    ],
    install_requires=[
        'bigartm',
        'colorlover',
        'dask[dataframe]',
        'dill',
        'ipython',
        'jinja2',
        'numba',
        'numexpr',
        'numpy',
        'pandas',
        'plotly',
        'pytest',
        'scikit-learn',
        'scipy',
        'six',
        'strictyaml',
        'toolz',
        'tqdm',
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Scientific/Engineering :: Information Analysis',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.7',
    ],
)
|
6772981271c540942ace87fe17621647bbc609fd
|
e196fe807b2720eb7f08ad9ca914887341bd9b44
|
/tests/plugins/test_lrt.py
|
dd6a3e3aa6f0771c62c7e52285a27227d0395e01
|
[
"BSD-2-Clause"
] |
permissive
|
streamlink/streamlink
|
ab2ce4a8d71d2abd67f300628f04ce960e7696d0
|
561f7ef854e3ec076e5bd3efb3e7f8efe5df32df
|
refs/heads/master
| 2023-08-29T15:03:17.008502
| 2023-08-26T19:24:39
| 2023-08-27T11:02:30
| 68,402,336
| 9,529
| 1,385
|
BSD-2-Clause
| 2023-09-13T13:37:33
| 2016-09-16T17:52:50
|
Python
|
UTF-8
|
Python
| false
| false
| 740
|
py
|
test_lrt.py
|
from streamlink.plugins.lrt import LRT
from tests.plugins import PluginCanHandleUrl
class TestPluginCanHandleUrlLRT(PluginCanHandleUrl):
    # Matrix test: PluginCanHandleUrl asserts every URL in should_match is
    # accepted by the plugin's URL matcher and every should_not_match URL
    # is rejected.
    __plugin__ = LRT

    # Live channel pages the plugin must claim.
    should_match = [
        "https://www.lrt.lt/mediateka/tiesiogiai/lrt-opus",
        "https://www.lrt.lt/mediateka/tiesiogiai/lrt-klasika",
        "https://www.lrt.lt/mediateka/tiesiogiai/lrt-radijas",
        "https://www.lrt.lt/mediateka/tiesiogiai/lrt-lituanica",
        "https://www.lrt.lt/mediateka/tiesiogiai/lrt-plius",
        "https://www.lrt.lt/mediateka/tiesiogiai/lrt-televizija",
    ]
    # Non-live pages (site root, VOD recording) the plugin must not claim.
    should_not_match = [
        "https://www.lrt.lt",
        "https://www.lrt.lt/mediateka/irasas/1013694276/savanoriai-tures-galimybe-pamatyti-popieziu-is-arciau",
    ]
|
1771cb68256246e11592c258f6a43508ad8437d0
|
e9869359c839c8c175ae7877bc35dcfdfe4058f8
|
/kornia/losses/psnr.py
|
37688a304da733188eee72ab2c3ef31cc8f2b57c
|
[
"Apache-2.0"
] |
permissive
|
kornia/kornia
|
80f93eae6a70b8bc0c9784f92a842ab9a6ab54ae
|
1e0f8baa7318c05b17ea6dbb48605691bca8972f
|
refs/heads/master
| 2023-08-31T06:32:45.960859
| 2023-08-30T21:59:41
| 2023-08-30T21:59:41
| 145,693,916
| 7,351
| 833
|
Apache-2.0
| 2023-09-12T21:59:29
| 2018-08-22T10:31:37
|
Python
|
UTF-8
|
Python
| false
| false
| 1,776
|
py
|
psnr.py
|
from __future__ import annotations
import torch
from torch import nn
from kornia import metrics
def psnr_loss(input: torch.Tensor, target: torch.Tensor, max_val: float) -> torch.Tensor:
    r"""Compute the PSNR loss, i.e. the negated Peak Signal-to-Noise Ratio.

    The loss is computed as follows:

    .. math::

        \text{loss} = -\text{psnr(x, y)}

    See :meth:`~kornia.losses.psnr` for details abut PSNR.

    Args:
        input: the input image with shape :math:`(*)`.
        target: the labels image with shape :math:`(*)`.
        max_val: The maximum value in the input tensor.

    Return:
        the computed loss as a scalar.

    Examples:
        >>> ones = torch.ones(1)
        >>> psnr_loss(ones, 1.2 * ones, 2.)  # 10 * log(4/((1.2-1)**2)) / log(10)
        tensor(-20.0000)
    """
    score = metrics.psnr(input, target, max_val)
    return -1.0 * score
class PSNRLoss(nn.Module):
    r"""Criterion computing the PSNR loss (the negated PSNR metric).

    The loss is computed as follows:

    .. math::

        \text{loss} = -\text{psnr(x, y)}

    See :meth:`~kornia.losses.psnr` for details abut PSNR.

    Args:
        max_val: The maximum value in the input tensor.

    Shape:
        - Input: arbitrary dimensional tensor :math:`(*)`.
        - Target: arbitrary dimensional tensor :math:`(*)` same shape as input.
        - Output: a scalar.

    Examples:
        >>> ones = torch.ones(1)
        >>> criterion = PSNRLoss(2.)
        >>> criterion(ones, 1.2 * ones)  # 10 * log(4/((1.2-1)**2)) / log(10)
        tensor(-20.0000)
    """

    def __init__(self, max_val: float) -> None:
        super().__init__()
        self.max_val: float = max_val

    def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        # Same computation as the psnr_loss functional form.
        return -1.0 * metrics.psnr(input, target, self.max_val)
|
6d5097a131beb01e00f5d64cf4307e50252d719f
|
4207610c48cbb9021f4420791a9c9d07550b72d9
|
/JUCE/docs/doxygen/process_source_files.py
|
d9862ff708e69b381f0d5a2ef7a61305ec1ea74e
|
[
"GPL-1.0-or-later",
"LicenseRef-scancode-proprietary-license",
"GPL-3.0-only",
"ISC",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only",
"BSD-3-Clause"
] |
permissive
|
RomanKubiak/ctrlr
|
d98a910eb46f6cf4324da3fc3ae664cc7dd5aec9
|
8aa00d82127acda42ad9ac9b7b479461e9436aa4
|
refs/heads/master
| 2023-02-13T00:21:04.546585
| 2022-06-24T10:53:16
| 2022-06-24T10:53:16
| 14,671,067
| 435
| 82
|
BSD-3-Clause
| 2022-05-31T18:18:23
| 2013-11-24T22:53:11
|
C++
|
UTF-8
|
Python
| false
| false
| 6,593
|
py
|
process_source_files.py
|
#!/usr/bin/env python
import os
import shutil
import re
import argparse
def get_curly_brace_scope_end(string, start_pos):
    """Given a string and a starting position of an opening brace, find the
    position of the closing brace. Returns -1 when no match is found.
    """
    depth = 1
    for pos in range(start_pos + 1, len(string)):
        char = string[pos]
        if char == "{":
            depth += 1
        elif char == "}":
            depth -= 1
            if depth == 0:
                return pos
    return -1
def remove_juce_namespaces(source):
    """Return *source* with any ``namespace juce { ... }`` wrappers removed.

    The namespace opening (including its brace) and the matching closing
    brace are deleted; the block's contents are kept.

    Raises:
        ValueError: if a namespace opening has no matching closing brace.
    """
    namespace_regex = re.compile(r"\s+namespace\s+juce\s*{")
    match = namespace_regex.search(source)
    while (match is not None):
        # Drop the "namespace juce {" text itself.
        source = source[:match.start()] + source[match.end():]
        # The opening brace was consumed with the match, so scan from just
        # before the removal point with an implicit open depth of one.
        end = get_curly_brace_scope_end(source, match.start() - 1)
        if end == -1:
            # BUGFIX: the original error path called match.group(1) on a
            # pattern with no capture groups, raising IndexError instead of
            # the intended ValueError. (The dead `continue` is also gone.)
            raise ValueError("failed to find the end of a juce namespace")
        source = source[:end] + source[end + 1:]
        match = namespace_regex.search(source)
    return source
def add_doxygen_group(path, group_name):
    """Wrap the source file at ``path`` in the Doxygen group ``group_name``.

    Only files matching ``juce_*.h`` / ``juce_*.dox`` are modified.  The file
    is rewritten with @weakgroup markers around its content, and any juce
    namespaces are stripped first: the addition of juce namespacing code to
    the source files broke backwards compatibility by changing the doc URLs.
    """
    if not re.match(r"^juce_.*\.(h|dox)", os.path.basename(path)):
        return
    with open(path, "r") as source_file:
        original = source_file.read()
    header = "\r\n/** @weakgroup " + group_name + "\r\n * @{\r\n */\r\n"
    footer = "\r\n/** @}*/\r\n"
    with open(path, "w") as dest_file:
        dest_file.write(header + remove_juce_namespaces(original) + footer)
###############################################################################
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("source_dir",
                        help="the directory to search for source files")
    parser.add_argument("dest_dir",
                        help="the directory in which to place processed files")
    # Bug fix: the two implicitly-concatenated help strings were missing a
    # separating space ("comma separatedsubdirectories").
    parser.add_argument("--subdirs",
                        help="if specified, only include these comma separated "
                             "subdirectories")
    args = parser.parse_args()
    # Start from a clean destination directory.  FileNotFoundError is a
    # subclass of OSError, so the original second except clause was
    # unreachable and has been removed.
    try:
        shutil.rmtree(args.dest_dir)
    except OSError:
        pass
    # Get the list of JUCE modules to include.
    if args.subdirs:
        juce_modules = args.subdirs.split(",")
    else:
        juce_modules = []
        for item in os.listdir(args.source_dir):
            if os.path.isdir(os.path.join(args.source_dir, item)):
                juce_modules.append(item)
    # Copy the JUCE modules to the temporary directory, and process the source
    # files.
    module_definitions = []
    for module_name in juce_modules:
        # Copy the required modules.
        original_module_dir = os.path.join(args.source_dir, module_name)
        module_path = os.path.join(args.dest_dir, module_name)
        shutil.copytree(original_module_dir, module_path)
        # Parse the module header to get module information.
        module_header = os.path.join(module_path, module_name + ".h")
        with open(module_header, "r") as f:
            content = f.read()
        block_info_result = re.match(r".*BEGIN_JUCE_MODULE_DECLARATION"
                                     "(.*)"
                                     "END_JUCE_MODULE_DECLARATION.*",
                                     content,
                                     re.DOTALL)
        detail_lines = []
        for line in block_info_result.group(1).split("\n"):
            stripped_line = line.strip()
            if stripped_line:
                result = re.match(r"^.*?description:\s*(.*)$", stripped_line)
                if result:
                    short_description = result.group(1)
                else:
                    detail_lines.append(stripped_line)
        # The module header causes problems for Doxygen, so delete it.
        os.remove(module_header)
        # Create a Doxygen group definition for the module.
        module_definition = []
        module_definition.append("/** @defgroup {n} {n}".format(n=module_name))
        module_definition.append("    {d}".format(d=short_description))
        module_definition.append("")
        for line in detail_lines:
            module_definition.append("    - {l}".format(l=line))
        module_definition.append("")
        module_definition.append("    @{")
        module_definition.append("*/")
        # Create a list of the directories in the module that we can use as
        # subgroups and create the Doxygen group hierarchy string.
        dir_contents = os.listdir(module_path)
        # Ignore "native" folders as these are excluded by doxygen.
        try:
            dir_contents.remove("native")
        except ValueError:
            pass
        subdirs = []
        for item in dir_contents:
            if (os.path.isdir(os.path.join(module_path, item))):
                subdirs.append(item)
        module_groups = {}
        for subdir in subdirs:
            subgroup_name = "{n}-{s}".format(n=module_name, s=subdir)
            module_groups[subgroup_name] = os.path.join(module_path, subdir)
            module_definition.append("")
            module_definition.append(
                "/** @defgroup {tag} {n} */".format(tag=subgroup_name, n=subdir)
            )
        module_definition.append("")
        module_definition.append("/** @} */")
        module_definitions.append("\r\n".join(module_definition))
        # Put the top level files into the main group.
        for filename in (set(dir_contents) - set(subdirs)):
            add_doxygen_group(os.path.join(module_path, filename), module_name)
        # Put subdirectory files into their respective groups.
        for group_name in module_groups:
            for dirpath, dirnames, filenames in os.walk(module_groups[group_name]):
                for filename in filenames:
                    filepath = os.path.join(dirpath, filename)
                    add_doxygen_group(filepath, group_name)
    # Create an extra header file containing the module hierarchy.
    with open(os.path.join(args.dest_dir, "juce_modules.dox"), "w") as f:
        f.write("\r\n\r\n".join(module_definitions))
|
087e4f262283d7de6aeeca5ae0d3f0bc50c9ae6e
|
72d3d1cda13859b867d398c952ffd8f5cbe90906
|
/test/abstractions/databases/test_singleton_database.py
|
c71ba51e4b17b265e93f20068c5edb8a24309ee5
|
[
"MIT"
] |
permissive
|
facebookresearch/Mephisto
|
a55d553316b4f141a090048ce2ed79de05f6bebb
|
1f540f9bd866d5fd625be4a4d61ad6bce564f1ed
|
refs/heads/main
| 2023-08-19T01:41:05.683789
| 2023-08-17T21:05:06
| 2023-08-17T21:05:06
| 203,213,045
| 281
| 78
|
MIT
| 2023-09-14T21:10:44
| 2019-08-19T16:50:20
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,048
|
py
|
test_singleton_database.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import shutil
import os
import tempfile
from mephisto.abstractions.test.data_model_database_tester import BaseDatabaseTests
from mephisto.abstractions.databases.local_singleton_database import MephistoSingletonDB
class TestMephistoSingletonDB(BaseDatabaseTests):
    """
    Unit testing for the MephistoSingletonDB
    Inherits all tests directly from BaseDataModelTests, and
    writes no additional tests.
    """
    # NOTE(review): presumably tells the shared base suite that this is a
    # concrete subclass whose tests should actually run — confirm against
    # BaseDatabaseTests.
    is_base = False
    def setUp(self):
        # Fresh temporary directory per test so each test gets its own
        # isolated on-disk database file.
        self.data_dir = tempfile.mkdtemp()
        database_path = os.path.join(self.data_dir, "mephisto.db")
        self.db = MephistoSingletonDB(database_path)
    def tearDown(self):
        # Shut down the DB before deleting its backing directory.
        self.db.shutdown()
        shutil.rmtree(self.data_dir)
# TODO(#97) are there any other unit tests we'd like to have?
# Allow running this test module directly with ``python <file>``.
if __name__ == "__main__":
    unittest.main()
|
fd92886c56870f38212e1ff4d463b7f4393988d2
|
d3ef2463f556d6cd166eb29d3a5f5b210a6402e7
|
/tests/cupy_tests/testing_tests/test_helper.py
|
3c6c0b308e5e999aa228cd5b0e229a97665a6a76
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
cupy/cupy
|
ce7a010a57504dbfe4fb5af10d354a22e79f4907
|
96105afb78aa3f8380834d2516184b8365e23fcb
|
refs/heads/main
| 2023-08-31T00:36:47.967611
| 2023-08-30T09:19:27
| 2023-08-30T09:19:27
| 72,523,920
| 7,505
| 1,072
|
MIT
| 2023-09-14T01:04:42
| 2016-11-01T09:54:45
|
Python
|
UTF-8
|
Python
| false
| false
| 5,941
|
py
|
test_helper.py
|
import unittest
import numpy
import pytest
import cupy
from cupy import testing
class TestPackageRequirements:
    """Tests for the version-requirement helpers in ``cupy.testing``."""
    def test_installed(self):
        # testing.installed evaluates pip-style requirement strings against
        # the currently installed package versions.
        assert testing.installed('cupy')
        assert testing.installed('cupy>9', 'numpy>=1.12')
        assert testing.installed('numpy>=1.10,<=2.0')
        assert not testing.installed('numpy>=2.0')
        assert not testing.installed('numpy>1.10,<1.9')
    def test_numpy_satisfies(self):
        assert testing.numpy_satisfies('>1.10')
        assert not testing.numpy_satisfies('>=2.10')
    @testing.with_requires('numpy>2.0')
    def test_with_requires(self):
        # The requirement above is expected NOT to hold in the test
        # environment, so with_requires must skip this test; reaching the
        # body would mean the decorator is broken.
        assert False, 'this should not happen'
@testing.parameterize(*testing.product({
    'xp': [numpy, cupy],
    'shape': [(3, 2), (), (3, 0, 2)],
}))
class TestShapedRandom(unittest.TestCase):
    """Exercises ``testing.shaped_random`` on both backends for normal,
    zero-dimensional, and zero-size shapes."""
    @testing.for_all_dtypes()
    def test_shape_and_dtype(self, dtype):
        a = testing.shaped_random(self.shape, self.xp, dtype)
        # The result must live on the requested backend with the requested
        # shape and dtype.
        assert isinstance(a, self.xp.ndarray)
        assert a.shape == self.shape
        assert a.dtype == dtype
    @testing.for_all_dtypes(no_bool=True, no_complex=True)
    def test_value_range(self, dtype):
        # Sampled values are expected in the half-open range [0, 10).
        a = testing.shaped_random(self.shape, self.xp, dtype)
        assert self.xp.all(0 <= a)
        assert self.xp.all(a < 10)
    @testing.for_complex_dtypes()
    def test_complex(self, dtype):
        a = testing.shaped_random(self.shape, self.xp, dtype)
        # Real and imaginary components are each expected in [0, 10).
        assert self.xp.all(0 <= a.real)
        assert self.xp.all(a.real < 10)
        assert self.xp.all(0 <= a.imag)
        assert self.xp.all(a.imag < 10)
        if 0 not in self.shape:
            # A non-empty complex sample should have some nonzero imaginary
            # content (i.e. it is genuinely complex, not degenerate real).
            assert self.xp.any(a.imag)
@testing.parameterize(*testing.product({
    'xp': [numpy, cupy],
}))
class TestShapedRandomBool(unittest.TestCase):
    def test_bool(self):
        # With 10000 boolean draws the number of True values should be near
        # 5000; the wide 4000..6000 band keeps flakiness negligible.
        a = testing.shaped_random(10000, self.xp, numpy.bool_)
        assert 4000 < self.xp.sum(a) < 6000
@testing.parameterize(*testing.product({
    'dtype': [
        numpy.float16, numpy.float32, numpy.float64,
        numpy.complex64, numpy.complex128,
    ],
    'xp': [numpy, cupy],
    'x_s_shapes': [
        ((0, 0), (0,)),
        ((2, 2), (2,)),
        ((2, 3), (2,)),
        ((3, 2), (2,)),
        # broadcast
        ((2, 2), ()),
    ],
}))
class TestGenerateMatrix(unittest.TestCase):
    """Checks that ``testing.generate_matrix`` produces a matrix whose
    singular values match the requested ones."""
    def test_generate_matrix(self):
        dtype = self.dtype
        x_shape, s_shape = self.x_s_shapes
        # Request singular values in [0.5, 1.5) with the matching real dtype.
        sv = self.xp.random.uniform(
            0.5, 1.5, s_shape).astype(dtype().real.dtype)
        x = testing.generate_matrix(
            x_shape, xp=self.xp, dtype=dtype, singular_values=sv)
        assert x.shape == x_shape
        if 0 in x_shape:
            # Empty matrices have nothing further to verify.
            return
        # Recover the singular values via SVD (in complex128 for accuracy)
        # and compare against the requested ones, sorted descending as SVD
        # returns them.
        s = self.xp.linalg.svd(
            x.astype(numpy.complex128), full_matrices=False, compute_uv=False,
        )
        sv = self.xp.broadcast_to(sv, s.shape)
        sv_sorted = self.xp.sort(sv, axis=-1)[..., ::-1]
        # Looser tolerance for half precision.
        rtol = 1e-3 if dtype == numpy.float16 else 1e-7
        self.xp.testing.assert_allclose(s, sv_sorted, rtol=rtol)
class TestGenerateMatrixInvalid(unittest.TestCase):
    """Checks that ``testing.generate_matrix`` rejects invalid arguments."""
    def test_no_singular_values(self):
        # singular_values is mandatory.
        with self.assertRaises(TypeError):
            testing.generate_matrix((2, 2))
    def test_invalid_shape(self):
        # A matrix needs at least two dimensions.
        with self.assertRaises(ValueError):
            testing.generate_matrix((2,), singular_values=1)
    def test_invalid_dtype_singular_values(self):
        # Singular values must be real-valued.
        with self.assertRaises(TypeError):
            testing.generate_matrix((2, 2), singular_values=1 + 0j)
    def test_invalid_dtype(self):
        # Only floating/complex output dtypes are supported.
        with self.assertRaises(TypeError):
            testing.generate_matrix(
                (2, 2), dtype=numpy.int32, singular_values=1)
    def test_negative_singular_values(self):
        # Singular values are non-negative by definition.
        with self.assertRaises(ValueError):
            testing.generate_matrix((2, 2), singular_values=[1, -1])
    def test_shape_mismatch(self):
        # The number of singular values must match the matrix shape.
        with self.assertRaises(ValueError):
            testing.generate_matrix(
                (2, 2), singular_values=numpy.ones(3))
    def test_shape_mismatch_2(self):
        with self.assertRaises(ValueError):
            testing.generate_matrix(
                (0, 2, 2), singular_values=numpy.ones(3))
class TestAssertFunctionIsCalled(unittest.TestCase):
    """Tests for the ``testing.AssertFunctionIsCalled`` context manager,
    which patches a dotted name and asserts on its call count."""
    def test_patch_ndarray(self):
        orig = cupy.ndarray
        with testing.AssertFunctionIsCalled('cupy.ndarray'):
            a = cupy.ndarray((2, 3), numpy.float32)
        # The patch must be undone on exit; without wraps= the patched
        # callable is a plain mock, so `a` is not a real ndarray.
        assert cupy.ndarray is orig
        assert not isinstance(a, cupy.ndarray)
    def test_spy_ndarray(self):
        orig = cupy.ndarray
        # With wraps= the original is still invoked, so a real ndarray is
        # produced while calls are still being counted.
        with testing.AssertFunctionIsCalled(
                'cupy.ndarray', wraps=cupy.ndarray):
            a = cupy.ndarray((2, 3), numpy.float32)
        assert cupy.ndarray is orig
        assert isinstance(a, cupy.ndarray)
    def test_fail_not_called(self):
        orig = cupy.ndarray
        # Default expectation is exactly one call; zero calls must fail.
        with pytest.raises(AssertionError):
            with testing.AssertFunctionIsCalled('cupy.ndarray'):
                pass
        assert cupy.ndarray is orig
    def test_fail_called_twice(self):
        orig = cupy.ndarray
        # ...and two calls must fail as well.
        with pytest.raises(AssertionError):
            with testing.AssertFunctionIsCalled('cupy.ndarray'):
                cupy.ndarray((2, 3), numpy.float32)
                cupy.ndarray((2, 3), numpy.float32)
        assert cupy.ndarray is orig
    def test_times_called(self):
        orig = cupy.ndarray
        # times_called overrides the expected call count.
        with testing.AssertFunctionIsCalled('cupy.ndarray', times_called=2):
            cupy.ndarray((2, 3), numpy.float32)
            cupy.ndarray((2, 3), numpy.float32)
        assert cupy.ndarray is orig
    def test_inner_error(self):
        orig = cupy.ndarray
        # An exception raised inside the block must propagate unchanged and
        # still restore the patched name.
        with pytest.raises(numpy.AxisError):
            with testing.AssertFunctionIsCalled('cupy.ndarray'):
                cupy.ndarray((2, 3), numpy.float32)
                raise numpy.AxisError('foo')
        assert cupy.ndarray is orig
|
22749c4cdfe30c9eded6cd71fa737af0532a4af9
|
41198b450282c36a1d39f361dd99fe423de989da
|
/mantraml/data/finders.py
|
7a9bd3307b500511c5dbc7a9011311721cf1d2b1
|
[
"Apache-2.0"
] |
permissive
|
RJT1990/mantra
|
4ff49742e68471a11fc6b6060a9b8b1cd3ab3b88
|
7db4d272a1625c33eaa681b8c2e75c0aa57c6952
|
refs/heads/master
| 2022-12-10T07:29:27.803262
| 2019-12-10T23:05:23
| 2019-12-10T23:05:23
| 146,776,545
| 332
| 22
|
Apache-2.0
| 2022-12-08T03:00:07
| 2018-08-30T16:25:10
|
Python
|
UTF-8
|
Python
| false
| false
| 3,522
|
py
|
finders.py
|
from .Dataset import Dataset
from .ImageDataset import ImageDataset
from .TabularDataset import TabularDataset
from mantraml.models import MantraModel
from mantraml.tasks.Task import Task
# Base model, data and task class names; subclasses of these are the
# user-defined classes that the finder functions below search for, while
# the base names themselves are skipped.
BASE_MODEL_CLASSES = ['MantraModel']
BASE_DATA_CLASSES = ['Dataset', 'TabularDataset', 'ImageDataset']
BASE_TASK_CLASSES = ['Task']
def find_dataset_class(data_module):
    """Return the first mantraml Dataset subclass defined in ``data_module``.

    A Dataset is expected to be a singleton within a data project; the base
    classes themselves (Dataset, TabularDataset, ImageDataset) are skipped.

    Parameters
    -----------
    data_module - module
        Module that potentially contains mantraml.Dataset type objects

    Returns
    ------------
    The first Dataset subclass found, or None if the module defines none.
    """
    for name in data_module.__dict__:
        if name in BASE_DATA_CLASSES:
            continue
        candidate = data_module.__dict__[name]
        if hasattr(candidate, '__bases__'):
            if candidate.__bases__[0] in [Dataset, TabularDataset, ImageDataset]:
                return candidate
    return None
def find_model_class(model_module):
    """Return the first mantraml model class defined in ``model_module``.

    A model is expected to be a singleton within a project; the MantraModel
    base class itself is skipped.

    Bug fix: the loop previously had no ``break``, so a module defining
    several MantraModel subclasses returned the *last* one — contradicting
    this docstring and the behaviour of find_dataset_class/find_task_class.

    Parameters
    -----------
    model_module - module
        Module that potentially contains mantraml.BaseModel type objects

    Returns
    ------------
    The first MantraModel subclass found, or None if the module defines none.
    """
    model = None
    for obj_key, obj_value in model_module.__dict__.items():
        if obj_key in BASE_MODEL_CLASSES:
            continue
        elif hasattr(model_module.__dict__[obj_key], '__bases__'):
            if model_module.__dict__[obj_key].__bases__[0] in [MantraModel]:
                model = model_module.__dict__[obj_key]
                break  # stop at the first match, consistent with the siblings
    return model
def find_task_class(task_module):
    """Return the first mantraml Task subclass defined in ``task_module``.

    A Task is expected to be a singleton within a task project; the Task
    base class itself is skipped.

    Parameters
    -----------
    task_module - module
        Module that potentially contains mantraml.Task type objects

    Returns
    ------------
    The first Task subclass found, or None if the module defines none.
    """
    for name in task_module.__dict__:
        if name in BASE_TASK_CLASSES:
            continue
        candidate = task_module.__dict__[name]
        if hasattr(candidate, '__bases__') and candidate.__bases__[0] in [Task]:
            return candidate
    return None
def find_framework(model_module):
    """Guess the deep learning framework used by a module.

    Heuristic: counts attribute names containing 'tensorflow'/'tf' versus
    names containing 'torch'.  A tie with at least one hit resolves to
    'tensorflow'.

    Parameters
    -----------
    model_module - module
        Module that contains the core model logic

    Returns
    ------------
    str - 'tensorflow', 'pytorch' or 'none'
    """
    names = dir(model_module)
    tf_hits = sum(1 for name in names if 'tensorflow' in name or 'tf' in name)
    torch_hits = sum(1 for name in names if 'torch' in name)
    if not (tf_hits or torch_hits):
        return 'none'
    if torch_hits > tf_hits:
        return 'pytorch'
    return 'tensorflow'
|
3738f918b17732a947f082fb06b0d77c2e54a1d6
|
3a6a211ea0d32405497fbd6486c490bb147e25f9
|
/systrace/systrace/tracing_agents/agents_unittest.py
|
b9596ef6cebef52c0fd52aec0ec7d1882f556341
|
[
"BSD-3-Clause"
] |
permissive
|
catapult-project/catapult
|
e2cbdd5eb89f3b1492fc8752494e62ea1df4bae0
|
53102de187a48ac2cfc241fef54dcbc29c453a8e
|
refs/heads/main
| 2021-05-25T07:37:22.832505
| 2021-05-24T08:01:49
| 2021-05-25T06:07:38
| 33,947,548
| 2,032
| 742
|
BSD-3-Clause
| 2022-08-26T16:01:18
| 2015-04-14T17:49:05
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,479
|
py
|
agents_unittest.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from systrace import util
from devil.android import device_utils
from devil.android.sdk import intent
from devil.android.sdk import keyevent
class BaseAgentTest(unittest.TestCase):
    """Shared fixture for tracing-agent tests: ensures a Chrome browser is
    running on the first healthy attached Android device."""
    def setUp(self):
        # Pick the first healthy attached device and the stable-channel
        # browser package description.
        devices = device_utils.DeviceUtils.HealthyDevices()
        self.browser = 'stable'
        self.package_info = util.get_supported_browsers()[self.browser]
        self.device = devices[0]
        # Launch the browser only if it is not already running.
        # NOTE(review): relies on GetChromeProcessID returning None when the
        # browser is not running — confirm against devil's GetApplicationPids.
        curr_browser = self.GetChromeProcessID()
        if curr_browser is None:
            self.StartBrowser()
    def tearDown(self):
        # Stop the browser after each test to ensure that it doesn't interfere
        # with subsequent tests, e.g. by holding the devtools socket open.
        self.device.ForceStop(self.package_info.package)
    def StartBrowser(self):
        """Wake and unlock the device, then launch the browser on about:blank."""
        # Turn on the device screen.
        self.device.SetScreen(True)
        # Unlock device.
        self.device.SendKeyEvent(keyevent.KEYCODE_MENU)
        # Start browser.
        self.device.StartActivity(
            intent.Intent(activity=self.package_info.activity,
                          package=self.package_info.package,
                          data='about:blank',
                          extras={'create_new_tab': True}),
            blocking=True, force_stop=True)
    def GetChromeProcessID(self):
        # at_most_one=True yields a single PID rather than a list.
        return self.device.GetApplicationPids(
            self.package_info.package, at_most_one=True)
|
796244a20420f0c16b7651bb314133bad23420ad
|
1ca288c3f3c54db93fe4828214a81f6687105a1e
|
/qa/rpc-tests/test_framework/constants.py
|
a1af20d5f2bb16815f071114601517ed7ce39971
|
[
"MIT"
] |
permissive
|
BitcoinUnlimited/BitcoinUnlimited
|
489c91a9184bdbad3824a2ce3126d2e9c0786e5d
|
05de381c02eb4bfca94957733acadfa217527f25
|
refs/heads/release
| 2023-06-01T08:11:18.920865
| 2021-03-29T15:58:02
| 2021-03-29T15:58:02
| 18,613,259
| 546
| 301
|
MIT
| 2021-01-04T01:05:24
| 2014-04-09T21:03:00
|
C++
|
UTF-8
|
Python
| false
| false
| 120
|
py
|
constants.py
|
# Transaction signature hash (sighash) flag constants, written in hex to
# make the bit-flag values explicit.
SIGHASH_ALL = 0x01
SIGHASH_NONE = 0x02
SIGHASH_SINGLE = 0x03
SIGHASH_FORKID = 0x40
SIGHASH_ANYONECANPAY = 0x80
|
6a84839880bfa98ac4b2db9287bf64a0aa64d640
|
807b63a4dda1e4bcf33a9b456cb9535eb69625fc
|
/python/stencila/types/image_object_or_str.py
|
fc1b6fde6c76ce935153f012eaa32901d22dc980
|
[
"Apache-2.0"
] |
permissive
|
stencila/stencila
|
4d63a5653adb67a45dd5eb11c7a27d569f57a49e
|
eac602910d009d7db7048b28b4049ecc952ecd32
|
refs/heads/main
| 2023-08-30T18:34:19.055238
| 2023-08-30T07:14:02
| 2023-08-30T07:14:02
| 4,503,128
| 719
| 51
|
Apache-2.0
| 2023-09-14T21:35:38
| 2012-05-31T02:43:31
|
Rust
|
UTF-8
|
Python
| false
| false
| 216
|
py
|
image_object_or_str.py
|
# Generated file; do not edit. See the Rust `schema-gen` crate.
from .prelude import *
# Forward reference: ImageObject is declared elsewhere in the generated
# schema package.
ImageObject = ForwardRef("ImageObject")
# Union alias used anywhere the schema accepts either a full ImageObject
# node or a plain string.
ImageObjectOrStr = Union[
    ImageObject,
    str,
]
"""
`ImageObject` or `str`
"""
|
490e101d52de75bcf3caa2224df81c4e8a3a9283
|
59b374ac54ced5e006bd33773de4526112a88a27
|
/src/unittest/modules/test_autocomplete.py
|
45779e9e32df48416c4eb6b04f16ef7566409acd
|
[
"MIT"
] |
permissive
|
gabfl/vault
|
318cc53865c9d1786f304cc2f1605332a102b075
|
dbf49f42ae391128399682dfb6910ffd4f8856a1
|
refs/heads/main
| 2023-06-21T21:27:06.959402
| 2023-05-08T23:43:14
| 2023-05-08T23:43:14
| 95,264,151
| 188
| 64
|
MIT
| 2023-09-07T23:56:29
| 2017-06-23T23:47:27
|
Python
|
UTF-8
|
Python
| false
| false
| 2,228
|
py
|
test_autocomplete.py
|
from unittest.mock import patch
from ..base import BaseTest
from ...modules import autocomplete
class Test(BaseTest):
    """Tests for the readline-based ``autocomplete`` module."""
    def test_set_parameters(self):
        # set_parameters stores the completion word list and the
        # case-sensitivity flag in module-level state.
        autocomplete.set_parameters(
            list_=['four', 'five', 'six'],
            case_sensitive=False
        )
        assert autocomplete.completion_list == ['four', 'five', 'six']
        assert autocomplete.is_case_sensitive is False
    def test_autocomplete(self):
        autocomplete.set_parameters([
            'one_thing', 'one_other_thing', 'third_thing'])
        # readline calls the completer with increasing `state` values;
        # successive matches are returned until None signals exhaustion.
        with patch('readline.get_line_buffer', return_value='one'):
            assert autocomplete.autocomplete('on', state=0) == 'one_thing'
            assert autocomplete.autocomplete(
                'on', state=1) == 'one_other_thing'
            assert autocomplete.autocomplete('on', state=2) is None
    def test_autocomplete_2(self):
        # Matching defaults to case-sensitive, so an upper-case buffer
        # matches nothing.
        autocomplete.set_parameters([
            'one_thing', 'one_other_thing', 'third_thing'])
        with patch('readline.get_line_buffer', return_value='ONE'):
            assert autocomplete.autocomplete('on', state=0) is None
    def test_autocomplete_3(self):
        # With case_sensitive=False the upper-case buffer still matches.
        autocomplete.set_parameters(list_=[
            'one_thing', 'one_other_thing', 'third_thing'],
            case_sensitive=False)
        with patch('readline.get_line_buffer', return_value='ONE'):
            assert autocomplete.autocomplete('on', state=0) == 'one_thing'
            assert autocomplete.autocomplete(
                'on', state=1) == 'one_other_thing'
            assert autocomplete.autocomplete('on', state=2) is None
    def test_find_breaking_strings(self):
        # Returns the index just past the last word-breaking character
        # (e.g. '@', '*', space) in the buffer; 0 when there is none.
        assert autocomplete.find_breaking_strings('abc') == 0
        assert autocomplete.find_breaking_strings('abc@def') == 3
        assert autocomplete.find_breaking_strings('abc@def*ghi') == 7
        assert autocomplete.find_breaking_strings(
            'some@email.com somethingelse') == 14
    def test_get_input_autocomplete(self):
        autocomplete.set_parameters([
            'one_thing', 'one_other_thing', 'third_thing'])
        # The autocompleting prompt ultimately reads via builtins.input.
        with patch('builtins.input', return_value='some_value'):
            assert autocomplete.get_input_autocomplete() == 'some_value'
|
c8f034e2732f818793d8414b1296e7bedcccefd5
|
a8ca3225e24c8b093056ce6baa1db6ba3aea8f97
|
/examples/_archived/plot_inv_grav_linear.py
|
d84bcc5bd9c0eb1f5560737782815f01965ce96d
|
[
"MIT"
] |
permissive
|
simpeg/simpeg
|
3e8779392d7b26fe576a7a665205068989d8f4d8
|
ebde5856c318f7b4deb92d755b4fefe19012c48e
|
refs/heads/main
| 2023-09-03T18:49:03.545965
| 2023-08-27T15:45:50
| 2023-08-27T15:45:50
| 14,727,320
| 437
| 268
|
MIT
| 2023-09-10T18:16:22
| 2013-11-26T19:46:36
|
Python
|
UTF-8
|
Python
| false
| false
| 8,961
|
py
|
plot_inv_grav_linear.py
|
"""
PF: Gravity: Inversion Linear
=============================
Create a synthetic block model and invert
with a compact norm
"""
import numpy as np
import matplotlib.pyplot as plt
from discretize import TensorMesh
from discretize.utils import active_from_xyz
from SimPEG.potential_fields import gravity
from SimPEG import (
maps,
data,
data_misfit,
regularization,
optimization,
inverse_problem,
directives,
inversion,
)
from SimPEG import utils
from SimPEG.utils import plot2Ddata
def run(plotIt=True):
    """Build a synthetic two-block density model, simulate gravity data,
    and invert it with a sparse (compact, l0-style) norm.

    Parameters
    ----------
    plotIt : bool
        If True, plot the observed data, the recovered l2 and lp models
        against the true model, and the convergence curves.
    """
    # Create a mesh
    dx = 5.0
    hxind = [(dx, 5, -1.3), (dx, 15), (dx, 5, 1.3)]
    hyind = [(dx, 5, -1.3), (dx, 15), (dx, 5, 1.3)]
    hzind = [(dx, 5, -1.3), (dx, 7), (3.5, 1), (2, 5)]
    mesh = TensorMesh([hxind, hyind, hzind], "CCC")
    # Get index of the center
    midx = int(mesh.shape_cells[0] / 2)
    midy = int(mesh.shape_cells[1] / 2)
    # Lets create a simple Gaussian topo and set the active cells
    [xx, yy] = np.meshgrid(mesh.nodes_x, mesh.nodes_y)
    zz = -np.exp((xx**2 + yy**2) / 75**2) + mesh.nodes_z[-1]
    # We would usually load a topofile
    topo = np.c_[utils.mkvc(xx), utils.mkvc(yy), utils.mkvc(zz)]
    # Go from topo to array of indices of active cells
    actv = active_from_xyz(mesh, topo, "N")
    actv = np.where(actv)[0]
    nC = len(actv)
    # Create and array of observation points
    xr = np.linspace(-30.0, 30.0, 20)
    yr = np.linspace(-30.0, 30.0, 20)
    X, Y = np.meshgrid(xr, yr)
    # Move the observation points 5m above the topo
    Z = -np.exp((X**2 + Y**2) / 75**2) + mesh.nodes_z[-1] + 0.1
    # Create a GRAVsurvey
    rxLoc = np.c_[utils.mkvc(X.T), utils.mkvc(Y.T), utils.mkvc(Z.T)]
    rxLoc = gravity.receivers.Point(rxLoc)
    srcField = gravity.sources.SourceField([rxLoc])
    survey = gravity.survey.Survey(srcField)
    # We can now create a density model and generate data
    # Here a simple block in half-space
    # Two adjacent blocks of opposite density contrast (+/- 0.75).
    model = np.zeros(mesh.shape_cells)
    model[(midx - 5) : (midx - 1), (midy - 2) : (midy + 2), -10:-6] = 0.75
    model[(midx + 1) : (midx + 5), (midy - 2) : (midy + 2), -10:-6] = -0.75
    model = utils.mkvc(model)
    model = model[actv]
    # Create active map to go from reduce set to full
    actvMap = maps.InjectActiveCells(mesh, actv, -100)
    # Create reduced identity map
    idenMap = maps.IdentityMap(nP=nC)
    # Create the forward simulation
    simulation = gravity.simulation.Simulation3DIntegral(
        survey=survey, mesh=mesh, rhoMap=idenMap, ind_active=actv
    )
    # Compute linear forward operator and compute some data
    d = simulation.fields(model)
    # Add noise and uncertainties
    # We add some random Gaussian noise (1nT)
    synthetic_data = d + np.random.randn(len(d)) * 1e-3
    wd = np.ones(len(synthetic_data)) * 1e-3  # Assign flat uncertainties
    data_object = data.Data(survey, dobs=synthetic_data, noise_floor=wd)
    m0 = np.ones(nC) * 1e-4  # Starting model
    # Create sensitivity weights from our linear forward operator
    rxLoc = survey.source_field.receiver_list[0].locations
    # Create a regularization
    reg = regularization.Sparse(mesh, indActive=actv, mapping=idenMap)
    # l0-style norms on the model and its three spatial gradients.
    reg.norms = [0, 0, 0, 0]
    # Data misfit function
    dmis = data_misfit.L2DataMisfit(data=data_object, simulation=simulation)
    dmis.W = utils.sdiag(1 / wd)
    # Add directives to the inversion
    opt = optimization.ProjectedGNCG(
        maxIter=100, lower=-1.0, upper=1.0, maxIterLS=20, maxIterCG=10, tolCG=1e-3
    )
    invProb = inverse_problem.BaseInvProblem(dmis, reg, opt)
    betaest = directives.BetaEstimate_ByEig(beta0_ratio=1e-1)
    # Here is where the norms are applied
    # Use pick a threshold parameter empirically based on the distribution of
    # model parameters
    update_IRLS = directives.Update_IRLS(
        f_min_change=1e-4,
        max_irls_iterations=30,
        coolEpsFact=1.5,
        beta_tol=1e-2,
    )
    saveDict = directives.SaveOutputEveryIteration(save_txt=False)
    update_Jacobi = directives.UpdatePreconditioner()
    sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False)
    inv = inversion.BaseInversion(
        invProb,
        directiveList=[
            update_IRLS,
            sensitivity_weights,
            betaest,
            update_Jacobi,
            saveDict,
        ],
    )
    # Run the inversion
    mrec = inv.run(m0)
    if plotIt:
        # Here is the recovered susceptibility model
        ypanel = midx
        zpanel = -7
        # Map the reduced (active-cell) models back onto the full mesh;
        # inactive cells were filled with -100 and are blanked to NaN.
        m_l2 = actvMap * invProb.l2model
        m_l2[m_l2 == -100] = np.nan
        m_lp = actvMap * mrec
        m_lp[m_lp == -100] = np.nan
        m_true = actvMap * model
        m_true[m_true == -100] = np.nan
        vmin, vmax = mrec.min(), mrec.max()
        # Plot the data
        plot2Ddata(rxLoc, data_object.dobs)
        plt.figure()
        # Plot L2 model
        ax = plt.subplot(321)
        mesh.plot_slice(
            m_l2, ax=ax, normal="Z", ind=zpanel, grid=True, clim=(vmin, vmax)
        )
        plt.plot(
            ([mesh.cell_centers_x[0], mesh.cell_centers_x[-1]]),
            ([mesh.cell_centers_y[ypanel], mesh.cell_centers_y[ypanel]]),
            color="w",
        )
        plt.title("Plan l2-model.")
        plt.gca().set_aspect("equal")
        plt.ylabel("y")
        ax.xaxis.set_visible(False)
        plt.gca().set_aspect("equal", adjustable="box")
        # Vertical section
        ax = plt.subplot(322)
        mesh.plot_slice(m_l2, ax=ax, normal="Y", ind=midx, grid=True, clim=(vmin, vmax))
        plt.plot(
            ([mesh.cell_centers_x[0], mesh.cell_centers_x[-1]]),
            ([mesh.cell_centers_z[zpanel], mesh.cell_centers_z[zpanel]]),
            color="w",
        )
        plt.title("E-W l2-model.")
        plt.gca().set_aspect("equal")
        ax.xaxis.set_visible(False)
        plt.ylabel("z")
        plt.gca().set_aspect("equal", adjustable="box")
        # Plot Lp model
        ax = plt.subplot(323)
        mesh.plot_slice(
            m_lp, ax=ax, normal="Z", ind=zpanel, grid=True, clim=(vmin, vmax)
        )
        plt.plot(
            ([mesh.cell_centers_x[0], mesh.cell_centers_x[-1]]),
            ([mesh.cell_centers_y[ypanel], mesh.cell_centers_y[ypanel]]),
            color="w",
        )
        plt.title("Plan lp-model.")
        plt.gca().set_aspect("equal")
        ax.xaxis.set_visible(False)
        plt.ylabel("y")
        plt.gca().set_aspect("equal", adjustable="box")
        # Vertical section
        ax = plt.subplot(324)
        mesh.plot_slice(m_lp, ax=ax, normal="Y", ind=midx, grid=True, clim=(vmin, vmax))
        plt.plot(
            ([mesh.cell_centers_x[0], mesh.cell_centers_x[-1]]),
            ([mesh.cell_centers_z[zpanel], mesh.cell_centers_z[zpanel]]),
            color="w",
        )
        plt.title("E-W lp-model.")
        plt.gca().set_aspect("equal")
        ax.xaxis.set_visible(False)
        plt.ylabel("z")
        plt.gca().set_aspect("equal", adjustable="box")
        # Plot True model
        ax = plt.subplot(325)
        mesh.plot_slice(
            m_true, ax=ax, normal="Z", ind=zpanel, grid=True, clim=(vmin, vmax)
        )
        plt.plot(
            ([mesh.cell_centers_x[0], mesh.cell_centers_x[-1]]),
            ([mesh.cell_centers_y[ypanel], mesh.cell_centers_y[ypanel]]),
            color="w",
        )
        plt.title("Plan true model.")
        plt.gca().set_aspect("equal")
        plt.xlabel("x")
        plt.ylabel("y")
        plt.gca().set_aspect("equal", adjustable="box")
        # Vertical section
        ax = plt.subplot(326)
        mesh.plot_slice(
            m_true, ax=ax, normal="Y", ind=midx, grid=True, clim=(vmin, vmax)
        )
        plt.plot(
            ([mesh.cell_centers_x[0], mesh.cell_centers_x[-1]]),
            ([mesh.cell_centers_z[zpanel], mesh.cell_centers_z[zpanel]]),
            color="w",
        )
        plt.title("E-W true model.")
        plt.gca().set_aspect("equal")
        plt.xlabel("x")
        plt.ylabel("z")
        plt.gca().set_aspect("equal", adjustable="box")
        # Plot convergence curves
        plt.figure()
        axs = plt.subplot()
        axs.plot(saveDict.phi_d, "k", lw=2)
        # Vertical marker at the iteration where IRLS steps begin.
        axs.plot(
            np.r_[update_IRLS.iterStart, update_IRLS.iterStart],
            np.r_[0, np.max(saveDict.phi_d)],
            "k:",
        )
        twin = axs.twinx()
        twin.plot(saveDict.phi_m, "k--", lw=2)
        axs.text(
            update_IRLS.iterStart,
            np.max(saveDict.phi_d) / 2.0,
            "IRLS Steps",
            va="bottom",
            ha="center",
            rotation="vertical",
            size=12,
            bbox={"facecolor": "white"},
        )
        axs.set_ylabel(r"$\phi_d$", size=16, rotation=0)
        axs.set_xlabel("Iterations", size=14)
        twin.set_ylabel(r"$\phi_m$", size=16, rotation=0)
# Script entry point: run the inversion example and display the figures.
if __name__ == "__main__":
    run()
    plt.show()
|
dbd5251540993fb7900cf1ec37fd10d248ed15cb
|
0910b22407c106e00cc1fc9d986afa9e94c50266
|
/jarvis/analysis/thermodynamics/energetics.py
|
0ff9cfdf5ea65d2581677f48d0adc081279dd48b
|
[
"NIST-PD"
] |
permissive
|
usnistgov/jarvis
|
86f3253efc4a7b308c3d5918a60af3b969d800bc
|
e43b65a1bb85bf88286a009671fa620519677a68
|
refs/heads/master
| 2023-08-23T09:50:20.734795
| 2023-08-20T02:07:14
| 2023-08-20T02:07:14
| 95,149,886
| 252
| 118
|
NOASSERTION
| 2023-08-20T02:07:15
| 2017-06-22T19:34:02
|
Python
|
UTF-8
|
Python
| false
| false
| 14,613
|
py
|
energetics.py
|
"""Get formation energy, convex hull etc.."""
import os
from scipy.spatial import ConvexHull
import numpy as np
from jarvis.db.figshare import data
from jarvis.core.atoms import Atoms
from jarvis.db.jsonutils import loadjson
from collections import OrderedDict
from jarvis.core.composition import Composition
import re
# import matplotlib.pyplot as plt
def get_optb88vdw_energy():
    """Load OptB88vdW per-atom reference energies for the elements."""
    json_path = os.path.join(os.path.dirname(__file__), "unary.json")
    return loadjson(json_path)
def get_unary_qe_tb_energy():
    """Load elemental chemical potentials for the GBRV tight-binding project."""
    json_path = os.path.join(os.path.dirname(__file__), "unary_qe_tb.json")
    return loadjson(json_path)
def isfloat(value):
    """Return True if ``value`` can be converted with ``float()``.

    Accepts numeric strings ("1.5", "nan") as well as numbers.  Unlike the
    original, objects whose conversion raises TypeError rather than
    ValueError (e.g. None or a list) are reported as False instead of
    crashing.
    """
    try:
        float(value)
        return True
    except (TypeError, ValueError):
        return False
def unary_energy(el="Na", chem_pots=None):
    """Return the reference energy per atom for element ``el``.

    Looks the element up in ``chem_pots`` (a mapping of symbol ->
    {"energy": ...}); falls back to the OptB88vdW references when no
    mapping is supplied.  Returns the string "na" when the element is
    not present.
    """
    pots = get_optb88vdw_energy() if chem_pots is None else chem_pots
    en = "na"
    target = str(el)
    for symbol, info in pots.items():
        if str(symbol) == target:
            en = info["energy"]
    return en
def form_enp(atoms=None, total_energy=None, chem_pots=None):
    """
    Calculate formation energy per atom from a total energy and Atoms object.

    Uses OptB88vdW elemental chemical potentials by default (see
    ``unary_energy``), but any symbol -> {"energy": ...} mapping can be
    supplied via ``chem_pots``.

    Raises:
        ValueError: if an elemental reference energy is missing.
    """
    dd = atoms.composition.to_dict()
    ref = 0.0
    for k, v in dd.items():
        e1 = unary_energy(el=k, chem_pots=chem_pots)
        if e1 == "na":
            # Bug fix: the ValueError was previously constructed but never
            # raised, so the function either silently returned None or
            # crashed later with a TypeError on the string "na".
            raise ValueError("Element reference does not exist", k)
        ref = ref + float(v) * float(e1)
    # Formation energy per atom, rounded to 5 decimal places as before.
    form_en = float(total_energy) - ref
    return round(form_en / float(atoms.num_atoms), 5)
def get_twod_defect_energy(vrun="", jid="", atom=""):
    """Get mono 2D defect formation energy with OptB88vdW data.

    Parameters:
        vrun: Vasprun-like object for the defected cell; its last structure
            and final energy are used.
        jid: JARVIS-ID of the pristine 2D host in the dft_2d dataset.
        atom: element symbol whose OptB88vdW chemical potential is used.
    """
    # NOTE(review): fetches/loads the entire dft_2d dataset on every call.
    dft2d = data("dft_2d")
    def get_enp_jid(jid=""):
        # Bulk energy per atom of the pristine host, looked up by JID.
        # Returns None implicitly if the JID is not found.
        for i in dft2d:
            if i["jid"] == jid:
                return (
                    i["optb88vdw_total_energy"]
                    / Atoms.from_dict(i["atoms"]).num_atoms
                )
    # dir='JVASP-667_C_C_c'
    # tmp=dir.split('_')
    # jid=tmp[0]
    # atom=tmp[2]
    strt = vrun.all_structures[-1]
    natoms = strt.num_atoms
    fin_en = vrun.final_energy
    chem_pot = unary_energy(atom)
    bulk_en_pa = get_enp_jid(jid)
    # E_f = E(defect) - (N+1)*E_bulk/atom + mu(atom); the (natoms + 1)
    # factor presumably assumes a single missing atom (vacancy) — confirm.
    Ef = fin_en - (natoms + 1) * bulk_en_pa + chem_pot
    return Ef
class PhaseDiagram:
"""Module for phase diagram."""
    def __init__(
        self,
        entries,
        verbose=False,
        only_plot_stable=False,
        only_label_stable=False,
    ):
        """Initialize Phase-diagram.

        Args:
            entries: list of [formula, formation_energy, ...] records;
                extra fields (e.g. a JID) are carried through untouched.
            verbose: print species/entry tables while building.
            only_plot_stable, only_label_stable: plotting options stored
                for later use.
        """
        # Adapted from ASE
        # Maps element symbol -> column index into self.points.
        self.species = OrderedDict()
        # List of formula,formation energy,JID etc.
        self.entries = entries
        self.entries_dict = []
        self.verbose = verbose
        self.only_plot_stable = only_plot_stable
        self.only_label_stable = only_label_stable
        for i in self.entries:
            name = i[0]
            energy = i[1]
            # jid = i[2]
            count = Composition.from_string(name).to_dict()
            natoms = 0
            for symbol, n in count.items():
                natoms += n
                # Assign each new element the next column index.
                if symbol not in self.species:
                    self.species[symbol] = len(self.species)
            self.entries_dict.append((count, energy, name, natoms))
        ns = len(self.species)
        self.symbols = [None] * ns
        for symbol, id in self.species.items():
            self.symbols[id] = symbol
        if verbose:
            print("Species:", ", ".join(self.symbols))
            print("Entries:", len(self.entries_dict))
            for i, (count, energy, name, natoms) in enumerate(
                self.entries_dict
            ):
                print("{:<5}{:10}{:10.3f}".format(i, name, energy))
        # One row per entry: fractional composition per species, with the
        # entry's energy in the last column.
        self.points = np.zeros((len(self.entries_dict), ns + 1))
        for s, (count, energy, name, natoms) in enumerate(self.entries_dict):
            for symbol, n in count.items():
                self.points[s, self.species[symbol]] = n / natoms
            self.points[s, -1] = energy  # / natoms
        if len(self.points) == ns:
            # Simple case that qhull would choke on:
            self.simplices = np.arange(ns).reshape((1, ns))
            self.hull = np.ones(ns, bool)
        else:
            # print("self.points[:, 1:]",self.points[:, 1:])
            hull = ConvexHull(self.points[:, 1:])
            # Find relevant simplices:
            # Keep only downward-facing facets (negative energy-axis
            # component of the facet normal) — the lower convex hull.
            ok = hull.equations[:, -2] < 0
            self.simplices = hull.simplices[ok]
            # Create a mask for those points that are on the convex hull:
            self.hull = np.zeros(len(self.points), bool)
            for simplex in self.simplices:
                self.hull[simplex] = True
def energy_above_hull(self, entry=[]):
"""Find energy above hull."""
formula = entry[0]
form_enp = entry[1]
kwargs = Composition.from_string(formula).to_dict()
point = np.zeros(len(self.species))
N = 0
for symbol, n in kwargs.items():
point[self.species[symbol]] = n
N += n
# print ('N',N)
# Find coordinates within each simplex:
X = self.points[self.simplices, 1:-1] - point[1:] / N
# Find the simplex with positive coordinates that sum to
# less than one:
eps = 1e-15
for i, Y in enumerate(X):
try:
x = np.linalg.solve((Y[1:] - Y[:1]).T, -Y[0])
except np.linalg.linalg.LinAlgError:
continue
if (x > -eps).all() and x.sum() < 1 + eps:
break
else:
assert False, X
indices = self.simplices[i]
points = self.points[indices]
scaledcoefs = [1 - x.sum()]
scaledcoefs.extend(x)
# print('scaledcoefs',scaledcoefs)
# print('points[:, -1]',points[:, -1])
energy = np.dot(scaledcoefs, points[:, -1]) # *N
coefs = []
results = []
for coef, s in zip(scaledcoefs, indices):
count, e, name, natoms = self.entries_dict[s]
coef *= N / natoms
coefs.append(coef)
results.append((name, coef, e))
# if self.verbose:
# print_results(results)
e_above_hull = form_enp - energy
return e_above_hull, energy, indices, np.array(coefs)
def get_ehull_all(self):
"""Find energy above hull for all entries."""
info = []
for i in self.entries:
# print('ent',i)
ehull, energy, indices, coefs = self.energy_above_hull(
entry=[i[0], i[1]]
)
info.append([i, ehull])
return info
def plot(self, ax=None, dims=None, show=False):
"""Make 2-d or 3-d plot of datapoints and convex hull.
Default is 2-d for 2- and 3-component diagrams and 3-d for a
4-component diagram.
"""
import matplotlib.pyplot as plt
N = len(self.species)
if dims is None:
if N <= 3:
dims = 2
else:
dims = 3
if ax is None:
projection = None
if dims == 3:
projection = "3d"
from mpl_toolkits.mplot3d import Axes3D
Axes3D # silence pyflakes
fig = plt.figure()
ax = fig.add_subplot(projection=projection)
else:
if dims == 3 and not hasattr(ax, "set_zlim"):
raise ValueError(
"Cannot make 3d plot unless axes projection " "is 3d"
)
if dims == 2:
if N == 2:
self.plot2d2(ax)
elif N == 3:
self.plot2d3(ax)
else:
raise ValueError(
"Can only make 2-d plots for 2 and 3 " "component systems!"
)
else:
if N == 3:
self.plot3d3(ax)
elif N == 4:
self.plot3d4(ax)
else:
raise ValueError(
"Can only make 3-d plots for 3 and 4 " "component systems!"
)
if show:
plt.show()
return ax
def plot2d2(self, ax=None):
"""Get 2D plot."""
x, e = self.points[:, 1:].T
names = [
re.sub(r"(\d+)", r"$_{\1}$", ref[2]) for ref in self.entries_dict
]
hull = self.hull
simplices = self.simplices
xlabel = self.symbols[1]
ylabel = "energy [eV/atom]"
extra = -min(e) / 10
if ax:
for i, j in simplices:
ax.plot(x[[i, j]], e[[i, j]], "-b")
ax.plot(x[hull], e[hull], "sg")
if not self.only_plot_stable:
ax.plot(x[~hull], e[~hull], "or")
if self.only_plot_stable or self.only_label_stable:
x = x[self.hull]
e = e[self.hull]
names = [name for name, h in zip(names, self.hull) if h]
for a, b, name in zip(x, e, names):
if b <= extra:
ax.text(a, b, name, ha="center", va="top")
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_ylim([min(e) - extra, extra])
return (x, e, names, hull, simplices, xlabel, ylabel)
def plot2d3(self, ax=None):
"""Get 2D plot for ternaries."""
x, y = self.points[:, 1:-1].T.copy()
x += y / 2
y *= 3 ** 0.5 / 2
names = [
re.sub(r"(\d+)", r"$_{\1}$", ref[2]) for ref in self.entries_dict
]
hull = self.hull
simplices = self.simplices
if ax:
for i, j, k in simplices:
ax.plot(x[[i, j, k, i]], y[[i, j, k, i]], "-b")
ax.plot(x[hull], y[hull], "og")
if not self.only_plot_stable:
ax.plot(x[~hull], y[~hull], "sr")
if self.only_plot_stable or self.only_label_stable:
x = x[self.hull]
y = y[self.hull]
names = [name for name, h in zip(names, self.hull) if h]
for a, b, name in zip(x, y, names):
ax.text(a, b, name, ha="center", va="top")
ax.set_xticks([])
ax.set_yticks([])
ax.axis("off")
return (x, y, names, hull, simplices)
def plot3d3(self, ax):
"""Get 3D plot for ternaries."""
x, y, e = self.points[:, 1:].T
ax.scatter(x[self.hull], y[self.hull], e[self.hull], c="g", marker="o")
if not self.only_plot_stable:
ax.scatter(
x[~self.hull], y[~self.hull], e[~self.hull], c="r", marker="s"
)
for a, b, c, ref in zip(x, y, e, self.entries_dict):
name = re.sub(r"(\d+)", r"$_{\1}$", ref[2])
ax.text(a, b, c, name, ha="center", va="bottom")
for i, j, k in self.simplices:
ax.plot(
x[[i, j, k, i]], y[[i, j, k, i]], zs=e[[i, j, k, i]], c="b"
)
ax.set_xlim3d(0, 1)
ax.set_ylim3d(0, 1)
ax.view_init(azim=115, elev=30)
ax.set_xlabel(self.symbols[1])
ax.set_ylabel(self.symbols[2])
ax.set_zlabel("energy [eV/atom]")
ax.set_xticks([])
ax.set_yticks([])
ax.axis("off")
def plot3d4(self, ax):
"""Get 3D plot for quaternaries."""
x, y, z = self.points[:, 1:-1].T
a = x / 2 + y + z / 2
b = 3 ** 0.5 * (x / 2 + y / 6)
c = (2 / 3) ** 0.5 * z
ax.scatter(a[self.hull], b[self.hull], c[self.hull], c="g", marker="o")
if not self.only_plot_stable:
ax.scatter(
a[~self.hull], b[~self.hull], c[~self.hull], c="r", marker="s"
)
for x, y, z, ref in zip(a, b, c, self.entries_dict):
name = re.sub(r"(\d+)", r"$_{\1}$", ref[2])
ax.text(x, y, z, name, ha="center", va="bottom")
for i, j, k, w in self.simplices:
ax.plot(
a[[i, j, k, i, w, k, j, w]],
b[[i, j, k, i, w, k, j, w]],
zs=c[[i, j, k, i, w, k, j, w]],
c="b",
)
ax.set_xlim3d(0, 1)
ax.set_ylim3d(0, 1)
ax.set_zlim3d(0, 1)
ax.view_init(azim=115, elev=30)
ax.set_xticks([])
ax.set_yticks([])
ax.axis("off")
def jid_hull(jid="", dataset=[]):
    """Get ehull for a jid and a dataset e.g. dft_3d.

    Args:
        jid: JARVIS identifier to evaluate.
        dataset: dataset name (str, fetched via figshare) or list of entries.

    Returns:
        [entry, e_above_hull] for the jid, as built by
        PhaseDiagram.get_ehull_all().

    Raises:
        ValueError: if the jid is not present in the dataset (previously
            this raised an obscure NameError for unbound ``system``).
    """
    from jarvis.db.figshare import data

    if isinstance(dataset, str):
        dataset = data(dataset)
    # Locate the chemical system of the requested entry.
    system = None
    for entry in dataset:
        if entry["jid"] == jid:
            system = set(entry["atoms"]["elements"])
            break
    if system is None:
        raise ValueError("JID %s not found in dataset." % jid)
    # Collect all entries whose elements lie within that system.
    z = []
    for entry in dataset:
        comp = Composition.from_string(entry["formula"])
        if set(comp.to_dict()).issubset(system):
            z.append(
                [entry["formula"], entry["formation_energy_peratom"], entry["jid"]]
            )
    pdj = PhaseDiagram(z)
    for result in pdj.get_ehull_all():
        if result[0][2] == jid:
            return result
def formula_hull(formula_energy_id=[], dataset=[]):
    """Get ehull for a formula_energy_id pair and a dataset e.g. dft_3d.

    Args:
        formula_energy_id: candidate entry, e.g. ["Al2O3", -1.0, "JVASP-xyz"].
        dataset: dataset name (str, fetched via figshare) or list of entries.

    Returns:
        [entry, e_above_hull] for the candidate, matched by its identifier.
    """
    from jarvis.db.figshare import data

    if isinstance(dataset, str):
        dataset = data(dataset)
    comp = Composition.from_string(formula_energy_id[0])
    system = set(comp.to_dict().keys())
    # Candidate goes first; competitors from the dataset follow.
    z = [formula_energy_id]
    for entry in dataset:
        elems = set(Composition.from_string(entry["formula"]).to_dict())
        if elems.issubset(system):
            z.append(
                [entry["formula"], entry["formation_energy_peratom"], entry["jid"]]
            )
    pdj = PhaseDiagram(z)
    for result in pdj.get_ehull_all():
        if result[0][2] == formula_energy_id[-1]:
            return result
|
3102234255ed7eb6e6440947a7dd87ac53e77f51
|
38d1c6a920b3d9534f191fa0bfcd6d7d4625d643
|
/my/instagram/android.py
|
8ebbf9fe891d88dabea301fe8a0f4db59c726628
|
[
"MIT",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
karlicoss/HPI
|
ab87dcbfd8e3af788144990c35e030577227966b
|
c283e542e3457ecd778fb09e54e725d67104a49a
|
refs/heads/master
| 2023-08-31T11:38:23.547022
| 2023-08-24T22:29:14
| 2023-08-24T22:46:23
| 209,134,309
| 1,252
| 65
|
MIT
| 2023-09-07T01:36:27
| 2019-09-17T18:59:05
|
Python
|
UTF-8
|
Python
| false
| false
| 6,357
|
py
|
android.py
|
"""
Bumble data from Android app database (in =/data/data/com.instagram.android/databases/direct.db=)
"""
from __future__ import annotations
from dataclasses import dataclass
from datetime import datetime
import json
from pathlib import Path
from typing import Iterator, Sequence, Optional, Dict, Union
from more_itertools import unique_everseen
from my.core import (
get_files,
Paths,
make_config,
make_logger,
datetime_naive,
Json,
Res,
assert_never,
)
from my.core.cachew import mcachew
from my.core.sqlite import sqlite_connect_immutable, select
from my.config import instagram as user_config
# Module-level logger named after this module so it can be filtered.
logger = make_logger(__name__)
@dataclass
class instagram_android_config(user_config.android):
    """User-editable config for the instagram android data source."""
    # paths[s]/glob to the exported sqlite databases
    export_path: Paths
    # sadly doesn't seem easy to extract user's own handle/name from the db...
    # todo maybe makes more sense to keep in parent class? not sure...
    username: Optional[str] = None
    full_name: Optional[str] = None
# Resolved/validated config instance used throughout this module.
config = make_config(instagram_android_config)
def inputs() -> Sequence[Path]:
    """Return the exported sqlite database files matching the configured glob."""
    return get_files(config.export_path)
@dataclass(unsafe_hash=True)
class User:
    """An instagram account seen in the database (self or a correspondent)."""
    id: str
    username: str
    full_name: str
# todo not sure about order of fields...
@dataclass
class _BaseMessage:
    """Fields shared by raw (_Message) and resolved (Message) messages."""
    id: str
    # NOTE: ffs, looks like they keep naive timestamps in the db (checked some random messages)
    created: datetime_naive
    text: str
    thread_id: str
@dataclass(unsafe_hash=True)
class _Message(_BaseMessage):
    """Raw message: sender identified only by id, not yet resolved to a User."""
    user_id: str
    # TODO ugh. can't figure out if dms have proper replies?
    # reply_to_id: Optional[str]
@dataclass(unsafe_hash=True)
class Message(_BaseMessage):
    """Resolved message: sender attached as a full User object."""
    user: User
    # TODO could also extract Thread object? not sure if useful
    # reply_to: Optional[Message]
# this is kinda experimental
# basically just using RuntimeError(msg_id, *rest) has an unfortunate consequence:
# there are way too many 'similar' errors (on different msg_id)
# however passing msg_id is nice as a means of supplying extra context
# so this is a compromise, the 'duplicate' errors will be filtered out by unique_everseen
class MessageError(RuntimeError):
    """Parse failure for a single message.

    Equality and hashing deliberately ignore msg_id so identical failures on
    different messages deduplicate via unique_everseen.
    """

    def __init__(self, msg_id: str, *rest: str) -> None:
        super().__init__(msg_id, *rest)
        self.rest = rest

    def __hash__(self) -> int:
        # FIX: was `def __hash__(self, other)` -- the extra parameter made
        # every hash() call (and thus set/dict usage) raise TypeError.
        return hash(self.rest)

    def __eq__(self, other) -> bool:
        if not isinstance(other, MessageError):
            return False
        return self.rest == other.rest
def _parse_message(j: Json) -> Optional[_Message]:
    """Convert one raw message json blob into an internal _Message."""
    msg_id = j['item_id']
    item_type = j['item_type']
    thread_id = j['thread_key']['thread_id']
    sender_id = j['user_id']
    # db stores timestamps in microseconds
    created = datetime.fromtimestamp(int(j['timestamp']) / 1_000_000)
    if item_type == 'text':
        text = j['text']
    elif item_type == 'reel_share':
        # TODO include reel_share -> media??
        # the problem is that the links are deliberately expired by instagram..
        text = j['reel_share']['text']
    elif item_type == 'action_log':
        # for likes this ends up as 'Liked a message' or reactions
        # which isn't super useful by itself perhaps, but matches GDPR so lets us unify threads better
        text = j['action_log']['description']
    else:
        raise MessageError(msg_id, f"{item_type} isn't handled yet")
    assert text is not None, j
    return _Message(
        id=msg_id,
        created=created,
        text=text,
        thread_id=thread_id,
        user_id=sender_id,
        # reply_to_id='FIXME',
    )
def _entities() -> Iterator[Res[Union[User, _Message]]]:
    """Yield users and raw messages from every exported database.

    Errors parsing individual messages are yielded as exceptions rather than
    aborting the whole scan.
    """
    # NOTE: definitely need to merge multiple, app seems to recycle old messages
    # TODO: hmm hard to guarantee timestamp ordering when we use synthetic input data...
    # todo use TypedDict?
    dbs = inputs()
    for f in dbs:
        logger.info(f'{f} : processing...')
        with sqlite_connect_immutable(f) as db:
            # TODO ugh. seems like no way to extract username?
            # sometimes messages (e.g. media_share) contain it in message field
            # but generally it's not present. ugh
            for (self_uid,) in select(('user_id',), 'FROM session', db=db):
                yield User(
                    id=str(self_uid),
                    full_name=config.full_name or 'USERS_OWN_FULL_NAME',
                    # FIX: was config.full_name here too (copy-paste bug);
                    # the username placeholder should fall back from username.
                    username=config.username or 'USERS_OWN_USERNAME',
                )
            for (thread_json,) in select(('thread_info',), 'FROM threads', db=db):
                j = json.loads(thread_json)
                # todo in principle should leave the thread attached to the message?
                # since thread is a group of users?
                # inviter usually contains our own user
                for r in [j['inviter'], *j['recipients']]:
                    # id disappeared and seems that pk_id is in use now (around december 2022)
                    uid = r.get('id') or r.get('pk_id')
                    assert uid is not None
                    yield User(
                        id=str(uid),  # for some reason it's int in the db
                        full_name=r['full_name'],
                        username=r['username'],
                    )
            for (msg_json,) in select(('message',), 'FROM messages ORDER BY timestamp', db=db):
                # eh, seems to contain everything in json?
                j = json.loads(msg_json)
                try:
                    m = _parse_message(j)
                    if m is not None:
                        yield m
                except Exception as e:
                    yield e
@mcachew(depends_on=inputs)
def messages() -> Iterator[Res[Message]]:
    """Resolve raw messages against known users and yield full Messages."""
    users_by_id: Dict[str, User] = {}
    for entity in unique_everseen(_entities()):
        if isinstance(entity, Exception):
            yield entity
        elif isinstance(entity, User):
            # remember the user so later messages can be resolved
            users_by_id[entity.id] = entity
        elif isinstance(entity, _Message):
            try:
                sender = users_by_id[entity.user_id]
            except Exception as e:
                # unknown sender id -- surface as an error, keep going
                yield e
                continue
            yield Message(
                id=entity.id,
                created=entity.created,
                text=entity.text,
                thread_id=entity.thread_id,
                user=sender,
            )
        else:
            assert_never(entity)
|
5942aa5ab1a256c7339ee2fd7af967cf6dba77ee
|
820b6af9fd43b270749224bb278e5f714f655ac9
|
/Filters/Modeling/Testing/Python/TestImprintFilter3.py
|
d25b70b6cde0ff07a5a6832854e13312947b9590
|
[
"BSD-3-Clause"
] |
permissive
|
Kitware/VTK
|
49dee7d4f83401efce8826f1759cd5d9caa281d1
|
dd4138e17f1ed5dfe6ef1eab0ff6643fdc07e271
|
refs/heads/master
| 2023-09-01T10:21:57.496189
| 2023-09-01T08:20:15
| 2023-09-01T08:21:05
| 631,615
| 2,253
| 1,243
|
NOASSERTION
| 2023-09-14T07:53:03
| 2010-04-27T15:12:58
|
C++
|
UTF-8
|
Python
| false
| false
| 4,417
|
py
|
TestImprintFilter3.py
|
#!/usr/bin/env python
from vtkmodules.vtkCommonTransforms import vtkTransform
from vtkmodules.vtkFiltersGeneral import vtkTransformPolyDataFilter
from vtkmodules.vtkFiltersModeling import vtkImprintFilter
from vtkmodules.vtkFiltersSources import vtkPlaneSource
from vtkmodules.vtkRenderingCore import (
vtkActor,
vtkPolyDataMapper,
vtkRenderWindow,
vtkRenderWindowInteractor,
vtkRenderer,
)
import vtkmodules.vtkInteractionStyle
import vtkmodules.vtkRenderingFreeType
import vtkmodules.vtkRenderingOpenGL2
# Control the resolution of the test
res = 12
# Create pipeline. Use two plane sources:
# one plane imprints on the other plane.
#
# Also, exercise various permutations of the
# output types.
# Target: coarse res x res plane covering a 10x10 square at z=0.
target = vtkPlaneSource()
target.SetXResolution(res)
target.SetYResolution(res)
target.SetOrigin(0,0,0)
target.SetPoint1(10,0,0)
target.SetPoint2(0,10,0)
# Imprint: finer (2*res) plane inset within the target's extent.
plane2 = vtkPlaneSource()
plane2.SetXResolution(2*res)
plane2.SetYResolution(2*res)
plane2.SetOrigin(2.25,2.25,0)
plane2.SetPoint1(7.25,2.25,0)
plane2.SetPoint2(2.25,7.25,0)
# Rotate and shift the imprint so its edges cross target cells obliquely.
xForm = vtkTransform()
xForm.RotateZ(-25)
xForm.Translate(-1.5,1.5,0)
xFormF = vtkTransformPolyDataFilter()
xFormF.SetInputConnection(plane2.GetOutputPort())
xFormF.SetTransform(xForm)
# Output candidate target cells
imp = vtkImprintFilter()
imp.SetTargetConnection(target.GetOutputPort())
imp.SetImprintConnection(xFormF.GetOutputPort())
imp.SetTolerance(0.001)
imp.SetOutputTypeToTargetCells()
imp.Update()
# Wireframe views: filter output in green, raw imprint overlay in red.
targetMapper = vtkPolyDataMapper()
targetMapper.SetInputConnection(imp.GetOutputPort())
targetActor = vtkActor()
targetActor.SetMapper(targetMapper)
targetActor.GetProperty().SetRepresentationToWireframe()
targetActor.GetProperty().SetColor(0,1,0)
imprintMapper = vtkPolyDataMapper()
imprintMapper.SetInputConnection(xFormF.GetOutputPort())
imprintActor = vtkActor()
imprintActor.SetMapper(imprintMapper)
imprintActor.GetProperty().SetRepresentationToWireframe()
imprintActor.GetProperty().SetColor(1,0,0)
# Output imprinted cells
imp2 = vtkImprintFilter()
imp2.SetTargetConnection(target.GetOutputPort())
imp2.SetImprintConnection(xFormF.GetOutputPort())
imp2.SetTolerance(0.001)
imp2.SetOutputTypeToImprintedCells()
imp2.Update()
targetMapper2 = vtkPolyDataMapper()
targetMapper2.SetInputConnection(imp2.GetOutputPort())
targetActor2 = vtkActor()
targetActor2.SetMapper(targetMapper2)
targetActor2.GetProperty().SetRepresentationToWireframe()
targetActor2.GetProperty().SetColor(0,1,0)
imprintMapper2 = vtkPolyDataMapper()
imprintMapper2.SetInputConnection(xFormF.GetOutputPort())
imprintActor2 = vtkActor()
imprintActor2.SetMapper(imprintMapper2)
imprintActor2.GetProperty().SetRepresentationToWireframe()
imprintActor2.GetProperty().SetColor(1,0,0)
# Output imprint with points projected onto target.
imp3 = vtkImprintFilter()
imp3.SetTargetConnection(target.GetOutputPort())
imp3.SetImprintConnection(xFormF.GetOutputPort())
imp3.SetTolerance(0.001)
imp3.SetOutputTypeToProjectedImprint()
imp3.Update()
imprintMapper3 = vtkPolyDataMapper()
imprintMapper3.SetInputConnection(imp3.GetOutputPort())
imprintActor3 = vtkActor()
imprintActor3.SetMapper(imprintMapper3)
imprintActor3.GetProperty().SetRepresentationToWireframe()
imprintActor3.GetProperty().SetColor(1,0,0)
# Output the imprinted region
imp4 = vtkImprintFilter()
imp4.SetTargetConnection(target.GetOutputPort())
imp4.SetImprintConnection(xFormF.GetOutputPort())
imp4.SetTolerance(0.001)
imp4.SetOutputTypeToImprintedRegion()
imp4.Update()
imprintMapper4 = vtkPolyDataMapper()
imprintMapper4.SetInputConnection(imp4.GetOutputPort())
imprintActor4 = vtkActor()
imprintActor4.SetMapper(imprintMapper4)
imprintActor4.GetProperty().SetRepresentationToWireframe()
imprintActor4.GetProperty().SetColor(1,0,0)
# Create the RenderWindow, Renderer
#
# Four viewports, one per imprint output type exercised above.
ren1 = vtkRenderer()
ren1.SetViewport(0,0,0.5,0.5)
ren2 = vtkRenderer()
ren2.SetViewport(0.5,0,1,0.5)
ren3 = vtkRenderer()
ren3.SetViewport(0,0.5,0.5,1.0)
ren4 = vtkRenderer()
ren4.SetViewport(0.5,0.5,1,1)
renWin = vtkRenderWindow()
renWin.AddRenderer( ren1 )
renWin.AddRenderer( ren2 )
renWin.AddRenderer( ren3 )
renWin.AddRenderer( ren4 )
renWin.SetSize(400,400)
iren = vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren1.AddActor(imprintActor)
ren1.AddActor(targetActor)
ren2.AddActor(imprintActor2)
ren2.AddActor(targetActor2)
ren3.AddActor(imprintActor3)
ren4.AddActor(imprintActor4)
renWin.Render()
iren.Start()
|
70bd2009e63a2dd60c11b0fbb578ef757f040719
|
975b2d421d3661e6770b601929d5f11d981d8985
|
/msgraph/generated/models/security/container_port_protocol.py
|
20c68474649ffc35b5547b8d6a64244948df0d91
|
[
"MIT"
] |
permissive
|
microsoftgraph/msgraph-sdk-python
|
a7c551b85daadeebf76ec4ae12668664ea639b42
|
27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949
|
refs/heads/main
| 2023-09-03T21:45:27.989672
| 2023-08-31T06:22:18
| 2023-08-31T06:22:18
| 534,665,999
| 135
| 18
|
MIT
| 2023-09-14T11:04:11
| 2022-09-09T14:00:17
|
Python
|
UTF-8
|
Python
| false
| false
| 164
|
py
|
container_port_protocol.py
|
from enum import Enum
class ContainerPortProtocol(str, Enum):
    """Transport protocol of a container port (msgraph security API)."""
    # Plain string values; the generated trailing-comma tuples resolved to
    # the same member values via the str mixin constructor.
    Udp = "udp"
    Tcp = "tcp"
    Sctp = "sctp"
    UnknownFutureValue = "unknownFutureValue"
|
b80fb46e79d1a0bcc7814213bf146aa07a1746d5
|
b7d4fc29e02e1379b0d44a756b4697dc19f8a792
|
/deps/boost/libs/python/test/operators_wrapper.py
|
6c889b0a393d0c04232ad31da2fb946486fdeec5
|
[
"GPL-1.0-or-later",
"MIT",
"BSL-1.0"
] |
permissive
|
vslavik/poedit
|
45140ca86a853db58ddcbe65ab588da3873c4431
|
1b0940b026b429a10f310d98eeeaadfab271d556
|
refs/heads/master
| 2023-08-29T06:24:16.088676
| 2023-08-14T15:48:18
| 2023-08-14T15:48:18
| 477,156
| 1,424
| 275
|
MIT
| 2023-09-01T16:57:47
| 2010-01-18T08:23:13
|
C++
|
UTF-8
|
Python
| false
| false
| 164
|
py
|
operators_wrapper.py
|
from operators_wrapper_ext import *
# Subclass of the wrapped vector type to check operators survive subclassing.
class D2(vector): pass
d2 = D2()
# Smoke-test unary negation and binary +/+= across all pairings of the
# exported instances and the Python subclass instance.
for lhs in (v,d,d2):
    -lhs
    for rhs in (v,d,d2):
        lhs + rhs
        lhs += rhs
1594e3b84f80f888b12f89b57079d149b8ff88f3
|
8a87f5b889a9ce7d81421515f06d9c9cbf6ce64a
|
/3rdParty/V8/v7.9.317/tools/testrunner/testproc/shard.py
|
9475ea15f3ded525b93a15f7bb8806bfaa926915
|
[
"Apache-2.0",
"BSD-3-Clause",
"ICU",
"Zlib",
"GPL-1.0-or-later",
"OpenSSL",
"ISC",
"LicenseRef-scancode-gutenberg-2020",
"MIT",
"GPL-2.0-only",
"CC0-1.0",
"BSL-1.0",
"LicenseRef-scancode-autoconf-simple-exception",
"LicenseRef-scancode-pcre",
"Bison-exception-2.2",
"LicenseRef-scancode-public-domain",
"JSON",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-4-Clause",
"Python-2.0",
"LGPL-2.1-or-later",
"bzip2-1.0.6",
"SunPro"
] |
permissive
|
arangodb/arangodb
|
0980625e76c56a2449d90dcb8d8f2c485e28a83b
|
43c40535cee37fc7349a21793dc33b1833735af5
|
refs/heads/devel
| 2023-08-31T09:34:47.451950
| 2023-08-31T07:25:02
| 2023-08-31T07:25:02
| 2,649,214
| 13,385
| 982
|
Apache-2.0
| 2023-09-14T17:02:16
| 2011-10-26T06:42:00
|
C++
|
UTF-8
|
Python
| false
| false
| 1,037
|
py
|
shard.py
|
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from . import base
# Alphabet size determines the hashing radix. Choosing a prime number prevents
# clustering of the hashes.
HASHING_ALPHABET_SIZE = 2 ** 7 -1


def radix_hash(capacity, key):
  """Hash *key* into [0, capacity) by reading it as a base-127 number."""
  digest = 0
  for ch in key:
    digest = (digest * HASHING_ALPHABET_SIZE + ord(ch)) % capacity
  return digest
class ShardProc(base.TestProcFilter):
  """Processor distributing tests between shards.

  It hashes each unique test identifier and uses the hash to decide which
  shard runs the test.
  """
  def __init__(self, myid, shards_count):
    """
    Args:
      myid: id of the shard within [0; shards_count - 1]
      shards_count: number of shards
    """
    super(ShardProc, self).__init__()

    assert myid >= 0 and myid < shards_count

    self._myid = myid
    self._shards_count = shards_count

  def _filter(self, test):
    # Filter out (return True for) tests that hash to a different shard.
    return self._myid != radix_hash(self._shards_count, test.procid)
|
b494ebd8f2e96a99e67f1fa16c0eda11754e8003
|
e53dd2252d82e19794a7763c7aab015112546001
|
/python/test/base_test.py
|
a0d4efc07d3d78ed0de31b941efafe66f051bce6
|
[
"Apache-2.0"
] |
permissive
|
JohnSnowLabs/spark-nlp
|
5017f90629bf44219db4c4880da168830b63838f
|
438d9e65d791b0fa8914ab250815b29d55895325
|
refs/heads/master
| 2023-08-31T12:31:40.651128
| 2023-08-28T09:19:10
| 2023-08-28T09:19:10
| 104,670,986
| 3,506
| 737
|
Apache-2.0
| 2023-09-14T06:30:18
| 2017-09-24T19:36:44
|
Scala
|
UTF-8
|
Python
| false
| false
| 3,671
|
py
|
base_test.py
|
# Copyright 2017-2022 John Snow Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pytest
from sparknlp.annotator import *
from sparknlp.base import *
from test.util import SparkContextForTest
"""
----
CREATE THE FOLLOWING SCALA CLASS IN ORDER TO RUN THIS TEST
----
package com.johnsnowlabs.nlp
import com.johnsnowlabs.nlp.annotators.TokenizerModel
import org.apache.spark.ml.PipelineModel
import org.apache.spark.sql.Dataset
class SomeApproachTest(override val uid: String) extends AnnotatorApproach[SomeModelTest] with HasRecursiveFit[SomeModelTest] {
override val description: String = "Some Approach"
override def train(dataset: Dataset[_], recursivePipeline: Option[PipelineModel]): SomeModelTest = {
require(recursivePipeline.isDefined)
require(recursivePipeline.get.stages.length == 2)
require(recursivePipeline.get.stages.last.isInstanceOf[TokenizerModel])
new SomeModelTest()
}
override val inputAnnotatorTypes: Array[String] = Array(AnnotatorType.TOKEN)
override val outputAnnotatorType: AnnotatorType = "BAR"
}
class SomeModelTest(override val uid: String) extends AnnotatorModel[SomeModelTest] with HasRecursiveTransform[SomeModelTest] {
def this() = this("bar_uid")
override def annotate(annotations: Seq[Annotation]): Seq[Annotation] = {
require(recursivePipeline.isDefined)
require(recursivePipeline.get.stages.length == 2)
require(recursivePipeline.get.stages.last.isInstanceOf[TokenizerModel])
Seq.empty
}
override val inputAnnotatorTypes: Array[String] = Array(AnnotatorType.TOKEN)
override val outputAnnotatorType: AnnotatorType = "BAR"
}
"""
class SomeAnnotatorTest(AnnotatorApproach, HasRecursiveFit):
    """Python wrapper for the Scala SomeApproachTest (see module docstring)."""
    # Annotator contract: consumes TOKEN annotations, emits DOCUMENT.
    inputAnnotatorTypes = [AnnotatorType.TOKEN]
    outputAnnotatorType = AnnotatorType.DOCUMENT

    def __init__(self):
        super(SomeAnnotatorTest, self).__init__(classname="com.johnsnowlabs.nlp.SomeApproachTest")

    def _create_model(self, java_model):
        # fit() wraps the trained Java model in the matching Python model.
        return SomeModelTest(java_model=java_model)
class SomeModelTest(AnnotatorModel, HasRecursiveTransform):
    """Python wrapper for the Scala SomeModelTest model (see module docstring)."""
    inputAnnotatorTypes = [AnnotatorType.TOKEN]
    outputAnnotatorType = AnnotatorType.DOCUMENT

    def __init__(self, classname="com.johnsnowlabs.nlp.SomeModelTest", java_model=None):
        super(SomeModelTest, self).__init__(
            classname=classname,
            java_model=java_model
        )
@pytest.mark.slow
@pytest.mark.skip(reason="Needs to be Fixed.")
class RecursiveTestSpec(unittest.TestCase):
    """Checks RecursivePipeline hands the partial pipeline to recursive annotators."""
    def setUp(self):
        self.data = SparkContextForTest.data

    def runTest(self):
        # Pipeline: document -> token -> custom recursive annotator.
        document_assembler = DocumentAssembler() \
            .setInputCol("text") \
            .setOutputCol("document")
        tokenizer = Tokenizer() \
            .setInputCols(["document"]) \
            .setOutputCol("token")
        some_annotator = SomeAnnotatorTest() \
            .setInputCols(['token']) \
            .setOutputCol('baaar')
        pipeline = RecursivePipeline().setStages([document_assembler, tokenizer, some_annotator])
        model = pipeline.fit(self.data)
        # Recursive transform should also receive the preceding stages.
        RecursivePipelineModel(model).transform(self.data).show()
|
9db5eb860731a033b98f761964b29fa09083883f
|
0e083f405af00029c9ec31849f0f7f81c56844b5
|
/mmdeploy/pytorch/symbolics/hardsigmoid.py
|
27561685ed96b41f6a272b0d66a49cdafd91617c
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmdeploy
|
39b9e7b611caab2c76a6142fcb99f0bf1d92ad24
|
5479c8774f5b88d7ed9d399d4e305cb42cc2e73a
|
refs/heads/main
| 2023-09-01T21:29:25.315371
| 2023-08-31T09:59:29
| 2023-08-31T09:59:29
| 441,467,833
| 2,164
| 605
|
Apache-2.0
| 2023-09-14T10:39:04
| 2021-12-24T13:04:44
|
Python
|
UTF-8
|
Python
| false
| false
| 528
|
py
|
hardsigmoid.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Modified from:
# https://github.com/pytorch/pytorch/blob/9ade03959392e5a90b74261012de1d806cab2253/torch/onnx/symbolic_opset9.py
from mmdeploy.core import SYMBOLIC_REWRITER
@SYMBOLIC_REWRITER.register_symbolic(
    'hardsigmoid', is_pytorch=True, arg_descriptors=['v'])
def hardsigmoid__default(g, self):
    """Support exporting hardsigmoid; enables ONNX export in torch<=1.8.2."""
    # Emit the ONNX HardSigmoid op; alpha=1/6 matches PyTorch's
    # hardsigmoid definition (beta defaults to 0.5).
    return g.op('HardSigmoid', self, alpha_f=1 / 6)
|
af39d5dc723026056e9f26407339fad324b1eba2
|
4805a71711625735215cc1a773a85712be305b59
|
/Cython/Compiler/AnalysedTreeTransforms.py
|
d4941606ef6f4c1072b5c68dcabe328ac6eaad3a
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
cython/cython
|
0a75b75b7eaf19eeedaaebca9d49adb603e3e8f5
|
6ba3daf319d94058de74e8e7f53f932092b38441
|
refs/heads/master
| 2023-09-04T11:09:56.569277
| 2023-09-04T07:45:47
| 2023-09-04T07:45:47
| 1,099,265
| 8,352
| 1,704
|
Apache-2.0
| 2023-09-14T06:33:34
| 2010-11-21T07:44:20
|
Python
|
UTF-8
|
Python
| false
| false
| 3,834
|
py
|
AnalysedTreeTransforms.py
|
from __future__ import absolute_import
from .Visitor import ScopeTrackingTransform
from .Nodes import StatListNode, SingleAssignmentNode, CFuncDefNode, DefNode
from .ExprNodes import DictNode, DictItemNode, NameNode, UnicodeNode
from .PyrexTypes import py_object_type
from .StringEncoding import EncodedString
from . import Symtab
class AutoTestDictTransform(ScopeTrackingTransform):
    """Handles the autotestdict directive.

    Collects doctest-bearing docstrings and assigns them to a module-level
    ``__test__`` dict so Python's doctest machinery can find them.
    """

    # Special methods whose docstrings are never collected for cclass scopes.
    excludelist = ['__cinit__', '__dealloc__', '__richcmp__',
                   '__nonzero__', '__bool__',
                   '__len__', '__contains__']

    def visit_ModuleNode(self, node):
        """Set up the __test__ dict assignment and traverse the module."""
        if node.is_pxd:
            return node
        self.scope_type = 'module'
        self.scope_node = node

        if not self.current_directives['autotestdict']:
            return node
        self.all_docstrings = self.current_directives['autotestdict.all']
        self.cdef_docstrings = self.all_docstrings or self.current_directives['autotestdict.cdef']

        assert isinstance(node.body, StatListNode)

        # First see if __test__ is already created
        if u'__test__' in node.scope.entries:
            # Do nothing
            return node

        pos = node.pos

        self.tests = []
        self.testspos = node.pos

        test_dict_entry = node.scope.declare_var(EncodedString(u'__test__'),
                                                 py_object_type,
                                                 pos,
                                                 visibility='public')
        # The DictNode shares self.tests, so entries appended while visiting
        # children end up in the assigned dict.
        create_test_dict_assignment = SingleAssignmentNode(pos,
            lhs=NameNode(pos, name=EncodedString(u'__test__'),
                         entry=test_dict_entry),
            rhs=DictNode(pos, key_value_pairs=self.tests))
        self.visitchildren(node)
        node.body.stats.append(create_test_dict_assignment)
        return node

    def add_test(self, testpos, path, doctest):
        """Record one '<path> (line N)' -> docstring entry for __test__."""
        pos = self.testspos
        keystr = u'%s (line %d)' % (path, testpos[1])
        key = UnicodeNode(pos, value=EncodedString(keystr))
        value = UnicodeNode(pos, value=doctest)
        self.tests.append(DictItemNode(pos, key=key, value=value))

    def visit_ExprNode(self, node):
        # expressions cannot contain functions and lambda expressions
        # do not have a docstring
        return node

    def visit_FuncDefNode(self, node):
        """Collect the function's docstring if eligible for autotestdict."""
        if not node.doc or (isinstance(node, DefNode) and node.fused_py_func):
            return node
        if not self.cdef_docstrings:
            if isinstance(node, CFuncDefNode) and not node.py_func:
                return node
        if not self.all_docstrings and '>>>' not in node.doc:
            return node

        pos = self.testspos
        if self.scope_type == 'module':
            path = node.entry.name
        elif self.scope_type in ('pyclass', 'cclass'):
            if isinstance(node, CFuncDefNode):
                if node.py_func is not None:
                    name = node.py_func.name
                else:
                    name = node.entry.name
            else:
                name = node.name
            if self.scope_type == 'cclass' and name in self.excludelist:
                return node
            if self.scope_type == 'pyclass':
                class_name = self.scope_node.name
            else:
                class_name = self.scope_node.class_name
            if isinstance(node.entry.scope, Symtab.PropertyScope):
                # Removed dead local `property_method_name` (assigned, never
                # used) -- the scope name is used directly in the path.
                path = "%s.%s.%s" % (class_name, node.entry.scope.name,
                                     node.entry.name)
            else:
                path = "%s.%s" % (class_name, node.entry.name)
        else:
            assert False
        self.add_test(node.pos, path, node.doc)
        return node
|
3824b5fc5dfbc27a467d3addc5aee0eee7ade2a8
|
59b374ac54ced5e006bd33773de4526112a88a27
|
/src/views/import_export.py
|
7689387bec9667fa827ade4cc524eff236dd4f49
|
[
"MIT"
] |
permissive
|
gabfl/vault
|
318cc53865c9d1786f304cc2f1605332a102b075
|
dbf49f42ae391128399682dfb6910ffd4f8856a1
|
refs/heads/main
| 2023-06-21T21:27:06.959402
| 2023-05-08T23:43:14
| 2023-05-08T23:43:14
| 95,264,151
| 188
| 64
|
MIT
| 2023-09-07T23:56:29
| 2017-06-23T23:47:27
|
Python
|
UTF-8
|
Python
| false
| false
| 4,601
|
py
|
import_export.py
|
# Import/export view
import sys
import json
from tabulate import tabulate
from . import menu, secrets, categories
from ..modules.misc import confirm
from ..modules.carry import global_scope
from ..lib.Encryption import Encryption
"""
Adding import or export formats:
To add an import or export format, you need to add a file format name to
`import_()` or `export()` and create an associated method
called `import_from_[some_format]()` or `export_to[some_format]()`.
The format name must also be added to `src/vault.py` in argparse choices.
If you create a format that can be useful to others, please fork the project
first and submit a merge request!
"""
def import_(format_, path):
    """
    Routing to format specific import methods
    """
    # Guard clause: reject unsupported formats up front.
    if format_ != 'json':
        raise ValueError('%s is not a supported file format' % (format_))
    return import_from_json(path)
def export_(format_, path):
    """
    Routing to format specific export methods
    """
    # Guard clause: reject unsupported formats up front.
    if format_ != 'json':
        raise ValueError('%s is not a supported file format' % (format_))
    return export_to_json(path)
def export_to_json(path):
    """
    Export to a Json file
    """
    # Ask user to unlock the vault
    unlock()

    # Serialize every secret, resolving the category id to its name.
    items = [
        {
            'name': secret.name,
            'url': secret.url,
            'login': secret.login,
            'password': secret.password,
            'notes': secret.notes,
            'category': categories.get_name(secret.category_id),
        }
        for secret in secrets.all()
    ]

    return save_file(path, json.dumps(items))
def import_from_json(path=None, rows=None):
    """
    Import a Json file
    """
    # Ask user to unlock the vault (except if its already unlocked in migration)
    if not isinstance(global_scope['enc'], Encryption):
        unlock()

    if not rows:  # If importing from a file
        # Load and decode the file content
        rows = json.loads(read_file(path))

    # Show the user what is about to be imported
    print("The following items will be imported:")
    print()
    preview = [[row['name'], row['url'], row['login'], row['category']]
               for row in rows]
    print(to_table(preview))
    print()

    if not confirm('Confirm import?', False):
        print("Import cancelled.")
        return False
    return import_items(rows)
def import_items(rows):
    """
    Import items at the following format:
    [{'name': '...', 'url': '...', 'login': '...', 'password': '...', 'notes': '...', 'category': '...'}]
    """
    for item in rows:
        category_id = None
        category_name = item.get('category')
        if category_name:
            # Re-use an existing category when one matches by name...
            category_id = categories.get_id(category_name)
            if category_id is None:
                # ...otherwise create it on the fly.
                categories.add(name=category_name)
                category_id = categories.get_id(category_name)
        # Create secret
        secrets.add(name=item.get('name'),
                    url=item.get('url'),
                    login=item.get('login'),
                    password=item.get('password'),
                    notes=item.get('notes'),
                    category_id=category_id)
    print('%d items have been imported.' % len(rows))
    return True
def to_table(rows=None):
    """
    Transform rows in a table.

    :param rows: list of [name, url, login, category] rows; ``None`` or an
                 empty list yields a placeholder string.
    :return: formatted table string, or 'Empty!' when there is nothing to show.
    """
    # Fix: the original used a mutable default argument (`rows=[]`), which is
    # shared across calls; `None` is the safe sentinel.
    if rows:
        return tabulate(rows, headers=['Name', 'URL', 'Login', 'Category'])
    return 'Empty!'
def read_file(path, mode='r'):
    """
    Read an import file and return its content.

    :param path: path of the file to read
    :param mode: open mode, e.g. 'r' or 'rb'
    :return: the file content; exits the process if the file cannot be read
    """
    try:
        # Context manager guarantees the handle is closed even if read() fails
        # (the original leaked the handle on a read error).
        with open(path, mode=mode) as file:
            return file.read()
    except Exception as e:
        print("The file `%s` could not be opened." % (path))
        print(e)
        sys.exit()
def save_file(path, content, mode='w'):
    """
    Save exported items to a file.

    :param path: destination file path
    :param content: text (or bytes, with a binary mode) to write
    :param mode: open mode, e.g. 'w' or 'wb'
    :return: True on success, False if the file could not be written
    """
    try:
        # Context manager guarantees the handle is closed even if write() fails
        # (the original leaked the handle on a write error).
        with open(path, mode) as file:
            file.write(content)
        print("The vault has been exported to the file `%s`." % (path))
    except Exception as e:
        print("The vault could not be exported to the file `%s`." % (path))
        print(e)
        return False
    return True
def unlock():
    """
    Ask user to unlock the vault.

    Delegates to the interactive menu; returns whatever `menu.unlock` returns.
    """
    # `False` = don't load menu after unlocking
    return menu.unlock(redirect_to_menu=False)
|
0fdf16e0d7c5e4b12f1f3fce80e05a08e5bf33cc
|
2481cde6506743565dff2b405a2396daf208ab3e
|
/src/true_coders/migrations/0054_auto_20230528_1859.py
|
28965321df113af9466c3f0329a165c50877ce11
|
[
"Apache-2.0"
] |
permissive
|
aropan/clist
|
4819a3036d179595e4df8c646aff2ed593b9dad3
|
5c805b2af71acee97f993f19d8d4e229f7f5b411
|
refs/heads/master
| 2023-08-31T11:15:17.987776
| 2023-08-27T21:51:14
| 2023-08-27T21:52:16
| 187,111,853
| 276
| 35
|
Apache-2.0
| 2023-09-06T18:42:53
| 2019-05-16T22:57:03
|
Python
|
UTF-8
|
Python
| false
| false
| 803
|
py
|
0054_auto_20230528_1859.py
|
# Generated by Django 3.1.14 on 2023-05-28 18:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: both ForeignKeys on CoderProblem get the
    reverse accessor name `verdicts` (on Coder and on Problem)."""

    dependencies = [
        ('clist', '0103_remove_null_for_problems_times'),
        ('true_coders', '0053_auto_20230520_2152'),
    ]

    operations = [
        migrations.AlterField(
            model_name='coderproblem',
            name='coder',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='verdicts', to='true_coders.coder'),
        ),
        migrations.AlterField(
            model_name='coderproblem',
            name='problem',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='verdicts', to='clist.problem'),
        ),
    ]
|
aa2af38eb3f03de344649ba95bd5e07a2e8b22c2
|
c2447258284f77943f67f1264f391e22de7603e9
|
/keystoneclient/v3/contrib/federation/saml.py
|
435e45dc695cc4b3f74c8a64fea5c26da9c63963
|
[
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
openstack/python-keystoneclient
|
07c386fafda1262c4c1bc63c0afa1aaaefea1009
|
141787ae8b0db7ac4dffce915e033a78d145d54e
|
refs/heads/master
| 2023-07-04T10:21:12.065327
| 2023-02-17T15:06:59
| 2023-04-27T12:33:24
| 3,030,986
| 118
| 143
|
NOASSERTION
| 2020-10-19T06:09:05
| 2011-12-22T00:50:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,859
|
py
|
saml.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import base
# Identity API endpoints used below to generate SAML / ECP-wrapped assertions.
SAML2_ENDPOINT = '/auth/OS-FEDERATION/saml2'
ECP_ENDPOINT = '/auth/OS-FEDERATION/saml2/ecp'
class SamlManager(base.Manager):
    """Manager class for creating SAML assertions."""

    def create_saml_assertion(self, service_provider, token_id):
        """Create a SAML assertion from a token.

        Equivalent Identity API call:
        POST /auth/OS-FEDERATION/saml2

        :param service_provider: Service Provider resource.
        :type service_provider: string
        :param token_id: Token to transform to SAML assertion.
        :type token_id: string
        :returns: SAML representation of token_id
        :rtype: string
        """
        return self._post_assertion(SAML2_ENDPOINT, service_provider,
                                    token_id)

    def create_ecp_assertion(self, service_provider, token_id):
        """Create an ECP wrapped SAML assertion from a token.

        Equivalent Identity API call:
        POST /auth/OS-FEDERATION/saml2/ecp

        :param service_provider: Service Provider resource.
        :type service_provider: string
        :param token_id: Token to transform to SAML assertion.
        :type token_id: string
        :returns: SAML representation of token_id, wrapped in ECP envelope
        :rtype: string
        """
        return self._post_assertion(ECP_ENDPOINT, service_provider, token_id)

    def _post_assertion(self, endpoint, service_provider, token_id):
        # Shared request logic for both assertion flavours; the two public
        # methods previously duplicated this block verbatim.
        headers, body = self._create_common_request(service_provider,
                                                    token_id)
        resp, body = self.client.post(endpoint, json=body, headers=headers)
        return self._prepare_return_value(resp, resp.text)

    def _create_common_request(self, service_provider, token_id):
        # Build the token-scoped auth payload targeting the service provider.
        headers = {'Content-Type': 'application/json'}
        body = {
            'auth': {
                'identity': {
                    'methods': ['token'],
                    'token': {
                        'id': token_id
                    }
                },
                'scope': {
                    'service_provider': {
                        'id': base.getid(service_provider)
                    }
                }
            }
        }

        return (headers, body)
|
e6118079de6fb684dc38bf6dabd063f3bd87cd01
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/cv/AVA_hpa/src/eval_metrics.py
|
c3329708f8cb044205501559c1e011e8d875674e
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 12,717
|
py
|
eval_metrics.py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""evaluation metrics"""
import os
from sklearn import metrics
import numpy as np
epsilon = 1e-8
def sklearn_auc_macro(gt, score):
    """Macro-averaged ROC AUC (thin wrapper around sklearn)."""
    return metrics.roc_auc_score(gt, score, average='macro')


def sklearn_auc_micro(gt, score):
    """Micro-averaged ROC AUC (thin wrapper around sklearn)."""
    return metrics.roc_auc_score(gt, score, average='micro')


def sklearn_f1_macro(gt, predict):
    """Macro-averaged F1 over binarized predictions (sklearn)."""
    return metrics.f1_score(gt, predict, average='macro')


def sklearn_f1_micro(gt, predict):
    """Micro-averaged F1 over binarized predictions (sklearn)."""
    return metrics.f1_score(gt, predict, average='micro')
def np_metrics(gt, predict, score=None, path=None):
    """Compute all example-/label-based metrics and append them to
    `<path>/eval.txt`.

    :param gt: binary ground-truth label matrix (examples x labels)
    :param predict: binarized prediction matrix, same shape as `gt`
    :param score: raw prediction scores, used for the AUC metrics
    :param path: directory for the report file (created if missing)
    :return: (sk_f1_macro, sk_f1_micro, sk_auc_macro)
    """
    # roc_auc_score raises ValueError when a class has a single label value;
    # -1 marks "AUC not computable" in the report.
    try:
        sk_auc_macro = sklearn_auc_macro(gt, score)
    except ValueError:
        sk_auc_macro = -1
    sk_auc_micro = sklearn_auc_micro(gt, score)
    sk_f1_macro = sklearn_f1_macro(gt, predict)
    sk_f1_micro = sklearn_f1_micro(gt, predict)
    lab_sensitivity = label_sensitivity(gt, predict)
    lab_specificity = label_specificity(gt, predict)
    ex_subset_acc = example_subset_accuracy(gt, predict)
    ex_acc = example_accuracy(gt, predict)
    ex_precision = example_precision(gt, predict)
    ex_recall = example_recall(gt, predict)
    ex_f1 = compute_f1(ex_precision, ex_recall)
    lab_acc_macro, lab_acc_macro_list = label_accuracy_macro(
        gt, predict, average=False)
    lab_precision_macro, lab_precision_macro_list = label_precision_macro(
        gt, predict, average=False)
    lab_recall_macro, lab_recall_macro_list = label_recall_macro(
        gt, predict, average=False)
    lab_f1_macro, f1_list, f1_list_mean = label_f1_macro(
        gt, predict, average=False)
    lab_acc_micro = label_accuracy_micro(gt, predict)
    lab_precision_micro = label_precision_micro(gt, predict)
    lab_recall_micro = label_recall_micro(gt, predict)
    lab_f1_micro = compute_f1(lab_precision_micro, lab_recall_micro)
    # exist_ok avoids the check-then-create race of the original
    # `if not os.path.exists(path): os.makedirs(path)` sequence.
    os.makedirs(path, exist_ok=True)
    with open(os.path.join(path, "eval.txt"), 'a+') as f:
        f.write("--------------------------------------------\n")
        f.write("example_subset_accuracy: %.4f\n" % ex_subset_acc)
        f.write("example_accuracy: %.4f\n" % ex_acc)
        f.write("example_precision: %.4f\n" % ex_precision)
        f.write("example_recall: %.4f\n" % ex_recall)
        f.write("example_f1: %.4f\n" % ex_f1)
        f.write("label_accuracy_macro: %.4f\n" % lab_acc_macro)
        f.write("label_precision_macro: %.4f\n" % lab_precision_macro)
        f.write("label_recall_macro: %.4f\n" % lab_recall_macro)
        f.write("label_f1_macro: %.4f\n" % lab_f1_macro)
        f.write("label_accuracy_micro: %.4f\n" % lab_acc_micro)
        f.write("label_precision_micro: %.4f\n" % lab_precision_micro)
        f.write("label_recall_micro: %.4f\n" % lab_recall_micro)
        f.write("label_f1_micro: %.4f\n" % lab_f1_micro)
        f.write("sk_auc_macro: %.4f\n" % sk_auc_macro)
        f.write("sk_auc_micro: %.4f\n" % sk_auc_micro)
        f.write("sk_f1_macro: %.4f\n" % sk_f1_macro)
        f.write("sk_f1_micro: %.4f\n" % sk_f1_micro)
        f.write("lab_sensitivity: %.4f\n" % lab_sensitivity)
        f.write("lab_specificity: %.4f\n" % lab_specificity)
        f.write("\nlabel_f1_average: %.4f\n" % f1_list_mean)
        f.write("label_accuracy_macro: \n")
        for i, v in enumerate(lab_acc_macro_list):
            f.write("(label:%d,label_accuracy: %.4f)\n" % (i, v))
        f.write("label_precious_macro: \n")
        for i, v in enumerate(lab_precision_macro_list):
            f.write("(label:%d,lab_precision: %.4f)\n" % (i, v))
        f.write("label_recall_macro: \n")
        for i, v in enumerate(lab_recall_macro_list):
            f.write("(label:%d,lab_recall: %.4f)\n" % (i, v))
        f.write("label_f1_macro: \n")
        for i, v in enumerate(f1_list):
            f.write("(label:%d,lab_f1: %.4f)\n" % (i, v))
    return sk_f1_macro, sk_f1_micro, sk_auc_macro
def threshold_tensor_batch(predict, base=0.5):
    '''make sure at least one label for batch'''
    # Per-example cutoff: the smaller of `base` and that example's maximum
    # score, so every row keeps at least its top-scoring label.
    row_max = np.max(predict, axis=1)
    base_vec = np.full(row_max.shape, base, dtype=np.float32)
    cutoff = np.minimum(row_max, base_vec)
    return np.greater_equal(predict, cutoff[:, np.newaxis])
def compute_f1(precision, recall):
    """Epsilon-smoothed harmonic mean of precision and recall."""
    numerator = 2 * precision * recall
    denominator = precision + recall + epsilon
    return numerator / denominator
def example_subset_accuracy(gt, predict):
    """Fraction of examples whose predicted label set matches gt exactly."""
    exact_match = np.all(np.equal(gt, predict), axis=1)
    return np.mean(exact_match.astype("float32"))
def example_accuracy(gt, predict):
    """Mean per-example Jaccard similarity: |gt AND pred| / |gt OR pred|."""
    intersection = np.sum(np.logical_and(gt, predict), axis=1).astype("float32")
    union = np.sum(np.logical_or(gt, predict), axis=1).astype("float32")
    return np.mean((intersection + epsilon) / (union + epsilon))
def example_precision(gt, predict):
    """Mean per-example precision: |gt AND pred| / |pred|."""
    hits = np.sum(np.logical_and(gt, predict), axis=1).astype("float32")
    predicted = np.sum(predict, axis=1).astype("float32")
    return np.mean((hits + epsilon) / (predicted + epsilon))
def example_recall(gt, predict):
    """Mean per-example recall: |gt AND pred| / |gt|."""
    hits = np.sum(np.logical_and(gt, predict), axis=1).astype("float32")
    actual = np.sum(gt, axis=1).astype("float32")
    return np.mean((hits + epsilon) / (actual + epsilon))
def example_f1(gt, predict):
    """Example-based F1, built from example precision and recall."""
    prec = example_precision(gt, predict)
    rec = example_recall(gt, predict)
    return (2 * prec * rec) / (prec + rec + epsilon)
def _label_quantity(gt, predict):
tp = np.sum(np.logical_and(gt, predict), axis=0)
fp = np.sum(np.logical_and(1 - gt, predict), axis=0)
tn = np.sum(np.logical_and(1 - gt, 1 - predict), axis=0)
fn = np.sum(np.logical_and(gt, 1 - predict), axis=0)
return np.stack([tp, fp, tn, fn], axis=0).astype("float")
def label_accuracy_macro(gt, predict, average=True):
    """Per-label accuracy; returns the mean, or (mean, per-label vector)."""
    tp, fp, tn, fn = _label_quantity(gt, predict)
    per_label = (tp + tn + epsilon) / (tp + fp + tn + fn + epsilon)
    if average:
        return np.mean(per_label)
    return np.mean(per_label), per_label
def label_precision_macro(gt, predict, average=True):
    """Per-label precision; returns the mean, or (mean, per-label vector)."""
    tp, fp, _, _ = _label_quantity(gt, predict)
    per_label = (tp + epsilon) / (tp + fp + epsilon)
    if average:
        return np.mean(per_label)
    return np.mean(per_label), per_label
def label_recall_macro(gt, predict, average=True):
    """Per-label recall; returns the mean, or (mean, per-label vector)."""
    tp, _, _, fn = _label_quantity(gt, predict)
    per_label = (tp + epsilon) / (tp + fn + epsilon)
    if average:
        return np.mean(per_label)
    return np.mean(per_label), per_label
def label_f1_macro(gt, predict, average=True):
    """Macro F1 from macro precision/recall; optionally also the per-label
    F1 vector and its mean."""
    p, p_list = label_precision_macro(gt, predict, average=False)
    r, r_list = label_recall_macro(gt, predict, average=False)
    per_label_f1 = (2 * p_list * r_list) / (p_list + r_list + epsilon)
    macro_f1 = (2 * p * r) / (p + r + epsilon)
    if average:
        return macro_f1
    return macro_f1, per_label_f1, np.mean(per_label_f1)
def label_accuracy_micro(gt, predict):
    """Micro accuracy: pooled (tp + tn) over all pooled counts."""
    tp, fp, tn, fn = np.sum(_label_quantity(gt, predict), axis=1)
    return (tp + tn + epsilon) / (tp + fp + tn + fn + epsilon)
def label_precision_micro(gt, predict):
    """Micro precision: pooled tp / (tp + fp)."""
    tp, fp, _, _ = np.sum(_label_quantity(gt, predict), axis=1)
    return (tp + epsilon) / (tp + fp + epsilon)
def label_recall_micro(gt, predict):
    """Micro recall: pooled tp / (tp + fn)."""
    tp, _, _, fn = np.sum(_label_quantity(gt, predict), axis=1)
    return (tp + epsilon) / (tp + fn + epsilon)
def label_f1_micro(gt, predict):
    """Micro F1 from micro precision and recall."""
    prec = label_precision_micro(gt, predict)
    rec = label_recall_micro(gt, predict)
    return (2 * prec * rec) / (prec + rec + epsilon)
def label_sensitivity(gt, predict):
    """Sensitivity (true positive rate) -- identical to micro recall."""
    return label_recall_micro(gt, predict)
def label_specificity(gt, predict):
    """Specificity (true negative rate): pooled tn / (tn + fp)."""
    _, fp, tn, _ = np.sum(_label_quantity(gt, predict), axis=1)
    return (tn + epsilon) / (tn + fp + epsilon)
def single_label_accuracy(gt, predict):
    """Per-label accuracy vector (no averaging)."""
    tp, fp, tn, fn = _label_quantity(gt, predict)
    return (tp + tn + epsilon) / (tp + fp + tn + fn + epsilon)
def single_label_precision(gt, predict):
    """Per-label precision vector (no averaging)."""
    tp, fp, _, _ = _label_quantity(gt, predict)
    return (tp + epsilon) / (tp + fp + epsilon)
def single_label_recall(gt, predict):
    """Per-label recall vector (no averaging)."""
    tp, _, _, fn = _label_quantity(gt, predict)
    return (tp + epsilon) / (tp + fn + epsilon)
def print_metrics(gt, predict):
    """Compute the example-/label-based metric suite and print each value
    to stdout (console twin of `write_metrics`)."""
    ex_subset_acc = example_subset_accuracy(gt, predict)
    ex_acc = example_accuracy(gt, predict)
    ex_precision = example_precision(gt, predict)
    ex_recall = example_recall(gt, predict)
    ex_f1 = compute_f1(ex_precision, ex_recall)
    lab_acc_macro = label_accuracy_macro(gt, predict)
    lab_precision_macro = label_precision_macro(gt, predict)
    lab_recall_macro = label_recall_macro(gt, predict)
    # Macro F1 here is F1 of the macro-averaged P/R, not the mean per-label F1.
    lab_f1_macro = compute_f1(lab_precision_macro, lab_recall_macro)
    lab_acc_micro = label_accuracy_micro(gt, predict)
    lab_precision_micro = label_precision_micro(gt, predict)
    lab_recall_micro = label_recall_micro(gt, predict)
    lab_f1_micro = compute_f1(lab_precision_micro, lab_recall_micro)
    print("example_subset_accuracy:", ex_subset_acc)
    print("example_accuracy:", ex_acc)
    print("example_precision:", ex_precision)
    print("example_recall:", ex_recall)
    print("example_f1:", ex_f1)
    print("label_accuracy_macro:", lab_acc_macro)
    print("label_precision_macro:", lab_precision_macro)
    print("label_recall_macro:", lab_recall_macro)
    print("label_f1_macro:", lab_f1_macro)
    print("label_accuracy_micro:", lab_acc_micro)
    print("label_precision_micro:", lab_precision_micro)
    print("label_recall_micro:", lab_recall_micro)
    print("label_f1_micro:", lab_f1_micro)
def write_metrics(gt, predict, path):
    """Compute the example-/label-based metric suite and write it to `path`,
    overwriting any existing file (file twin of `print_metrics`)."""
    ex_subset_acc = example_subset_accuracy(gt, predict)
    ex_acc = example_accuracy(gt, predict)
    ex_precision = example_precision(gt, predict)
    ex_recall = example_recall(gt, predict)
    ex_f1 = compute_f1(ex_precision, ex_recall)
    lab_acc_macro = label_accuracy_macro(gt, predict)
    lab_precision_macro = label_precision_macro(gt, predict)
    lab_recall_macro = label_recall_macro(gt, predict)
    # Macro F1 here is F1 of the macro-averaged P/R, not the mean per-label F1.
    lab_f1_macro = compute_f1(lab_precision_macro, lab_recall_macro)
    lab_acc_micro = label_accuracy_micro(gt, predict)
    lab_precision_micro = label_precision_micro(gt, predict)
    lab_recall_micro = label_recall_micro(gt, predict)
    lab_f1_micro = compute_f1(lab_precision_micro, lab_recall_micro)
    with open(path, 'w') as f:
        f.write("example_subset_accuracy: %.4f\n" % ex_subset_acc)
        f.write("example_accuracy: %.4f\n" % ex_acc)
        f.write("example_precision: %.4f\n" % ex_precision)
        f.write("example_recall: %.4f\n" % ex_recall)
        f.write("example_f1: %.4f\n" % ex_f1)
        f.write("label_accuracy_macro: %.4f\n" % lab_acc_macro)
        f.write("label_precision_macro: %.4f\n" % lab_precision_macro)
        f.write("label_recall_macro: %.4f\n" % lab_recall_macro)
        f.write("label_f1_macro: %.4f\n" % lab_f1_macro)
        f.write("label_accuracy_micro: %.4f\n" % lab_acc_micro)
        f.write("label_precision_micro: %.4f\n" % lab_precision_micro)
        f.write("label_recall_micro: %.4f\n" % lab_recall_micro)
        f.write("label_f1_micro: %.4f\n" % lab_f1_micro)
|
a20dabe787f4e754c6bce4f8387af23c734572b8
|
974d04d2ea27b1bba1c01015a98112d2afb78fe5
|
/test/legacy_test/test_is_complex.py
|
f65546da552f05ce558ba1cc05001a68d86f0ea9
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/Paddle
|
b3d2583119082c8e4b74331dacc4d39ed4d7cff0
|
22a11a60e0e3d10a3cf610077a3d9942a6f964cb
|
refs/heads/develop
| 2023-08-17T21:27:30.568889
| 2023-08-17T12:38:22
| 2023-08-17T12:38:22
| 65,711,522
| 20,414
| 5,891
|
Apache-2.0
| 2023-09-14T19:20:51
| 2016-08-15T06:59:08
|
C++
|
UTF-8
|
Python
| false
| false
| 1,248
|
py
|
test_is_complex.py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
class TestIsComplex(unittest.TestCase):
    """Unit tests for `paddle.is_complex` on integer, float, complex and
    non-Tensor inputs."""

    def test_for_integer(self):
        # Integer tensors are not complex.
        x = paddle.arange(10)
        self.assertFalse(paddle.is_complex(x))

    def test_for_floating_point(self):
        # Real-valued float tensors are not complex.
        x = paddle.randn([2, 3])
        self.assertFalse(paddle.is_complex(x))

    def test_for_complex(self):
        # Adding an imaginary component yields a complex tensor.
        x = paddle.randn([2, 3]) + 1j * paddle.randn([2, 3])
        self.assertTrue(paddle.is_complex(x))

    def test_for_exception(self):
        # Non-Tensor input (a numpy array) must raise TypeError.
        with self.assertRaises(TypeError):
            paddle.is_complex(np.array([1, 2]))


if __name__ == '__main__':
    unittest.main()
|
cbe97b71f9c11c2076b291ff231efda90c1ad685
|
3d063af394b4b55ea49ded7915d0793602015859
|
/python/media/photodedup.py
|
58a2f5598d989775bca4006aa9f098eec0d4ada5
|
[
"Apache-2.0"
] |
permissive
|
ringgaard/sling
|
00edad71195bfe71aa11e2e8dda97109c047e6e5
|
a612c5823954552ba422b441a7c7d57c1a5b4fcb
|
refs/heads/master
| 2023-08-07T15:24:10.569228
| 2023-08-02T12:25:44
| 2023-08-02T12:25:44
| 106,742,468
| 141
| 10
|
Apache-2.0
| 2020-08-03T13:25:36
| 2017-10-12T20:34:28
|
C++
|
UTF-8
|
Python
| false
| false
| 2,323
|
py
|
photodedup.py
|
# Copyright 2021 Ringgaard Research ApS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Find and remove duplicate photos."""
import hashlib
import requests
import sys
import sling
import sling.util
import sling.flags as flags
import sling.media.photo as photo
flags.define("--id",
default=None,
help="Item id photo updates")
flags.define("--all",
help="check all profiles",
default=False,
action="store_true")
flags.define("--checkpoint",
help="file with latest checkpoint for scanning photo db",
default=None,
metavar="FILE")
flags.define("--dryrun",
help="do not update database",
default=False,
action="store_true")
flags.parse()
if flags.arg.all:
    # Get id of all profiles changed since last run.
    chkpt = sling.util.Checkpoint(flags.arg.checkpoint)
    ids = set(photo.photodb().keys(begin=chkpt.checkpoint))
    print(len(ids), "profiles to update")

    # Check all profiles.
    total_removed = 0
    profiles_updated = 0
    # Loop variable renamed from `id` so the builtin is not shadowed.
    for profile_id in ids:
        profile = photo.Profile(profile_id)
        removed = profile.dedup()
        if removed > 0:
            # Write updated profile.
            if not flags.arg.dryrun: profile.write()
            total_removed += removed
            profiles_updated += 1

    if flags.arg.dryrun:
        print(total_removed, "photos can be removed from",
          profiles_updated, "profiles")
    else:
        # Only advance the checkpoint when changes were actually persisted.
        chkpt.commit(photo.photodb().epoch())
        print(total_removed, "photos removed from", profiles_updated, "profiles")
else:
    # Check single profile.
    profile = photo.Profile(flags.arg.id)
    if profile.isnew:
        # Bug fix: previously printed the *builtin* `id` function instead of
        # the requested profile id.
        print("no profile found for", flags.arg.id)
    else:
        removed = profile.dedup()
        if removed > 0 and not flags.arg.dryrun:
            # Write updated profile.
            profile.write()
|
08588fd0c8835f804831780e35bd31637a2042a5
|
0583610357726c5ce6caadb51851f9ba1bc8ce07
|
/jaeger_client/span.py
|
65ff1611fb4f9676dfe1a937506893bf839e2b83
|
[
"Apache-2.0"
] |
permissive
|
jaegertracing/jaeger-client-python
|
df980511ffa01e56d2d9aca3bb196aff7fe387ab
|
5f68f1171c4156fd05675b6782aac1182058e7f4
|
refs/heads/master
| 2023-08-16T18:09:46.140041
| 2022-07-10T19:17:14
| 2022-07-10T19:17:14
| 56,551,019
| 387
| 165
|
Apache-2.0
| 2022-07-10T19:16:25
| 2016-04-19T00:10:38
|
Python
|
UTF-8
|
Python
| false
| false
| 7,674
|
py
|
span.py
|
# Copyright (c) 2016-2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
import logging
from typing import TYPE_CHECKING, Any, Dict, Optional, List
import opentracing
from opentracing.ext import tags as ext_tags
from .tracer import Reference
from . import codecs, thrift
from .constants import SAMPLED_FLAG, DEBUG_FLAG
from .span_context import SpanContext
import jaeger_client.thrift_gen.jaeger.ttypes as ttypes
if TYPE_CHECKING:
from .tracer import Tracer
logger = logging.getLogger('jaeger_tracing')
class Span(opentracing.Span):
    """Implements opentracing.Span."""

    __slots__ = ['_tracer', '_context',
                 'operation_name', 'start_time', 'end_time',
                 'logs', 'tags', 'finished', 'update_lock']

    def __init__(
        self,
        context: SpanContext,
        tracer: 'Tracer',
        operation_name: str,
        tags: Optional[Dict[str, Any]] = None,
        start_time: Optional[float] = None,
        references: Optional[List[Reference]] = None
    ) -> None:
        super(Span, self).__init__(context=context, tracer=tracer)
        self.operation_name = operation_name
        self.start_time = start_time or time.time()
        self.end_time: Optional[float] = None
        self.finished = False
        # Guards mutation of span state from multiple threads.
        self.update_lock = threading.Lock()
        self.references = references
        # we store tags and logs as Thrift objects to avoid extra allocations
        self.tags: List[ttypes.Tag] = []
        self.logs: List[ttypes.Log] = []
        if tags:
            for k, v in tags.items():
                self.set_tag(k, v)

    def set_operation_name(self, operation_name: str) -> 'Span':
        """
        Set or change the operation name.

        :param operation_name: the new operation name
        :return: Returns the Span itself, for call chaining.
        """
        with self.update_lock:
            self.operation_name = operation_name
        return self

    def finish(self, finish_time: Optional[float] = None) -> None:
        """Indicate that the work represented by this span has been completed
        or terminated, and is ready to be sent to the Reporter.

        If any tags / logs need to be added to the span, it should be done
        before calling finish(), otherwise they may be ignored.

        :param finish_time: an explicit Span finish timestamp as a unix
            timestamp per time.time()
        """
        if not self.is_sampled():
            return

        with self.update_lock:
            if self.finished:
                logger.warning('Span has already been finished; will not be reported again.')
                return
            self.finished = True
            self.end_time = finish_time or time.time()

        self.tracer.report_span(self)

    def set_tag(self, key: str, value: Any) -> 'Span':
        """
        Attach a key/value tag to the span (stored as a Thrift Tag).

        `sampling.priority` is intercepted and toggles the sampled/debug
        flags instead of being recorded as a regular tag.

        :param key: tag key
        :param value: tag value
        :return: Returns the Span itself, for call chaining.
        """
        with self.update_lock:
            if key == ext_tags.SAMPLING_PRIORITY and not self._set_sampling_priority(value):
                return self
            if self.is_sampled():
                tag = thrift.make_tag(
                    key=key,
                    value=value,
                    max_length=self.tracer.max_tag_value_length,
                    max_traceback_length=self._tracer.max_traceback_length,
                )
                self.tags.append(tag)
        return self

    def _set_sampling_priority(self, value):
        """
        N.B. Caller must be holding update_lock.
        """
        # Ignore debug spans trying to re-enable debug.
        if self.is_debug() and value:
            return False

        try:
            value_num = int(value)
        except ValueError:
            return False
        if value_num == 0:
            self.context.flags &= ~(SAMPLED_FLAG | DEBUG_FLAG)
            return False

        if self.tracer.is_debug_allowed(self.operation_name):
            self.context.flags |= SAMPLED_FLAG | DEBUG_FLAG
            return True
        return False

    def log_kv(self, key_values: Dict[str, Any], timestamp: Optional[float] = None) -> 'Span':
        """Record a structured log against this span (no-op when unsampled)."""
        if self.is_sampled():
            # Normalize the timestamp once; the original computed the
            # `time.time()` default a second time when building the log,
            # ignoring the value assigned here.
            timestamp = timestamp if timestamp else time.time()
            # TODO handle exception logging, 'python.exception.type' etc.
            log = thrift.make_log(
                timestamp=timestamp,
                fields=key_values,
                max_length=self._tracer.max_tag_value_length,
                max_traceback_length=self._tracer.max_traceback_length,
            )
            with self.update_lock:
                self.logs.append(log)
        return self

    def set_baggage_item(self, key: str, value: Optional[str]) -> 'Span':
        """Set a baggage item, replacing the span context with a new one."""
        prev_value = self.get_baggage_item(key=key)
        new_context = self.context.with_baggage_item(key=key, value=value)
        with self.update_lock:
            self._context = new_context
        if self.is_sampled():
            logs = {
                'event': 'baggage',
                'key': key,
                'value': value,
            }
            if prev_value:
                # TODO add metric for this
                logs['override'] = 'true'
            self.log_kv(key_values=logs)
        return self

    def get_baggage_item(self, key: str) -> Optional[str]:
        return self.context.baggage.get(key)

    def is_sampled(self) -> bool:
        return self.context.flags & SAMPLED_FLAG == SAMPLED_FLAG

    def is_debug(self) -> bool:
        return self.context.flags & DEBUG_FLAG == DEBUG_FLAG

    def is_rpc(self) -> bool:
        # A span is an RPC span when its span.kind tag is client or server.
        for tag in self.tags:
            if tag.key == ext_tags.SPAN_KIND:
                return tag.vStr == ext_tags.SPAN_KIND_RPC_CLIENT or \
                    tag.vStr == ext_tags.SPAN_KIND_RPC_SERVER
        return False

    def is_rpc_client(self) -> bool:
        for tag in self.tags:
            if tag.key == ext_tags.SPAN_KIND:
                return tag.vStr == ext_tags.SPAN_KIND_RPC_CLIENT
        return False

    @property
    def trace_id(self) -> int:
        return self.context.trace_id

    @property
    def span_id(self) -> int:
        return self.context.span_id

    @property
    def parent_id(self) -> Optional[int]:
        return self.context.parent_id

    @property
    def flags(self) -> int:
        return self.context.flags

    def __repr__(self) -> str:
        c = codecs.span_context_to_string(
            trace_id=self.context.trace_id, span_id=self.context.span_id,
            parent_id=self.context.parent_id, flags=self.context.flags)
        return '%s %s.%s' % (c, self.tracer.service_name, self.operation_name)

    def info(self, message, payload=None):
        """DEPRECATED"""
        if payload:
            self.log(event=message, payload=payload)
        else:
            self.log(event=message)
        return self

    def error(self, message, payload=None):
        """DEPRECATED"""
        self.set_tag('error', True)
        if payload:
            self.log(event=message, payload=payload)
        else:
            self.log(event=message)
        return self
|
92950a631f433704740d64746d6e1b636c3e1957
|
578db86c51d44ebddd0dc7b1738985b3dc69eb74
|
/corehq/apps/data_pipeline_audit/management/commands/compare_doc_ids.py
|
8e6db7e612472f2649a1b53f7820c5527da44ae9
|
[
"BSD-3-Clause"
] |
permissive
|
dimagi/commcare-hq
|
a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b
|
e7391ddae1af1dbf118211ecb52c83fc508aa656
|
refs/heads/master
| 2023-08-16T22:38:27.853437
| 2023-08-16T19:07:19
| 2023-08-16T19:07:19
| 247,278
| 499
| 203
|
BSD-3-Clause
| 2023-09-14T19:03:24
| 2009-07-09T17:00:07
|
Python
|
UTF-8
|
Python
| false
| false
| 5,805
|
py
|
compare_doc_ids.py
|
from datetime import datetime
from dateutil.relativedelta import relativedelta
from django.core.management.base import BaseCommand, CommandError
from itertools import zip_longest
from corehq.util.argparse_types import date_type
from corehq.apps.data_pipeline_audit.dbacessors import (
get_es_case_counts,
get_es_case_ids,
get_es_case_range,
get_es_form_ids,
get_es_user_ids,
get_primary_db_case_ids,
get_primary_db_form_ids,
)
from corehq.apps.domain.dbaccessors import get_doc_ids_in_domain_by_class
from corehq.apps.users.dbaccessors import (
get_all_user_ids_by_domain,
get_mobile_user_ids,
)
from corehq.apps.users.models import CommCareUser
from corehq.form_processor.models import XFormInstance
from corehq.util.markup import (
CSVRowFormatter,
SimpleTableWriter,
TableRowFormatter,
)
DATE_FORMAT = "%Y-%m-%d"
class Command(BaseCommand):
    help = "Print doc IDs that are in the primary DB but not in ES. Use in conjunction with 'raw_doc' view."

    def add_arguments(self, parser):
        parser.add_argument('domain')
        parser.add_argument('doc_type')
        parser.add_argument(
            '-s',
            '--startdate',
            dest='start',
            type=date_type,
            help="The start date. Only applicable to forms and cases on SQL domains. - format YYYY-MM-DD",
        )
        parser.add_argument(
            '-e',
            '--enddate',
            dest='end',
            type=date_type,
            help="The end date. Only applicable to forms and cases on SQL domains. - format YYYY-MM-DD",
        )
        parser.add_argument('--csv', action='store_true', default=False, dest='csv',
                            help='Write output in CSV format.')

    def handle(self, domain, doc_type, **options):
        csv = options.get('csv')

        startdate = options.get('start')
        enddate = options.get('end')
        if startdate and enddate and enddate <= startdate:
            raise CommandError("enddate must be after startdate")

        # Dispatch table: one comparison function per supported doc type.
        handlers = {
            'CommCareCase': compare_cases,
            'CommCareCase-Deleted': compare_cases,
            'CommCareUser': _compare_users,
            'CommCareUser-Deleted': _compare_users,
            'WebUser': _compare_users,
        }
        handlers.update({doc_type: compare_xforms for doc_type in XFormInstance.DOC_TYPE_TO_STATE})
        try:
            primary_count, es_count, primary_only, es_only = \
                handlers[doc_type](domain, doc_type, startdate, enddate)
        except KeyError:
            raise CommandError('Unsupported doc type. Use on of: {}'.format(', '.join(handlers)))

        if csv:
            row_formatter = CSVRowFormatter()
        else:
            row_formatter = TableRowFormatter([50, 50])

        date_range_output = ''
        if startdate or enddate:
            end = (enddate or datetime.utcnow().date()).strftime(DATE_FORMAT)
            # Bug fix: `startdate` may be None when only --enddate is given;
            # calling .strftime() on it raised AttributeError.
            start = startdate.strftime(DATE_FORMAT) if startdate else 'beginning'
            date_range_output = ' (Between {} and {})'.format(start, end)

        print("\nDoc ID analysis for {}{}\n".format(doc_type, date_range_output))
        print("Primary Count: {}".format(primary_count))
        print("ES Count: {}\n".format(es_count))

        writer = SimpleTableWriter(self.stdout, row_formatter)
        writer.write_table(
            ['Only in Primary', 'Only in ES'],
            zip_longest(primary_only, es_only, fillvalue='')
        )
def compare_cases(domain, doc_type, startdate, enddate):
    """Diff primary-DB vs ES case IDs, batching by month on large domains."""
    SMALL_DOMAIN_THRESHOLD = 100000
    total_cases = get_es_case_counts(domain, doc_type, startdate, enddate)
    if total_cases < SMALL_DOMAIN_THRESHOLD:
        # Small enough domain: compute the diff in a single pass.
        return _get_diffs(
            get_primary_db_case_ids(domain, doc_type, startdate, enddate),
            get_es_case_ids(domain, doc_type, startdate, enddate),
        )

    # Large domain: walk the full ES date range one month at a time and
    # accumulate the per-batch results (the caller's dates are ignored here,
    # matching the previous behavior).
    batch_start, final_end = get_es_case_range(domain)
    primary_total = 0
    es_total = 0
    primary_only = set()
    es_only = set()
    while batch_start <= final_end:
        batch_end = batch_start + relativedelta(months=1)
        p_count, e_count, p_ids, e_ids = _get_diffs(
            get_primary_db_case_ids(domain, doc_type, batch_start, batch_end),
            get_es_case_ids(domain, doc_type, batch_start, batch_end),
        )
        primary_total += p_count
        es_total += e_count
        primary_only |= p_ids
        es_only |= e_ids
        batch_start = batch_end
    return primary_total, es_total, primary_only, es_only
def compare_xforms(domain, doc_type, startdate, enddate):
    """Diff primary-DB vs ES form IDs for the given doc type and window."""
    primary_ids = get_primary_db_form_ids(domain, doc_type, startdate, enddate)
    es_ids = get_es_form_ids(domain, doc_type, startdate, enddate)
    return _get_diffs(primary_ids, es_ids)
def _compare_users(domain, doc_type, startdate, enddate):
    """Diff primary-DB (couch) vs ES user IDs for the given user doc type.

    Date filtering is not supported for users; passing either bound raises
    CommandError.
    """
    if startdate or enddate:
        raise CommandError("Date filtering not supported for users")
    include_web_users = doc_type == 'WebUser'
    if not include_web_users and 'Deleted' in doc_type:
        # deleted users = all users - non-deleted users
        all_mobile_user_ids = set(get_doc_ids_in_domain_by_class(domain, CommCareUser))
        non_deleted_mobile_user_ids = get_mobile_user_ids(domain)
        # NOTE: renamed from `couch_count` -- this is a *set of IDs*, not a count.
        couch_ids = all_mobile_user_ids - non_deleted_mobile_user_ids
    else:
        couch_ids = set(get_all_user_ids_by_domain(
            domain,
            include_web_users=include_web_users,
            include_mobile_users=not include_web_users)
        )
    return _get_diffs(
        couch_ids,
        get_es_user_ids(domain, doc_type)
    )
def _get_diffs(primary_ids, es_ids):
return len(primary_ids), len(es_ids), primary_ids - es_ids, es_ids - primary_ids
|
4b0f1af9de2bbff580025a054b1101c09fa94ec0
|
e0c29408e6e4a19078a8299a03a575eb5def5cee
|
/tests/test_seedless_procrustes.py
|
67dd34d2f1027b327cf8993b6fabcea1a6aa8bc6
|
[
"MIT"
] |
permissive
|
microsoft/graspologic
|
790e1dfe18743280628b32b4ee946572bbfdd547
|
ba74fdb6e5394268a9fae38f95991cbd585bd99e
|
refs/heads/dev
| 2023-09-01T00:03:34.665845
| 2023-08-07T17:49:04
| 2023-08-07T17:49:04
| 147,768,493
| 226
| 101
|
MIT
| 2023-08-07T17:49:06
| 2018-09-07T04:00:05
|
Python
|
UTF-8
|
Python
| false
| false
| 6,457
|
py
|
test_seedless_procrustes.py
|
# Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
import unittest
import numpy as np
from scipy import stats
from graspologic.align import SeedlessProcrustes, SignFlips
class TestSeedlessProcrustes(unittest.TestCase):
    """Unit tests for graspologic.align.SeedlessProcrustes."""

    def test_bad_kwargs(self):
        """Constructor rejects bad types and values for every keyword argument."""
        # type errors for all but initial Q and initial P
        with self.assertRaises(TypeError):
            SeedlessProcrustes(optimal_transport_lambda="oops")
        with self.assertRaises(TypeError):
            SeedlessProcrustes(optimal_transport_eps="oops")
        with self.assertRaises(TypeError):
            SeedlessProcrustes(optimal_transport_num_reps=3.14)
        with self.assertRaises(TypeError):
            SeedlessProcrustes(iterative_num_reps=3.14)
        with self.assertRaises(TypeError):
            SeedlessProcrustes(init=["hi", "there"])
        # value errors for all but initial Q and initial P
        with self.assertRaises(ValueError):
            SeedlessProcrustes(optimal_transport_lambda=-0.01)
        with self.assertRaises(ValueError):
            SeedlessProcrustes(optimal_transport_eps=-0.01)
        with self.assertRaises(ValueError):
            SeedlessProcrustes(optimal_transport_num_reps=-1)
        with self.assertRaises(ValueError):
            SeedlessProcrustes(iterative_num_reps=-1)
        with self.assertRaises(ValueError):
            SeedlessProcrustes(init="hi")
        # initial Q and initial P passed when shouldn't be
        with self.assertRaises(ValueError):
            SeedlessProcrustes(
                init="2d",
                initial_Q=np.eye(2),
            )
        with self.assertRaises(ValueError):
            SeedlessProcrustes(init="2d", initial_P=np.ones((100, 100)) / 10000)
        with self.assertRaises(ValueError):
            SeedlessProcrustes(
                init="custom",
                initial_Q=np.eye(2),
                initial_P=np.ones((100, 100)) / 10000,
            )
        # initial Q and initial P specific values
        # pass bad types
        with self.assertRaises(TypeError):
            SeedlessProcrustes(init="custom", initial_Q="hello there")
        with self.assertRaises(TypeError):
            SeedlessProcrustes(init="custom", initial_P="hello there")
        with self.assertRaises(TypeError):
            SeedlessProcrustes(init="custom", initial_Q={"hello": "there"})
        with self.assertRaises(TypeError):
            SeedlessProcrustes(init="custom", initial_P={"hello": "there"})
        # pass non ndim=2 matrices (caught by check_array)
        with self.assertRaises(ValueError):
            SeedlessProcrustes(init="custom", initial_Q=np.eye(5).reshape(5, 5, 1))
        with self.assertRaises(ValueError):
            SeedlessProcrustes(init="custom", initial_P=np.eye(5).reshape(5, 5, 1))
        # pass not an orthogonal matrix as a Q
        with self.assertRaises(ValueError):
            SeedlessProcrustes(init="custom", initial_Q=np.eye(3)[:-1])
        with self.assertRaises(ValueError):
            SeedlessProcrustes(init="custom", initial_Q=np.ones((3, 3)))
        # pass not a soft assignment matrix as P
        with self.assertRaises(ValueError):
            SeedlessProcrustes(init="custom", initial_P=np.ones((3, 2)))

    def test_bad_datasets(self):
        """fit() rejects non-array inputs and dimension mismatches."""
        X = np.arange(6).reshape(6, 1)
        Y = np.arange(6).reshape(6, 1)
        Y_wrong_d = np.arange(12).reshape(6, 2)
        # check passing weird stuff as input (caught by us)
        with self.assertRaises(TypeError):
            aligner = SeedlessProcrustes()
            aligner.fit("hello there", Y)
        with self.assertRaises(TypeError):
            aligner = SeedlessProcrustes()
            aligner.fit(X, "hello there")
        with self.assertRaises(TypeError):
            aligner = SeedlessProcrustes()
            aligner.fit({"hello": "there"}, Y)
        with self.assertRaises(TypeError):
            aligner = SeedlessProcrustes()
            aligner.fit(X, {"hello": "there"})
        # check passing arrays of weird ndims (caught by check_array)
        with self.assertRaises(ValueError):
            aligner = SeedlessProcrustes()
            aligner.fit(X, Y.reshape(3, 2, 1))
        with self.assertRaises(ValueError):
            aligner = SeedlessProcrustes()
            aligner.fit(X.reshape(3, 2, 1), Y)
        # check passing arrays with different dimensions (caught by us)
        with self.assertRaises(ValueError):
            aligner = SeedlessProcrustes()
            aligner.fit(X, Y_wrong_d)
        # check passing array with wrong dimensions to transform (caught by us)
        with self.assertRaises(ValueError):
            aligner = SeedlessProcrustes()
            aligner.fit(X, Y)
            aligner.transform(Y_wrong_d)

    def test_different_inits(self):
        """Every supported `init` option produces a fit without error."""
        np.random.seed(314)
        mean = np.ones(3) * 5
        cov = np.eye(3) * 0.1
        X = stats.multivariate_normal.rvs(mean, cov, 100)
        Y = stats.multivariate_normal.rvs(mean, cov, 100)
        W = stats.ortho_group.rvs(3)
        Y = Y @ W
        aligner_1 = SeedlessProcrustes(init="2d")
        aligner_1.fit(X, Y)
        aligner_2 = SeedlessProcrustes(init="sign_flips")
        aligner_2.fit(X, Y)
        test_sign_flips = SignFlips()
        self.assertTrue(
            np.all(test_sign_flips.fit(X, Y).Q_ == aligner_2.selected_initial_Q_)
        )
        # "custom" without an explicit Q defaults to the identity.
        aligner_3 = SeedlessProcrustes(init="custom")
        aligner_3.fit(X, Y)
        self.assertTrue(np.all(np.eye(3) == aligner_3.selected_initial_Q_))
        aligner_4 = SeedlessProcrustes(init="custom", initial_Q=-np.eye(3))
        aligner_4.fit(X, Y)
        self.assertTrue(np.all(-np.eye(3) == aligner_4.selected_initial_Q_))
        aligner_5 = SeedlessProcrustes(
            init="custom", initial_P=np.ones((100, 100)) / 10000
        )
        aligner_5.fit(X, Y)

    def test_aligning_datasets(self):
        """Fitted Q maps X onto (approximately) the mean of the rotated Y."""
        np.random.seed(314)
        n, d = 250, 2
        mean = np.ones(d) * 5
        cov = np.ones((d, d)) * 0.02 + np.eye(d) * 0.8
        X = stats.multivariate_normal.rvs(mean, cov, n)
        Y = np.concatenate([X, X])
        W = stats.ortho_group.rvs(d)
        Y = Y @ W
        aligner = SeedlessProcrustes(init="2d")
        Q = aligner.fit(X, Y).Q_
        self.assertTrue(np.linalg.norm(Y.mean(axis=0) - (X @ Q).mean(axis=0)) < 0.1)
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
|
fe65de04fffa980460cb6260ec249cce0a09f0e0
|
03a7f7a7eb8c16b537b65ec21f465bb0335bc3b8
|
/docs/papers/sc2013/bench/pythran/mandel.py
|
b04b8f26793f96462e05047ef6219f10f8b22c24
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
serge-sans-paille/pythran
|
a0e22af1ac5e1f34f3f29dce36502f4a897b5186
|
d8ab07b4b3b690f50603cb4d08ba303d3af18b90
|
refs/heads/master
| 2023-09-01T16:04:03.289285
| 2023-08-30T09:13:58
| 2023-08-31T08:03:22
| 4,479,494
| 1,882
| 200
|
BSD-3-Clause
| 2023-09-06T20:08:10
| 2012-05-29T08:02:14
|
C++
|
UTF-8
|
Python
| false
| false
| 862
|
py
|
mandel.py
|
#runas mandel(800,0,0, 800)
#pythran export mandel(int, float, float, int)
def mandel(size, x_center, y_center, max_iteration):
    """Compute a size x size Mandelbrot escape-time grid.

    Pixel (i, j) maps to the complex point
    (x_center + 4*(i - size/2)/size, y_center + 4*(j - size/2)/size).
    The stored value is 255 for points that do not escape within
    max_iteration steps, otherwise (iteration * 10) % 255.
    """
    import numpy as np
    out = np.zeros((size,size))
    # NOTE(review): `xrange` means this targets Python 2 (this file is a
    # Pythran benchmark from the sc2013 paper) -- confirm the intended
    # interpreter before porting to `range`.
    for i in xrange(size):
        # The bare string below is an OpenMP directive consumed by Pythran
        # (see the `#pythran export` header), not a docstring; do not remove.
        "omp parallel for private(j,x,y,a,b,iteration, color_value) schedule(static,5)"
        for j in xrange(size):
            # Map pixel indices to a 4x4 window of the complex plane
            # centered at (x_center, y_center).
            x,y = ( x_center + 4.0*float(i-size/2)/size,
                    y_center + 4.0*float(j-size/2)/size
                  )
            a,b = (0.0, 0.0)
            iteration = 0
            # Standard escape-time iteration: z <- z^2 + c while |z| <= 2.
            while (a**2 + b**2 <= 4.0 and iteration < max_iteration):
                a,b = a**2 - b**2 + x, 2*a*b + y
                iteration += 1
            if iteration == max_iteration:
                color_value = 255
            else:
                color_value = iteration*10 % 255
            out[i,j]=color_value
    return out
|
1cec3c2b1ad29d7952730b6ec817eef1c2893b19
|
d1f15554df2d5c0f74ddbcba6e870359841f682b
|
/wagtail/actions/revert_to_page_revision.py
|
a0c07be5afd3c90b00c2062146b6fd7fbb00a707
|
[
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
wagtail/wagtail
|
bd405f89b86e0c625fef0685fd6bfba41cf5cbfc
|
06a7bc6124bf62675c09fbe0a4ed9bbac183e025
|
refs/heads/main
| 2023-09-04T06:22:51.601208
| 2023-09-01T15:22:00
| 2023-09-01T15:22:00
| 16,479,108
| 12,974
| 3,580
|
BSD-3-Clause
| 2023-09-14T10:45:04
| 2014-02-03T12:41:59
|
Python
|
UTF-8
|
Python
| false
| false
| 2,018
|
py
|
revert_to_page_revision.py
|
from django.core.exceptions import PermissionDenied
class RevertToPageRevisionError(RuntimeError):
    """
    Raised when the revision revert cannot be performed for data reasons.
    """
    # A docstring already forms the class body, so the redundant `pass` was removed.
class RevertToPageRevisionPermissionError(PermissionDenied):
    """
    Raised when the revision revert cannot be performed due to insufficient permissions.
    """
    # A docstring already forms the class body, so the redundant `pass` was removed.
class RevertToPageRevisionAction:
    """Action object that reverts a page to one of its earlier revisions.

    Construct it with the page and the target revision, then call
    ``execute()``; validation lives in ``check()``.
    """

    def __init__(
        self,
        page,
        revision,
        user=None,
        log_action="wagtail.revert",
        submitted_for_moderation=False,
        approved_go_live_at=None,
        changed=True,
        clean=True,
    ):
        self.page = page
        self.revision = revision
        self.user = user
        self.log_action = log_action
        self.submitted_for_moderation = submitted_for_moderation
        self.approved_go_live_at = approved_go_live_at
        self.changed = changed
        self.clean = clean

    def check(self, skip_permission_checks=False):
        """Validate the revert; raise a domain-specific error if not allowed."""
        # Alias pages mirror another page exactly, so they carry no revisions.
        if self.page.alias_of_id:
            raise RevertToPageRevisionError(
                "Revisions are not required for alias pages as they are an exact copy of another page."
            )
        # Permission checks are skipped when requested or when no user is given.
        if skip_permission_checks or not self.user:
            return
        if not self.page.permissions_for_user(self.user).can_edit():
            raise RevertToPageRevisionPermissionError(
                "You do not have permission to edit this page"
            )

    def execute(self, skip_permission_checks=False):
        """Run the checks, then save a new revision based on the old one."""
        self.check(skip_permission_checks=skip_permission_checks)
        return self.revision.as_object().save_revision(
            previous_revision=self.revision,
            user=self.user,
            log_action=self.log_action,
            submitted_for_moderation=self.submitted_for_moderation,
            approved_go_live_at=self.approved_go_live_at,
            changed=self.changed,
            clean=self.clean,
        )
|
244643d9319ca9b72b998837f73a0cbc83e67d45
|
1634f33c5021e8465a695fb5244504e2eeeecff5
|
/kitsune/search/management/commands/es_reindex.py
|
b535c5eea378c129def5da981fd1ebade0511c03
|
[] |
permissive
|
mozilla/kitsune
|
fee4b8598eb01f5b4add00f2f010b45e2a6ca901
|
67ec527bfc32c715bf9f29d5e01362c4903aebd2
|
refs/heads/main
| 2023-09-01T21:41:59.076570
| 2023-08-31T22:34:05
| 2023-08-31T22:34:05
| 489,645
| 1,218
| 697
|
BSD-3-Clause
| 2023-09-14T08:43:19
| 2010-01-26T18:53:57
|
Python
|
UTF-8
|
Python
| false
| false
| 4,894
|
py
|
es_reindex.py
|
from math import ceil
from dateutil.parser import parse as dateutil_parse
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import connection, reset_queries
from kitsune.search.es_utils import get_doc_types, index_objects_bulk
class Command(BaseCommand):
    help = "Reindex ES documents"

    def add_arguments(self, parser):
        """Register options controlling which docs to reindex and how to batch them."""
        parser.add_argument(
            "--limit",
            type=str,
            dest="limit",
            nargs="*",
            default="",
            help="Limit to specific doc types",
        )
        parser.add_argument(
            "--percentage",
            type=float,
            default=100,
            help="Index a percentage of total documents",
        )
        parser.add_argument(
            "--count",
            type=int,
            default=None,
            help="Index a set number of documents per type (overrides --percentage)",
        )
        parser.add_argument(
            "--sql-chunk-size",
            type=int,
            default=settings.ES_DEFAULT_SQL_CHUNK_SIZE,
            help="Retrieve this number of documents from SQL in each Celery job",
        )
        parser.add_argument(
            "--elastic-chunk-size",
            type=int,
            default=settings.ES_DEFAULT_ELASTIC_CHUNK_SIZE,
            help="Send this number of documents to ElasticSearch in each bulk request",
        )
        parser.add_argument(
            "--timeout",
            type=float,
            default=settings.ES_BULK_DEFAULT_TIMEOUT,
            help="Set the request timeout (in seconds)",
        )
        parser.add_argument(
            "--updated-before",
            type=dateutil_parse,
            default=None,
            help="Only index model instances updated before this date",
        )
        parser.add_argument(
            "--updated-after",
            type=dateutil_parse,
            default=None,
            help="Only index model instances updated after this date",
        )
        parser.add_argument(
            "--print-sql-count",
            action="store_true",
            help="Print the number of SQL statements executed",
        )

    def handle(self, *args, **kwargs):
        """Queue Celery reindex jobs for each (optionally filtered) doc type."""
        doc_types = get_doc_types()
        limit = kwargs["limit"]
        if limit:
            doc_types = [dt for dt in doc_types if dt.__name__ in limit]
        progress_msg = "Indexed {progress} out of {count}"
        for dt in doc_types:
            self.stdout.write("Reindexing: {}".format(dt.__name__))
            model = dt.get_model()
            before = kwargs["updated_before"]
            after = kwargs["updated_after"]
            if before or after:
                try:
                    qs = model.objects_range(before=before, after=after)
                except NotImplementedError:
                    # Fixed: the two adjacent literals previously concatenated
                    # without a space ("property.No documents") and misspelled
                    # "implemented".
                    print(
                        f"{model} hasn't implemented an `updated_column_name` property. "
                        "No documents will be indexed of this type."
                    )
                    continue
            else:
                qs = model._default_manager.all()
            total = qs.count()

            count = kwargs["count"]
            percentage = kwargs["percentage"]
            if count:
                print("Indexing {} documents out of {}".format(count, total))
            else:
                if percentage < 100:
                    count = int(total * percentage / 100)
                    qs = qs[:count]
                else:
                    count = total
                print("Indexing {}%, so {} documents out of {}".format(percentage, count, total))

            id_list = list(qs.values_list("pk", flat=True))
            sql_chunk_size = kwargs["sql_chunk_size"]
            # slice the list of ids into chunks of `sql_chunk_size` and send a task to celery
            # to process each chunk. we do this so as to not OOM on celery when processing
            # tens of thousands of documents
            for x in range(ceil(count / sql_chunk_size)):
                start = x * sql_chunk_size
                end = start + sql_chunk_size
                index_objects_bulk.delay(
                    dt.__name__,
                    id_list[start:end],
                    timeout=kwargs["timeout"],
                    # elastic_chunk_size determines how many documents get sent to elastic
                    # in each bulk request, the limiting factor here is the performance of
                    # our elastic cluster
                    elastic_chunk_size=kwargs["elastic_chunk_size"],
                )
                if kwargs["print_sql_count"]:
                    print("{} SQL queries executed".format(len(connection.queries)))
                    reset_queries()
                print(progress_msg.format(progress=min(end, count), count=count))
|
dea3fc72dde713e864c591df5c751646c52c8b2d
|
8eb7e2224cd81cd21fd5b0c4dd54abe85ba07e49
|
/netmiko/no_config.py
|
799fd81a9aeb9b8bae129ff804a84d0e1f225fea
|
[
"MIT"
] |
permissive
|
ktbyers/netmiko
|
f8b980569fd863f0a7bfe28580366339c4bd31ec
|
2e56b40ec639da130471c59dd1f3c93983471e41
|
refs/heads/develop
| 2023-08-30T20:33:05.554926
| 2023-08-29T21:50:45
| 2023-08-29T21:50:45
| 27,283,062
| 3,397
| 1,594
|
MIT
| 2023-09-04T03:04:31
| 2014-11-28T21:42:52
|
Python
|
UTF-8
|
Python
| false
| false
| 844
|
py
|
no_config.py
|
class NoConfig:
    """
    Class for platforms that have no config mode.

    check_config_mode returns True as the expectation is that configuration commands
    can be executed directly. So in your current state, you are in "config mode" i.e.
    you can make configuration changes.

    If you truly cannot make any configuration changes to device then you should probably
    overwrite check_config_mode in the platform specific driver and return False.
    """

    def check_config_mode(
        self, check_string: str = "", pattern: str = "", force_regex: bool = False
    ) -> bool:
        """Always report being in config mode; arguments are accepted but ignored."""
        return True

    def config_mode(
        self, config_command: str = "", pattern: str = "", re_flags: int = 0
    ) -> str:
        """No-op: there is no config mode to enter, so no device output is produced."""
        return ""

    def exit_config_mode(self, exit_config: str = "", pattern: str = "") -> str:
        """No-op: there is no config mode to exit, so no device output is produced."""
        return ""
|
c7d9ebaad66d5608100a931e2910ffce26c66115
|
f61577e4f63d30def2a6979e76a7478319359163
|
/scripts/gpa_utils.py
|
d5aab8cd2dd77a25aeeeff7d004148b73f509aaa
|
[
"MIT"
] |
permissive
|
GPUOpen-Tools/gpu_performance_api
|
27f3f69392c44afdd2b8cabbe4a0c453c0d28f26
|
376cec1c5e92324f0fcfc573be2a26a129e43945
|
refs/heads/master
| 2023-09-04T05:23:08.614847
| 2023-06-22T22:28:29
| 2023-06-22T22:28:29
| 56,246,835
| 102
| 26
|
MIT
| 2023-03-29T07:28:10
| 2016-04-14T15:01:21
|
C++
|
UTF-8
|
Python
| false
| false
| 5,828
|
py
|
gpa_utils.py
|
#!/usr/bin/python
## Copyright (c) 2019 Advanced Micro Devices, Inc. All rights reserved.
# Utility Python Script for gpa
import sys
import zipfile
import tarfile
import os
import subprocess
import urllib
# Whether the git subprocess calls in this module need shell=True.
# The SHELL environment variable is only set for Cygwin or Linux shells;
# when it is absent we assume the default Windows shell.
SHELLARG = False
SHELLTYPE = os.environ.get('SHELL')
if SHELLTYPE is None:  # idiomatic `is None` instead of `== None`
    # running on windows under default shell
    SHELLARG = True

# urllib.request only exists on Python 3; Python 2 keeps urlretrieve in urllib.
if sys.version_info.major == 3:
    import urllib.request
# Write files to a valid archive
def WriteFileToArchive(archive_handle, file_absolute_path, file_path_in_archive):
    """Add one file to an open archive handle (zip on Windows, tar elsewhere)."""
    print("Adding file to archive %s: %s (as %s)" % (archive_handle, file_absolute_path, file_path_in_archive))
    if archive_handle is None:
        return
    # CreateArchive hands out ZipFile handles on Windows and TarFile handles
    # elsewhere; the two classes spell "add a file" differently.
    if sys.platform == "win32":
        archive_handle.write(file_absolute_path, file_path_in_archive)
    else:
        archive_handle.add(file_absolute_path, file_path_in_archive)
# Creates an archive handle for given archive name
def CreateArchive(archive_file_name):
    """Open a new compressed archive for writing and return its handle.

    Windows gets a deflate-compressed zip; every other platform gets a
    gzipped tar. Pair with WriteFileToArchive, which mirrors this split.
    """
    if sys.platform == "win32":
        handle = zipfile.ZipFile(archive_file_name, 'w', zipfile.ZIP_DEFLATED)
    else:
        handle = tarfile.open(archive_file_name, 'w:gz')
    print("Created archive %s at %s" % (handle, archive_file_name))
    return handle
# Returns the SHA of the HEAD of the repo on local machine
def GetGitLocalRepoHead(git_local_repo_path):
    """Return the SHA of HEAD for the git repo at the given path.

    Returns None when the path is not a directory or the git process
    cannot be launched.
    """
    if not os.path.isdir(git_local_repo_path):
        return None
    try:
        git_process = subprocess.Popen(
            ["git", "-C", git_local_repo_path, "rev-list", "-1", "HEAD"],
            shell=False, stdout=subprocess.PIPE)
    except OSError:
        # Popen raises OSError (e.g. git not installed). The previous code
        # caught CalledProcessError -- which Popen never raises -- and then
        # hit a NameError by using the unassigned `git_process`.
        print("process creation for git rev-list failed")
        return None
    git_process.wait()
    revision = git_process.communicate()[0]
    return revision.decode().strip()
# Downloads file from URL
def Download(source_url, dest_dir, file_name):
    """Download source_url into dest_dir/file_name; return True on success.

    dest_dir must be an absolute path; it is created if missing.
    """
    # Assuming path is absolute
    if not os.path.isabs(dest_dir):  # idiomatic `not` instead of `False ==`
        print("Destination path is not valid")
        return False
    # clean up path, collapsing any ../ and converting / to \ for Windows
    dest_dir = os.path.normpath(dest_dir)
    if not os.path.isdir(dest_dir):
        os.makedirs(dest_dir)
    dest_path = os.path.join(dest_dir, file_name)
    print("Downloading " + file_name + " to " + dest_dir)
    # Python 2 keeps urlretrieve directly in urllib; Python 3 in urllib.request.
    if sys.version_info.major < 3:
        urllib.urlretrieve(source_url, dest_path)
    else:
        urllib.request.urlretrieve(source_url, dest_path)
    if os.path.isfile(dest_path):
        print("File Downloaded Successfully")
        return True
    else:
        print("Unable to download file")
        return False
def SwitchToBranchOrRef(localrepopath, branch_or_ref):
    """Check out the given branch name or commit ref in a local repo.

    Silently does nothing when localrepopath is not a directory; exits the
    process with status 1 if the checkout fails.
    """
    if not os.path.isdir(localrepopath):
        return
    currentDir = os.getcwd()
    os.chdir(localrepopath)
    commandArgs = ["git", "checkout", branch_or_ref]
    try:
        sys.stdout.flush()
        # Return value of check_call was previously bound to an unused `p`.
        subprocess.check_call(commandArgs, shell=SHELLARG)
        sys.stdout.flush()
        sys.stderr.flush()
        os.chdir(currentDir)
    except subprocess.CalledProcessError as e:
        # Fixed message: this is a checkout, not a clone.
        print("'git checkout' failed with returncode: %d\n" % e.returncode)
        os.chdir(currentDir)
        sys.stderr.flush()
        sys.exit(1)
def CloneGitRepo(remote, branch, target):
    """Clone `remote` into `target` and optionally check out `branch`.

    Returns True on success, False when the clone itself fails.
    """
    target = os.path.normpath(target)
    clone_cmd = ["git", "clone", remote, target]
    try:
        sys.stdout.flush()
        subprocess.check_call(clone_cmd, shell=SHELLARG)
        sys.stdout.flush()
        sys.stderr.flush()
    except subprocess.CalledProcessError as e:
        print ("'git clone' failed with returncode: %d\n" % e.returncode)
        sys.stderr.flush()
        return False
    # Use SwitchToBranchOrRef so that both branch names and commit hashes are supported.
    if branch is not None:
        SwitchToBranchOrRef(target, branch)
    return True
# verify a branch exists in a repository.
# verify a branch exists in a repository.
def VerifyBranch(repo_source, commit):
    """Return True if `commit` names a branch that exists in `repo_source`.

    A None commit returns False (caller falls back to another branch), and
    "master" is assumed to always exist. A failed remote query exits the
    process with status 1.
    """
    # commit may be "None". In this case return false to tell the caller to use a different branch.
    if commit is None:
        return False
    # Assume master branch always exists.
    if commit == "master":
        return True
    # Determine if branch exists in repository.
    branch_ref = "refs/heads/" + commit
    ls_remote_cmd = ["git", "ls-remote", repo_source, branch_ref]
    try:
        sys.stdout.flush()
        cmd_output = subprocess.check_output(ls_remote_cmd, shell=SHELLARG)
        sys.stdout.flush()
        sys.stderr.flush()
    except subprocess.CalledProcessError as e:
        print("Error attempting to query %s for information about branch %s."%(repo_source, commit))
        print("ReturnCode: %d"%e.returncode)
        sys.exit(1)
    if not cmd_output:
        print("Git reference %s does not exist in %s."%(commit, repo_source))
        return False
    # output is in format "<sha_of_commit> refs/heads/<branch_name>
    (sha, ref_dirty) = cmd_output.split(b'\t')
    ref = ref_dirty.replace(b'\n', b'')
    return ref == branch_ref.encode()
|
cbca5db91bbbf79ee18a987901179b44bcefd824
|
a198aa98679ae1fc70388f8376a9a41444040319
|
/deprecated-tools/add-GTFS-to-a-network-dataset/scripts/GenerateStop2StreetConnectors.py
|
bcc70588fb829a5f06f4c11a63df28023f254dd1
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Esri/public-transit-tools
|
8437831328e94fa9fe74469922c204eb6fb74b22
|
47cbc3de67a7b1bf9255e07e88cba7b051db0505
|
refs/heads/master
| 2023-09-05T17:01:23.375328
| 2023-08-30T19:58:50
| 2023-08-30T19:58:50
| 42,553,165
| 155
| 67
|
Apache-2.0
| 2023-08-30T19:58:51
| 2015-09-15T23:38:22
|
Python
|
UTF-8
|
Python
| false
| false
| 15,040
|
py
|
GenerateStop2StreetConnectors.py
|
################################################################################
## Toolbox: Add GTFS to a Network Dataset
## Tool name: 2) Generate Stop-Street Connectors
## Created by: Melinda Morang, Esri, mmorang@esri.com
## Last updated: 2 February 2018
################################################################################
''' This tool snaps the transit stops to the street feature class, generates a
connector line between the original stop location and the snapped stop location,
and adds vertices to the street features at the locations of the snapped stops.
These steps ensure good connectivity in the network dataset. Alternate methods
can be substituted for this step when the user's data contains more information
about how stops should be connected to streets, such as station entrance
locations or station interior geometry.'''
################################################################################
'''Copyright 2018 Esri
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
################################################################################
import os, sqlite3
import arcpy
class CustomError(Exception):
    """Tool-level error raised (after arcpy.AddError reports the message) to
    abort the main script body below."""
    pass
try:
# Get the original overwrite output setting so we can reset it at the end.
OverwriteOutput = arcpy.env.overwriteOutput
# It's okay to overwrite stuff in this tool
arcpy.env.overwriteOutput = True
# Check if they have the necessary license.
# The snap_edit tool requires at least Standard (ArcEditor)
ArcLicense = arcpy.ProductInfo()
if ArcLicense != "ArcEditor" and ArcLicense != "ArcInfo":
message = "To run this tool, you must have the Desktop Standard (ArcEditor) \
or Advanced (ArcInfo) license. Your license type is: %s." % ArcLicense
arcpy.AddError(message)
raise CustomError
# ----- Collect user inputs -----
# Location of FD where network dataset will be created
outFD = arcpy.GetParameterAsText(0)
# Streets feature class
Streets = arcpy.GetParameterAsText(1)
# SQL expression for selecting street features where pedestrians are allowed
SelectExpression = arcpy.GetParameterAsText(2)
# Max distance for which stops will be snapped to streets
snapdist = arcpy.GetParameterAsText(3) # Default: 40m
# Units of snap distance
snapunits = arcpy.GetParameterAsText(4) # Default: meters
outGDB = os.path.dirname(outFD)
# Stops must exist. Check is in tool validation
outStops = os.path.join(outFD, "Stops")
# The SQL database was created in GenerateStopPairs and placed in the GDB. Name should be correct.
SQLDbase = os.path.join(outGDB, "GTFS.sql")
# Output feature classes
outStopsSnapped = os.path.join(outFD, "Stops_Snapped2Streets")
outConnectors = os.path.join(outFD, "Connectors_Stops2Streets")
outStreetsSplit = os.path.join(outFD, "Streets_UseThisOne")
outTempSelection = os.path.join(outFD, "Temp_SelectedStreets")
TempSnappedStops = os.path.join(outGDB, "TempStopsSnapped4Integrate")
# ----- Collect parent_station info -----
parent_stations = {}
where = "location_type = '1'"
with arcpy.da.SearchCursor(outStops, ["Shape@", "stop_id"], where) as cur:
for row in cur:
parent_stations[row[1]] = row[0].firstPoint # Use firstPoint to convert from PointGeometry to Point
# ----- Create a feature class for stops snapped to streets -----
arcpy.AddMessage("Snapping stops to streets network...")
# Create a copy of the original stops FC. We don't want to overwrite it.
arcpy.management.CopyFeatures(outStops, outStopsSnapped)
SR = arcpy.Describe(outStopsSnapped).spatialReference
# ----- Handle parent stations and station entrances -----
# Delete station entrances from Stops - these will only be in the snapped version to connect to streets
# Also make a list of parent stations with entrances
parent_stations_with_entrances = []
where = "location_type = '2'"
with arcpy.da.UpdateCursor(outStops, ["parent_station"], where) as cur:
for row in cur:
parent_stations_with_entrances.append(row[0])
cur.deleteRow()
parent_stations_with_entrances = list(set(parent_stations_with_entrances))
# Remove parent stations with valid entrances from snapped stops. They will be connected to streets through the entrances.
if parent_stations_with_entrances:
where = "location_type = '1'"
with arcpy.da.UpdateCursor(outStopsSnapped, ["stop_id"], where) as cur:
for row in cur:
if row[0] in parent_stations_with_entrances:
cur.deleteRow()
# Remove any stops that have a parent station.
# These should be connected to the parent station and not the street
parent_station_connectors = [] # list of line features
parent_stations_to_delete = []
if parent_stations:
where = "parent_station <> '' and location_type = '0'"
with arcpy.da.UpdateCursor(outStopsSnapped, ["Shape@", "stop_id", "parent_station", "location_type"], where) as cur:
for row in cur:
parent_station_id = row[2]
if parent_station_id not in parent_stations:
# This is a data problem, but we can get around it by just
# snapping the stop to the street instead of the missing parent station
continue
# Generate a straight line between the stop and its parent station
array = arcpy.Array()
array.add(row[0].firstPoint) # Use firstPoint to convert from PointGeometry to Point
array.add(parent_stations[parent_station_id])
polyline = arcpy.Polyline(array, SR)
if polyline.length != 0:
# Keep the line for later when we'll add it to the connectors feature class
parent_station_connectors.append([row[1], polyline, parent_station_id]) # [[stop_id, polyline geometry], [], ...]
else:
# If the stop and parent station are in the same place, don't generate a line because
# this will cause network build errors. Instead, we'll delete the parent_station later.
parent_stations_to_delete.append(parent_station_id)
# Delete this row from the snapped stops because the stop snaps to its parent station and not the street
cur.deleteRow()
parent_stations_to_delete = list(set(parent_stations_to_delete))
# ----- Snap stops to streets -----
# Select only those streets where pedestrians are allowed,
# as specified by the user's SQL expression
if SelectExpression:
SelectionMessage = "Stops will snap only to street features where the \
following is true: " + SelectExpression
arcpy.AddMessage(SelectionMessage)
arcpy.analysis.Select(Streets, outTempSelection, SelectExpression)
# Snap the stops to the streets network, using the snapping tolerance
# specified in the user's input.
snapdisttext = str(snapdist) + " " + snapunits # Eg, "40 meters"
snapenv = [outTempSelection, "EDGE", snapdisttext]
arcpy.edit.Snap(outStopsSnapped, [snapenv])
# Clean up.
arcpy.management.Delete(outTempSelection)
# ----- Generate lines connecting streets with stops -----
arcpy.AddMessage("Creating connector lines between stops and streets...")
# Put Stops and Snapped stops into same scratch FC for input to PointsToLine
outStopsCombined = os.path.join(outGDB, "TempStopswSnapped")
arcpy.management.CopyFeatures(outStops, outStopsCombined)
arcpy.management.Append(outStopsSnapped, outStopsCombined)
# Create Connector lines
arcpy.management.PointsToLine(outStopsCombined, outConnectors, "stop_id")
arcpy.management.AddField(outConnectors, "connector_type", "TEXT")
arcpy.management.CalculateField(outConnectors, "connector_type", '"Direct stop to street connection"', "PYTHON_9.3")
# Clean up.
arcpy.management.Delete(outStopsCombined)
# ----- Generate lines connecting parent stations with their child stops -----
# Delete parent stations that are coincident with stops.
if parent_stations_to_delete:
where = "location_type = '1'"
with arcpy.da.UpdateCursor(outStops, ["stop_id"], where) as cur:
for row in cur:
if row[0] in parent_stations_to_delete:
cur.deleteRow()
# Add connections between child stops and parent stations
if parent_station_connectors:
arcpy.management.AddField(outConnectors, "parent_station", "TEXT")
with arcpy.da.InsertCursor(outConnectors, ["stop_id", "SHAPE@", "parent_station", "connector_type"]) as cur:
for connector in parent_station_connectors:
cur.insertRow(connector + ["Stop to parent station connection"])
# ----- Generate lines connecting parent stations with their street entrances
if parent_stations_with_entrances:
station_entrance_connectors = []
where = "location_type = '2'"
with arcpy.da.UpdateCursor(outStopsSnapped, ["Shape@", "stop_id", "parent_station"], where) as cur:
for row in cur:
parent_station_id = row[2]
# Generate a straight line between the parent station and the street entrance
array = arcpy.Array()
array.add(parent_stations[parent_station_id])
array.add(row[0].firstPoint) # Use firstPoint to convert from PointGeometry to Point
polyline = arcpy.Polyline(array, SR)
if polyline.length == 0:
# If the station entrance and parent station are in the same place, don't generate a line because
# this will cause network build errors. Just delete the entrance because we don't need it.
# This should only happen if the parent station coincidentally falls exactly on top of a street feature
cur.deleteRow()
continue
# Keep the line for later when we'll add it to the connectors feature class
station_entrance_connectors.append([row[1], polyline, parent_station_id]) # [[stop_id, polyline geometry], [], ...]
# Actually add the lines
if station_entrance_connectors:
with arcpy.da.InsertCursor(outConnectors, ["stop_id", "SHAPE@", "parent_station", "connector_type"]) as cur:
for connector in station_entrance_connectors:
cur.insertRow(connector + ["Parent station to street entrance connection"])
# ----- Create and populate the wheelchair_boarding field -----
# Connect to the SQL database
conn = sqlite3.connect(SQLDbase)
c = conn.cursor()
# Determine if wheelchair_boarding is present
c.execute("PRAGMA table_info(stops)")
table_info = c.fetchall()
col_names = []
for col in table_info:
col_names.append(col[1])
if "wheelchair_boarding" in col_names:
arcpy.AddMessage("Handling wheelchair_boarding...")
# Make a dictionary of stop wheelchair_boarding info
GetStopInfoStmt = "SELECT stop_id, wheelchair_boarding, parent_station FROM stops"
c.execute(GetStopInfoStmt)
WheelchairBoarding_dict = {} # {stop_id: wheelchair_boarding}
ParentStation_dict = {} # {stop_id: parent_station}
for stop in c:
WheelchairBoarding_dict[stop[0]] = unicode(stop[1])
ParentStation_dict[stop[0]] = stop[2]
# Add wheelchair_boarding information to each stop-street connector
arcpy.management.AddField(outConnectors, "wheelchair_boarding", "TEXT")
with arcpy.da.UpdateCursor(outConnectors, ["stop_id", "wheelchair_boarding"]) as cur:
for row in cur:
stop_id = row[0]
wheelchair_boarding = WheelchairBoarding_dict[stop_id]
if not wheelchair_boarding or wheelchair_boarding==u'0':
# If there's a parent station, the stop inherits the value
parent_station = ParentStation_dict[stop_id]
if WheelchairBoarding_dict.has_key(parent_station):
wheelchair_boarding = WheelchairBoarding_dict[parent_station]
if wheelchair_boarding:
row[1] = wheelchair_boarding
else:
row[1] = u'0'
cur.updateRow(row)
# ----- Create vertices in steets at locations of snapped stops
arcpy.AddMessage("Creating vertices in streets at location of stops...")
arcpy.AddMessage("(This step might take a while.)")
# Copy snapped stops before running integrate because we don't want to make
# permanent changes to it.
arcpy.management.CopyFeatures(outStopsSnapped, TempSnappedStops)
# Copy the streets to a new FC because we're going to modify them.
arcpy.management.CopyFeatures(Streets, outStreetsSplit)
# Integrate adds vertices in outStreetsSplit at the locations where
# TempSnappedStops fall within the default XY Tolerance. Because the
# snapped stops are directly on top of the streets, neither streets nor
# stops should move at all (though Integrate sometimes causes this to
# happen).
arcpy.management.Integrate([[outStreetsSplit, 1], [TempSnappedStops, 2]])
# Add a pedestrians_allowed field that the user can calculate. It will be automatically used in the pedestrian
# restriction attribute if the user creates their network using the provided template.
arcpy.management.AddField(outStreetsSplit, "pedestrians_allowed", "SHORT")
# Clean up.
arcpy.management.Delete(TempSnappedStops)
arcpy.AddMessage("Finished!")
arcpy.AddMessage("Your stop-street connector feature class is:")
arcpy.AddMessage("- " + outConnectors)
arcpy.AddMessage("Your feature class of stops snapped to streets is:")
arcpy.AddMessage("- " + outStopsSnapped)
arcpy.AddMessage("Your modified streets feature class is:")
arcpy.AddMessage("- " + outStreetsSplit)
except CustomError:
arcpy.AddMessage("Failed to generate stop-street connectors.")
pass
except:
arcpy.AddMessage("Failed to generate stop-street connectors.")
raise
finally:
# Reset the overwrite output to the user's original setting..
arcpy.env.overwriteOutput = OverwriteOutput
|
2eca76ccde0ce48a86c0c51221821e3e98bc41fa
|
872ea32f551c803ac497a38667dc272965246561
|
/tensorflow_transform/tf_utils.py
|
b07c5c33d6b33dadfa6ccda21aa64efc16b24fac
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/transform
|
5c4d74c15e7a13ef0901816dfe35b0901d6cb1da
|
d2bfc2640137324dcad7f7be365e6c851c01f4e9
|
refs/heads/master
| 2023-08-31T21:54:54.222760
| 2023-08-15T22:45:45
| 2023-08-15T22:46:20
| 81,509,390
| 1,030
| 267
|
Apache-2.0
| 2023-08-11T22:57:56
| 2017-02-10T00:36:53
|
Python
|
UTF-8
|
Python
| false
| false
| 72,766
|
py
|
tf_utils.py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF utils for computing information over given data."""
import contextlib
import enum
from typing import Callable, Optional, Sequence, Tuple, Union
import tensorflow as tf
from tensorflow_transform import annotators
from tensorflow_transform import common_types
# TODO(b/243513856): Switch to `collections.namedtuple` or `typing.NamedTuple`
# once the Spark issue is resolved.
from tfx_bsl.types import tfx_namedtuple
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.util import object_identity
# pylint: enable=g-direct-tensorflow-import
_AssetFileType = Union[tf.Tensor, str]
_FLOATING_NAN = float('nan')
# Global sentinels used to keep track of the total counts of y
GLOBAL_Y_COUNT_SENTINEL_STRING = b'global_y_count_sentinel'
GLOBAL_Y_COUNT_SENTINEL_INT = tf.int64.limits[1]
# Key for graph collection containing tuple of a key to the eager tensor
# representing asset path and the graph tensor tracking the analyzer in
# `analyzer_nodes.TENSOR_REPLACEMENTS`.
_ASSET_REPLACEMENTS = 'tft_asset_replacements'
# Key for graph collection containing string IDs for vocabulary extra tokens.
_VOCABULARY_RESERVED_TOKENS_IDS = 'tft_vocab_extra_tokens_ids'
# Key for graph collection containing extra tokens to include in a vocabulary.
_VOCABULARY_RESERVED_TOKENS = 'tft_vocab_extra_tokens'
# Result of a batch-wise (possibly weighted) vocabulary reduction.
# Fields not computed by a particular reduction are set to None:
#   unique_x: unique values seen in the batch.
#   summed_weights_per_x: total weight per unique value.
#   summed_positive_per_x_and_y: per-(x, y-class) summed positive weights.
#   counts_per_x: occurrence count per unique value.
ReducedBatchWeightedCounts = tfx_namedtuple.namedtuple('ReducedBatchCounts', [
    'unique_x', 'summed_weights_per_x', 'summed_positive_per_x_and_y',
    'counts_per_x'
])
# Hashable stand-in for a CompositeTensor: its TypeSpec plus per-component
# references. Produced by hashable_tensor_or_op, consumed by deref_tensor_or_op.
_CompositeTensorRef = tfx_namedtuple.namedtuple('_CompositeTensorRef',
                                                ['type_spec', 'list_of_refs'])
def get_values(x: common_types.TensorType) -> tf.Tensor:
  """Extracts values if the given tensor is composite."""
  # Composite tensors carry their payload in a values attribute; a dense
  # tensor is already its own values.
  if isinstance(x, tf.SparseTensor):
    return x.values
  if isinstance(x, tf.RaggedTensor):
    return x.flat_values
  return x
def copy_tensors(tensors):
  """Makes deep copies of a dict of tensors.

  Makes deep copies (using tf.identity or its equivalent for `CompositeTensor`s)
  of the values of `tensors`.

  Args:
    tensors: A dict whose keys are strings and values are `Tensor`s or
      `CompositeTensor`s.

  Returns:
    A copy of `tensors` with values replaced by tf.identity applied to the
    value, or the equivalent for `CompositeTensor`s.
  """
  copies = {}
  for name, tensor in tensors.items():
    copies[name] = _copy_tensor_or_composite_tensor(tensor)
  return copies
def _copy_tensor(tensor):
  # Name the copy after the producing op so graphs stay debuggable.
  return tf.identity(tensor, name=f'{tensor.op.name}_copy')
def _copy_tensor_or_composite_tensor(tensor):
  """Deep-copies a Tensor, or each component of a CompositeTensor."""
  if not isinstance(tensor, composite_tensor.CompositeTensor):
    return _copy_tensor(tensor)
  return tf.nest.map_structure(_copy_tensor, tensor, expand_composites=True)
def _get_ragged_batch_value_rowids(tensor: tf.RaggedTensor) -> tf.Tensor:
  """Maps each flat value of `tensor` to the index of its outermost (batch) row.

  Composes the per-level `value_rowids` from the innermost ragged dimension
  outwards, producing one batch-row index per element of `tensor.flat_values`.
  """
  nested_value_rowids = tensor.nested_value_rowids()
  # Start with the innermost level's rowids, then translate through each outer
  # level until the indices refer to the batch dimension.
  result = nested_value_rowids[-1]
  for value_rowids in reversed(nested_value_rowids[:-1]):
    result = tf.gather(value_rowids, result)
  return result
def _make_regex_filter_fn(
    x: tf.Tensor,
    filter_regex: Optional[str]) -> Callable[[tf.Tensor], tf.Tensor]:
  """Returns a filter function that applies `x`'s mask."""
  if filter_regex is None:
    # Nothing to filter: the identity function.
    return lambda values: values
  if x.dtype != tf.string:
    raise ValueError('Regex filtering is only possible with string input, '
                     f'got {x.dtype}')
  # Keep every element of x that does NOT fully match the regex.
  keep_mask = tf.logical_not(tf.strings.regex_full_match(x, filter_regex))
  return lambda values: tf.boolean_mask(values, keep_mask)
def reduce_batch_weighted_counts(
    x: common_types.TensorType,
    weights: Optional[tf.Tensor] = None,
    force: bool = False,
    filter_regex: Optional[str] = None) -> ReducedBatchWeightedCounts:
  """Performs batch-wise reduction to produce (possibly weighted) counts.

  Args:
    x: Input `Tensor` or `CompositeTensor`.
    weights: (Optional) Input weights.
    force: If True, reduces input tensor without weights to unique elements and
      counts.
    filter_regex: (Optional) Regex that matches tokens that have to be filtered
      out. May only be specified if `x` has string dtype.

  Returns:
    a named tuple of...
      The unique values in x
      The sum of the weights for each unique value in x if weights are provided,
        else None
  """
  # Reduce composite inputs to their flat values; counting ignores structure.
  if isinstance(x, tf.SparseTensor):
    x = x.values
  elif isinstance(x, tf.RaggedTensor):
    x = x.flat_values
  flat_x = tf.reshape(x, [-1])
  # The filter mask is computed once from flat_x and reused on weights below so
  # both are filtered consistently.
  filter_fn = _make_regex_filter_fn(flat_x, filter_regex)
  flat_x = filter_fn(flat_x)
  if weights is None:
    if force:
      unique, _, counts = tf.unique_with_counts(flat_x)
      return ReducedBatchWeightedCounts(unique, None, None, counts)
    else:
      # TODO(b/112916494): Always do batch wise reduction once possible.
      # Unweighted, non-forced case: return raw values; downstream accumulates.
      return ReducedBatchWeightedCounts(flat_x, None, None, None)
  # TODO(b/134075780): Revisit expected weights shape when input is composite.
  x, weights = assert_same_shape(x, weights)
  weights = filter_fn(tf.reshape(weights, [-1]))
  unique_x_values, unique_idx, _ = tf.unique_with_counts(
      flat_x, out_idx=tf.int64)
  # Sum the weight of every occurrence of each unique value.
  summed_weights_per_x = tf.math.unsorted_segment_sum(
      weights, unique_idx, tf.size(input=unique_x_values))
  return ReducedBatchWeightedCounts(unique_x_values, summed_weights_per_x, None,
                                    None)
def reduce_batch_weighted_cooccurrences(
    x_input: common_types.TensorType,
    y_input: tf.Tensor,
    weights_input: Optional[tf.Tensor] = None,
    extend_with_sentinel_counts: bool = True,
    filter_regex: Optional[str] = None) -> ReducedBatchWeightedCounts:
  """Performs batch-wise reduction to produce weighted co-occurrences.

  Computes the weighted co-occurrence of each feature value in x, for each value
  in the range [0, max(y)). If extend_with_sentinel_counts is true, the return
  value will include an additional sentinel token (not in the true vocabulary)
  that is used to accumulate the global distribution of y values.

  Args:
    x_input: Input `Tensor` or `CompositeTensor`.
    y_input: Integer `Tensor` with which to compute the co-occurrence with
      x_input.
    weights_input: (Optional) Weights input `Tensor`.
    extend_with_sentinel_counts: If True, the reduced batch will be extended
      a sentinel value that accumlate the total distribution of y values. Should
      be True except when called recursively with the sentinel value as input.
    filter_regex: (Optional) Regex that matches tokens that have to be filtered
      out. Can only be specified if `x_input` has string dtype.

  Returns:
    a namedtuple of...
      unique_x_values: the unique values in x
      summed_weights_per_x: sum of the weights for each unique value in x
      summed_positive_per_x_and_y: If tensor y is provided, the sum of
        positive weights for each unique y value, for each unique value in x.
        If y tensor is not provided, value is None.
      counts_per_x: if y is provided, counts of each of the unique values in x,
        otherwise, None.
  """
  tf.compat.v1.assert_type(y_input, tf.int64)
  # TODO(b/134075780): Revisit expected weights shape when input is sparse.
  if isinstance(x_input, tf.SparseTensor):
    # Each sparse value inherits the y of the batch instance it belongs to.
    batch_indices = x_input.indices[:, 0]
    # y and densified x should have the same batch dimension.
    assert_eq = tf.compat.v1.assert_equal(
        tf.shape(y_input)[0], tf.cast(x_input.dense_shape[0], tf.int32))
    with tf.control_dependencies([assert_eq]):
      y = tf.gather(y_input, batch_indices)
    x = x_input.values
  elif isinstance(x_input, tf.RaggedTensor):
    # Each batch instance in x corresponds to a single value in y.
    x_row_indices = _get_ragged_batch_value_rowids(x_input)
    assert_compatible = tf.debugging.assert_greater_equal(
        tf.shape(y_input, out_type=tf.int64)[0], x_input.bounding_shape(axis=0))
    with tf.control_dependencies([assert_compatible]):
      x = tf.ensure_shape(x_input.flat_values, [None])
      y = tf.gather(y_input, x_row_indices)
  else:
    y = y_input
    x = x_input
  if weights_input is None:
    # Unweighted input: every occurrence counts with weight 1.
    weights = tf.ones_like(x, dtype=tf.float32)
  else:
    x, weights_input = assert_same_shape(x, weights_input)
    weights = weights_input
  y = _broadcast_to_x_shape(x, y)
  x, y = assert_same_shape(x, y)
  x = tf.reshape(x, [-1])
  # One filter mask derived from x, applied identically to x, y and weights.
  filter_fn = _make_regex_filter_fn(x, filter_regex)
  x = filter_fn(x)
  y = filter_fn(tf.reshape(y, [-1]))
  weights = filter_fn(tf.reshape(weights, [-1]))
  unique_x_values, unique_idx, unique_count = tf.unique_with_counts(
      x, out_idx=tf.int64)
  summed_weights_per_x = tf.math.unsorted_segment_sum(
      weights, unique_idx, tf.size(input=unique_x_values))
  # For each feature value in x, computes the weighted sum positive for each
  # unique value in y.
  max_y_value = tf.cast(tf.reduce_max(input_tensor=y_input), tf.int64)
  max_x_idx = tf.cast(tf.size(unique_x_values), tf.int64)
  # Encode the (x, y) pair as a single flat segment id so one segment_sum
  # computes the whole co-occurrence matrix; reshaped to 2-D below.
  dummy_index = (max_y_value + 1) * unique_idx + y
  summed_positive_per_x_and_y = tf.cast(
      tf.math.unsorted_segment_sum(weights, dummy_index,
                                   max_x_idx * (max_y_value + 1)),
      dtype=tf.float32)
  summed_positive_per_x_and_y = tf.reshape(summed_positive_per_x_and_y,
                                           [max_x_idx, max_y_value + 1])
  reduced_batch = ReducedBatchWeightedCounts(
      unique_x=unique_x_values,
      summed_weights_per_x=summed_weights_per_x,
      summed_positive_per_x_and_y=summed_positive_per_x_and_y,
      counts_per_x=unique_count)
  # Add a sentinel token tracking the full distribution of y values.
  if extend_with_sentinel_counts:
    reduced_batch = extend_reduced_batch_with_y_counts(reduced_batch, y_input,
                                                       weights_input)
  return reduced_batch
def extend_reduced_batch_with_y_counts(reduced_batch, y, weights=None):
  """Extend the ReducedBatchWeightedCounts with global counts for y.

  This is used to maintain an accurate count of global frequencies of each value
  in y. When x is multivalent, the sum over the summed_positive_per_x_and_y
  will over-count the occurrence of y. To keep track of the true distribution
  of y values, we add a sentinel value that tracks the global counts of each
  distinct value in y. This is useful for computing the mutual information
  between values in x and y.

  Args:
    reduced_batch: A ReducedBatchWeightedCounts instance.
    y: A `Tensor` representing a batch of y values.
    weights: Optional `Tensor` representing a batch of weight values.

  Returns:
    A new ReducedBatchWeightedCounts instance with sentinel values appended.
  """
  # Create a dummy sentinel token that is present in every record. The
  # sentinel's dtype must match the vocabulary's dtype so it can be
  # concatenated with the real unique_x values below.
  if reduced_batch.unique_x.dtype.is_integer:
    sentinel_values = tf.cast(
        tf.fill(tf.shape(y), GLOBAL_Y_COUNT_SENTINEL_INT), tf.int64)
  else:
    sentinel_values = tf.fill(tf.shape(y), GLOBAL_Y_COUNT_SENTINEL_STRING)
  # Computing the batch reduction over this sentinel token will reduce to a
  # single sentinel value in sentinel_batch.unique_x, with the
  # summed_positive_per_x_and_y thus capturing the total summed positive per
  # value in y. extend_with_sentinel_counts=False prevents infinite recursion.
  sentinel_batch = reduce_batch_weighted_cooccurrences(
      sentinel_values, y, weights, extend_with_sentinel_counts=False)
  # Concatenate the sentinel counts with the existing reduced batch.
  return ReducedBatchWeightedCounts(
      unique_x=tf.concat([reduced_batch.unique_x, sentinel_batch.unique_x],
                         axis=0),
      summed_weights_per_x=tf.concat([
          reduced_batch.summed_weights_per_x,
          sentinel_batch.summed_weights_per_x
      ],
                                     axis=0),
      summed_positive_per_x_and_y=tf.concat([
          reduced_batch.summed_positive_per_x_and_y,
          sentinel_batch.summed_positive_per_x_and_y
      ],
                                            axis=0),
      counts_per_x=tf.concat(
          [reduced_batch.counts_per_x, sentinel_batch.counts_per_x], axis=0))
def hashable_tensor_or_op(tensor_or_op):
  """Returns a hashable reference to a Tensor if given a Tensor/CompositeTensor.

  Use deref_tensor_or_op on the result to get the Tensor (or SparseTensor).

  Args:
    tensor_or_op: A `tf.Tensor`, `tf.CompositeTensor`, or other type.

  Returns:
    A hashable representation for the Tensor or CompositeTensor, or the original
    value for other types.
  """
  if isinstance(tensor_or_op, tf.Tensor):
    return tensor_or_op.ref()
  if isinstance(tensor_or_op, composite_tensor.CompositeTensor):
    # Flatten into components and reference each one; the TypeSpec lets
    # deref_tensor_or_op reassemble the composite later.
    components = tf.nest.flatten(tensor_or_op, expand_composites=True)
    return _CompositeTensorRef(
        type_spec=tf.type_spec_from_value(tensor_or_op),
        list_of_refs=tuple(hashable_tensor_or_op(c) for c in components))
  return tensor_or_op
def deref_tensor_or_op(tensor_or_op):
  """Returns a Tensor or CompositeTensor if given a reference, otherwise input.

  Args:
    tensor_or_op: An output of `hashable_tensor_or_op`.

  Returns:
    A Tensor, CompositeTensor, or the given tensor_or_op.
  """
  if isinstance(tensor_or_op, object_identity.Reference):
    return tensor_or_op.deref()
  if isinstance(tensor_or_op, _CompositeTensorRef):
    # Recursively dereference the components, then rebuild the composite
    # from its TypeSpec.
    components = [deref_tensor_or_op(ref) for ref in tensor_or_op.list_of_refs]
    return tf.nest.pack_sequence_as(
        structure=tensor_or_op.type_spec,
        flat_sequence=components,
        expand_composites=True)
  return tensor_or_op
def _broadcast_to_x_shape(x, y):
  """Broadcasts y to same shape as x as needed.

  Args:
    x: An input feature.
    y: A feature that is either the same shape as x or has the same outer
      dimensions as x. If the latter, y is broadcast to the same shape as x.

  Returns:
    A Tensor that contains the broadcasted feature, y.
  """
  # The batch dimension of x and y must be the same, and y must be 1D.
  x_shape = tf.shape(input=x)
  y_shape = tf.shape(input=y)
  assert_eq = tf.compat.v1.assert_equal(x_shape[0], y_shape[0])
  # The identity op below must stay inside the control_dependencies scope so
  # the batch-dim check runs before y is used.
  with tf.control_dependencies([assert_eq]):
    y = tf.identity(y)
    # Append trailing 1-dims to y until its rank matches x, then broadcast.
    rank_delta = tf.rank(x) - tf.rank(y)
    target_shape = tf.concat(
        [tf.shape(y), tf.ones(rank_delta, dtype=tf.int32)], axis=0)
    matched_rank = tf.reshape(y, target_shape)
    return tf.broadcast_to(matched_rank, x_shape)
def assert_same_shape(x, y):
  """Asserts two tensors have the same dynamic and static shape.

  Args:
    x: A `Tensor`.
    y: A `Tensor`

  Returns:
    The elements `x` and `y`; the results must be used in order to ensure that
    the dynamic check is executed.
  """
  # Static check first: fails fast at graph-construction time.
  x.shape.assert_is_compatible_with(y.shape)
  # Dynamic check: the returned identities carry the control dependency, so
  # callers must use them for the runtime assertion to fire.
  assert_eq = tf.compat.v1.assert_equal(tf.shape(input=x), tf.shape(input=y))
  with tf.control_dependencies([assert_eq]):
    return tf.identity(x), tf.identity(y)
# TODO(b/178189903): This is needed because tf.sparse.reduce_* produces a dense
# tensor which loses its original shape information.
def _sparse_reduce_batch_keep_shape(
    sparse_reduce_fn: Callable, sparse_tensor: tf.SparseTensor) -> tf.Tensor:  # pylint: disable=g-bare-generic
  """Applies a tf.sparse.reduce_* method on the given sparse_tensor."""
  reduced = sparse_reduce_fn(sparse_tensor, axis=0)
  # tf.sparse.reduce_* loses static shape info; restore the per-instance shape
  # (everything after the reduced batch dimension).
  reduced.set_shape(sparse_tensor.get_shape()[1:])
  return reduced
def reduce_batch_count(x: common_types.TensorType,
                       reduce_instance_dims: bool) -> tf.Tensor:
  """Counts elements in the given tensor.

  Args:
    x: A `Tensor` or `CompositeTensor`.
    reduce_instance_dims: A bool, if True - collapses the batch and instance
      dimensions to arrive at a single scalar output. Otherwise, only collapses
      the batch dimension and outputs a `Tensor` of the same shape as the input.

  Returns:
    The element count of `x`. The result is either a scalar if
    reduce_instance_dims is True, otherwise a `Tensor` having shape of `x`
    without the first (batch) dimension. NaNs and infinite input values are
    ignored.
  """
  if isinstance(x, tf.SparseTensor):
    if reduce_instance_dims:
      # Scalar count: fall through and count the flat values below.
      x = x.values
    else:
      # Per-element count: sum a sparse indicator (1 where finite, 0 where
      # NaN/inf) over the batch dimension.
      ones_like = tf.SparseTensor(
          indices=x.indices,
          values=tf.cast(_is_finite(x.values), tf.int64),
          dense_shape=x.dense_shape)
      # TODO(b/178189903): Remove this once we no longer lose static shape
      # information.
      ones_like._dense_shape_default = x._dense_shape_default  # pylint: disable=protected-access
      return _sparse_reduce_batch_keep_shape(tf.sparse.reduce_sum, ones_like)
  elif isinstance(x, tf.RaggedTensor):
    if reduce_instance_dims:
      x = x.flat_values
    else:
      finite_mask = tf.cast(_is_finite(x), tf.int64)
      return tf.math.reduce_sum(finite_mask, axis=0).to_tensor()
  # Exclude NaNs and infinite elements from size calculation. They can only
  # occur in tensors with floating data types.
  if x.dtype.is_floating:
    finite_mask = tf.cast(tf.math.is_finite(x), tf.int64)
    return tf.reduce_sum(finite_mask, axis=None if reduce_instance_dims else 0)
  if reduce_instance_dims:
    return tf.size(input=x)
  # Dense non-float, per-element: every position appears once per batch
  # instance, so fill a tensor shaped like x (minus batch) with batch_size.
  x_shape = tf.shape(input=x)
  return tf.fill(x_shape[1:], x_shape[0])
def _map_values(
    map_function: Callable[[Union[tf.Tensor, tf.RaggedTensor]],
                           Union[tf.Tensor, tf.RaggedTensor]],
    tensor: common_types.ConsistentTensorType,
) -> common_types.ConsistentTensorType:
  """Applies `map_function` to `tensor`'s values, preserving its structure."""
  if isinstance(tensor, tf.Tensor):
    return map_function(tensor)
  # Composite tensor: transform the values, keep the original structure.
  return tensor.with_values(map_function(tensor.values))
def maybe_format_vocabulary_input(
    x: common_types.ConsistentTensorType,
) -> common_types.ConsistentTensorType:
  """Escapes spaces in string tokens; passes non-string input through."""
  if x.dtype != tf.string:
    return x
  # b/62379925: This is a workaround to allow tokens to contain spaces when
  # store_frequency=True, which should eventaully be removed.
  return _map_values(
      lambda t: tf.strings.regex_replace(t, ' ', '__SPACE__'), x)
def _to_string(x: common_types.TensorType) -> common_types.TensorType:
  """Converts values in the given `Tensor` or `CompositeTensor` to strings."""
  # Already strings: return unchanged; otherwise stringify the values only.
  return x if x.dtype is tf.string else _map_values(tf.strings.as_string, x)
def reduce_batch_count_per_key(
    key: common_types.TensorType) -> Tuple[tf.Tensor, tf.Tensor]:
  """Computes per-key counts in the given tensor.

  Args:
    key: A `Tensor` or `CompositeTensor`.

  Returns:
    A 2-tuple containing the tensor's (key_vocab, count_per_key).
  """
  key = _to_string(key)
  # Counting ignores structure: reduce composites to their flat values.
  if isinstance(key, tf.SparseTensor):
    key = key.values
  elif isinstance(key, tf.RaggedTensor):
    key = key.flat_values
  key.set_shape([None])
  key_vocab, _, count_per_key = tf.unique_with_counts(key, out_idx=tf.int64)
  return key_vocab, count_per_key
def reorder_histogram(bucket_vocab: tf.Tensor, counts: tf.Tensor,
                      boundary_size: int) -> tf.Tensor:
  """Return the histogram counts in indexed order, and zero out missing values.

  The count_elements analyzer returns counts in alphanumeric order, only for the
  values that are present. To construct a well-formed histogram, we need to
  rearrange them in numerical order, and fill in the missing values.

  Ex: The data contains values in the following form: [0, 1, 0, 1, 0, 3, 0, 1]
  bucket_indices happen to be the same as these values, and
  count_elements(tf.strings.as_string(bucket_indices)) returns:
    bucket_vocab=['1', '3', '0'],
    counts=[3, 1, 4]
  If boundaries=[0, 1, 2, 3, 4], we expect counts=[4, 3, 0, 1, 0],
  which this function will return.

  Args:
    bucket_vocab: A `Tensor` that names the buckets corresponding to the count
      information returned.
    counts: A `Tensor` that matches the bucket_vocab.
    boundary_size: A scalar that provides information about how big the returned
      counts should be.

  Returns:
    counts: A `Tensor` of size boundary_size corresponding to counts of all
      available buckets.
  """
  if bucket_vocab.dtype == tf.string:
    bucket_vocab = tf.strings.to_number(bucket_vocab, tf.int32)
  # counts/bucket_vocab may be out of order and missing values (empty buckets).
  # Append the missing bucket indices (set difference with the full range),
  # then argsort: the result maps each bucket index to its position in the
  # padded counts vector below.
  ordering = tf.argsort(
      tf.concat([bucket_vocab,
                 tf.sets.difference([tf.range(boundary_size)],
                                    [bucket_vocab]).values], axis=-1))
  # Pad counts with zeros for the missing buckets so gather lines up.
  counts = tf.pad(counts, [[0, boundary_size - tf.size(counts)]])
  return tf.gather(counts, ordering)
# Used to decide which bucket boundary index to assign to a value.
class Side(enum.Enum):
  """Which boundary index a value equal to a bucket boundary is assigned."""
  RIGHT = 'right'
  LEFT = 'left'
def assign_buckets(x: tf.Tensor,
                   bucket_boundaries: tf.Tensor,
                   side: Side = Side.LEFT) -> tf.Tensor:
  """Assigns every value in x to a bucket index defined by bucket_boundaries.

  Note that `x` and `bucket_boundaries` will be cast to a common type that can
  hold the largest of values.

  Args:
    x: a `Tensor` of values to be bucketized.
    bucket_boundaries: The bucket boundaries `Tensor`. Note that the boundaries
      are going to be flattened.
    side: Controls index of a bucket that is being assigned: LEFT means that
      a value is going to be assigned index of the rightmost boundary such that
      boundary <= value; RIGHT means that a value is assigned index of the
      leftmost boundary such that value < boundary.

  Returns:
    A `Tensor` of dtype int64 with the same shape as `x`, and each element in
    the returned tensor representing the bucketized value. Bucketized value is
    in the range [0, len(bucket_boundaries)].
  """
  with tf.compat.v1.name_scope(None, 'assign_buckets'):
    # searchsorted requires 1-D inputs; the original shape is restored below.
    flat_x = tf.reshape(x, [-1])
    flat_boundaries = tf.reshape(bucket_boundaries, [-1])
    # Cast values or boundaries to the "largest" dtype to avoid truncating
    # larger values and avoid casting if dtypes are the same.
    if flat_x.dtype.max > flat_boundaries.dtype.max:
      flat_boundaries = tf.cast(flat_boundaries, flat_x.dtype)
    else:
      flat_x = tf.cast(flat_x, flat_boundaries.dtype)
    if side == Side.LEFT:
      # Ignore the last boundary to replicate behavior of the previously used
      # `BoostedTreesBucketize` for backwards compatibility.
      flat_boundaries = flat_boundaries[:-1]
    buckets = tf.searchsorted(
        flat_boundaries, flat_x, side=side.value, out_type=tf.int64)
    return tf.reshape(buckets, tf.shape(x))
# TODO(b/62379925): Remove this once all supported TF versions have
# tf.data.experimental.DatasetInitializer.
# The base class is chosen at import time: tf.data.experimental or
# tf.lookup.experimental, whichever exposes DatasetInitializer in the
# installed TF version; `object` if neither does (handled in __init__).
class _DatasetInitializerCompat(
    getattr(tf.data.experimental, 'DatasetInitializer',
            getattr(tf.lookup.experimental, 'DatasetInitializer', object))):
  """Extends DatasetInitializer when possible and registers the init_op."""

  def __init__(self, *args, **kwargs):
    # If we fell back to `object`, no DatasetInitializer exists in this TF.
    if self.__class__.mro()[1] == object:
      raise NotImplementedError(
          'Cannot create a DatasetInitializer with this version of TF: {}'
          .format(tf.__version__))
    super().__init__(*args, **kwargs)

  def initialize(self, table):
    """Initializes `table` and registers the init op for v1-style init."""
    init_op = super().initialize(table)
    # Register in TABLE_INITIALIZERS (once) so tables_initializer() runs it.
    collection_ref = tf.compat.v1.get_collection_ref(
        tf.compat.v1.GraphKeys.TABLE_INITIALIZERS)
    if init_op not in collection_ref:
      collection_ref.append(init_op)
    return init_op
def _make_vocab_entry_to_dtype_fn(dtype):
  """Returns a fn converting a string vocabulary entry to `dtype`."""
  # dtype is fixed at closure-creation time, so the branch can be hoisted.
  if dtype is tf.string:
    return lambda key: key
  return lambda key: tf.strings.to_number(key, out_type=dtype)
def _make_tfrecord_vocabulary_dataset(vocab_path,
                                      key_dtype=tf.string,
                                      value_dtype=tf.int64,
                                      return_indicator_as_value=False,
                                      has_indicator=False):
  """Makes a (key, value) dataset from a compressed tfrecord file.

  Args:
    vocab_path: Path to a GZIP-compressed TFRecord vocabulary file.
    key_dtype: dtype to convert vocabulary keys to.
    value_dtype: Numeric dtype for values.
    return_indicator_as_value: If True, each entry's indicator becomes the
      value; otherwise the value is the entry's line index.
    has_indicator: Whether entries are "<indicator> <key>" pairs.

  Returns:
    A tf.data.Dataset of (key, value) pairs.

  Raises:
    ValueError: If value_dtype is not numeric.
  """
  if not (value_dtype.is_floating or value_dtype.is_integer):
    raise ValueError('value_dtype must be numeric. Got: %s' % value_dtype)
  dataset = tf.data.TFRecordDataset(vocab_path, compression_type='GZIP')
  key_dtype_fn = _make_vocab_entry_to_dtype_fn(key_dtype)
  value_dtype_fn = _make_vocab_entry_to_dtype_fn(value_dtype)
  if return_indicator_as_value:
    # The indicator is the value, so it must be present in the file.
    assert has_indicator

    def convert_dtype(k, v):
      return key_dtype_fn(k), value_dtype_fn(v)

    # _split_vocabulary_entries yields (key, indicator) pairs.
    return dataset.map(
        _split_vocabulary_entries,
        num_parallel_calls=tf.data.experimental.AUTOTUNE).map(convert_dtype)
  else:
    if has_indicator:
      # Indicator present but unused: strip it, keeping only the key.
      drop_indicator = lambda k, v: k
      dataset = dataset.map(
          _split_vocabulary_entries,
          num_parallel_calls=tf.data.experimental.AUTOTUNE).map(drop_indicator)

    def convert_dtype_and_swap(v, k):
      return key_dtype_fn(k), tf.cast(v, value_dtype)

    # enumerate() yields (index, key); swap so the index becomes the value.
    return dataset.enumerate().map(convert_dtype_and_swap)
def make_tfrecord_vocabulary_lookup_initializer(filename_tensor,
                                                key_dtype=tf.string,
                                                value_dtype=tf.int64,
                                                return_indicator_as_value=False,
                                                has_indicator=False):
  """Makes a lookup table initializer from a compressed tfrecord file.

  Args:
    filename_tensor: Path (eager tensor, graph tensor, or str) to a
      GZIP-compressed TFRecord vocabulary file.
    key_dtype: dtype of the vocabulary keys.
    value_dtype: Numeric dtype of the values.
    return_indicator_as_value: See `_make_tfrecord_vocabulary_dataset`.
    has_indicator: See `_make_tfrecord_vocabulary_dataset`.

  Returns:
    A `_DatasetInitializerCompat` wrapping the vocabulary dataset.
  """
  with contextlib.ExitStack() as stack:
    # If filename_tensor is a graph tensor (e.g. temporary analyzer output), the
    # following operation cannot be lifted to init scope. Hence, check it is an
    # eager tensor or a string constant.
    if (tf.inside_function() and
        isinstance(filename_tensor, (ops.EagerTensor, str))):
      # Lift the dataset creation out of graph construction to avoid
      # repeated initialization in TF2.
      stack.enter_context(tf.init_scope())
    dataset = _make_tfrecord_vocabulary_dataset(filename_tensor, key_dtype,
                                                value_dtype,
                                                return_indicator_as_value,
                                                has_indicator)
    if tf.inside_function():
      # Keep the dataset alive for the lifetime of the enclosing function.
      annotators.track_object(dataset, name=None)
    return _DatasetInitializerCompat(dataset)
def _split_vocabulary_entries(batched_vocab_lines):
  """Splits vocabulary entries separated by a single space.

  Vocabulary entries that include indicators are formatted as:
  "<indicator><single space><key>"

  Args:
    batched_vocab_lines: A possible batched string tensor.

  Returns:
    A pair of (indicator, key) tensors.
  """
  # maxsplit=1 lets the key portion itself contain space characters.
  split = tf.strings.split(batched_vocab_lines, sep=' ', maxsplit=1)
  if not isinstance(split, tf.RaggedTensor):
    # Unbatched input: split is a plain 1-D tensor of [indicator, key].
    return split[1], split[0]
  dense_split = split.to_tensor()
  return dense_split[:, 1], dense_split[:, 0]
def apply_per_key_vocabulary(per_key_filename: tf.Tensor,
                             key: tf.Tensor,
                             default_value: Optional[str] = None,
                             target_ndims: Optional[int] = None) -> tf.Tensor:
  """Apply a stored key-value mapping to a set of keys.

  We expect the values stored in per_key_filename to be two comma-delimited
  numbers, such that it has the following form:
    a 1,3
    b 2,4
  if a and b are the keys corresponding to each row.

  Args:
    per_key_filename: The file name for the per-key vocabulary file.
    key: A `Tensor` of dtype tf.string, which will determine which values are
      returned.
    default_value: (Optional) A string that determines the default output for
      keys that are not found.
    target_ndims: (Optional) The requested rank of each returned value (wrapped
      in a single Tensor).

  Returns:
    A `Tensor` representing the mapped values of shape [None, 2, ...], where
    extra dimensions are added according to `target_dims`.
    If no default value is given, maps oov keys to [0, 0].
  """
  if default_value is None:
    default_value = '0,0'

  def _construct_table(asset_filepath):
    # The vocabulary file stores "value key" per line: the value string is
    # column 0 and the lookup key is column 1.
    initializer = tf.lookup.TextFileInitializer(
        asset_filepath,
        key_dtype=tf.string,
        key_index=1,
        value_dtype=tf.string,
        value_index=0,
        delimiter=' ')
    return tf.lookup.StaticHashTable(initializer, default_value=default_value)

  table_lookup, unused_table_size = construct_and_lookup_table(
      _construct_table, per_key_filename, key)
  # Each looked-up value is a comma-delimited string of numbers; split it.
  sparse_result = tf.compat.v1.strings.split(table_lookup, sep=',')
  dense_result = tf.sparse.to_dense(sparse_result, '0')
  # Add 0s where dense_result has empty strings.
  number_strings = tf.where(
      tf.strings.length(dense_result) > 0, dense_result,
      tf.fill(tf.shape(dense_result), '0'))
  numbers = tf.strings.to_number(number_strings)
  # We add 1 to represent the dimension of the multiple associated values found
  # in the vocabulary file (the d values present for every key).
  return numbers if not target_ndims else _align_dims(numbers, target_ndims + 1)
def _is_finite(x: common_types.TensorType) -> common_types.TensorType:
  """Extension of `tf.math.is_finite` that works with all dtypes."""
  if not x.dtype.is_floating:
    # Non-floating dtypes cannot represent NaN/inf: everything is finite.
    return tf.ones_like(x, dtype=tf.bool)
  return tf.math.is_finite(x)
def _reduce_batch_count_mean_and_var_sparse(
    x: tf.SparseTensor) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
  """Computes elementwise count, mean and var for the given sparse tensor.

  Missing (sparse) positions and non-finite values are excluded from the
  statistics; all reductions are over the batch (first) dimension.
  """
  x_count = tf.cast(reduce_batch_count(x, reduce_instance_dims=False), x.dtype)
  # Zero out NaN/inf values so they don't contaminate the sums below; they are
  # already excluded from x_count.
  finite_x = tf.SparseTensor(
      indices=x.indices,
      values=tf.where(_is_finite(x.values), x.values, tf.zeros_like(x.values)),
      dense_shape=x.dense_shape)
  x_sum = _sparse_reduce_batch_keep_shape(tf.sparse.reduce_sum, finite_x)
  # divide_no_nan yields 0 where the count is 0 (elements missing everywhere).
  x_mean = tf.math.divide_no_nan(x_sum, x_count)
  x_minus_mean = tf.sparse.add(finite_x, -tf.broadcast_to(x_mean, tf.shape(x)))
  # Restrict the centered values to positions actually present in x, so the
  # variance ignores missing entries.
  x_minus_mean_sparse = tf.SparseTensor(x.indices,
                                        tf.gather_nd(x_minus_mean, x.indices),
                                        x.dense_shape)
  sum_of_squares = tf.math.reduce_sum(
      tf.square(tf.sparse.to_dense(x_minus_mean_sparse)), axis=0)
  x_variance = tf.math.divide_no_nan(sum_of_squares, x_count)
  return (x_count, x_mean, x_variance)
def _reduce_batch_count_mean_and_var_ragged(
    x: tf.RaggedTensor) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
  """Computes elementwise count, mean and var for the given ragged tensor.

  Reduces only over the batch (first) dimension; positions absent from a row
  and non-finite values are excluded from the statistics.

  Args:
    x: A `RaggedTensor` whose first dimension is the batch.

  Returns:
    A 3-tuple (count, mean, variance) of dense `Tensor`s with dtype `x.dtype`
    and the shape of a single (densified) instance.
  """
  zeros_like_x = tf.zeros_like(x)
  x_is_finite = _is_finite(x)
  # Non-finite values are zeroed out so they contribute nothing to the sum.
  x_sum = tf.reduce_sum(tf.where(x_is_finite, x, zeros_like_x), axis=0)
  dense_x_count = tf.cast(
      reduce_batch_count(x, reduce_instance_dims=False), x.dtype)
  # Re-raggedify the dense counts so the division below aligns row lengths
  # with x_sum.
  x_count = tf.RaggedTensor.from_tensor(
      dense_x_count, lengths=x_sum.nested_row_lengths())
  x_mean = tf.math.divide_no_nan(x_sum, x_count).to_tensor()
  # Compute deviations in dense space (mean is dense), masking out positions
  # that were non-finite in the original input.
  dense_x = x.to_tensor()
  dense_x_is_finite = _is_finite(dense_x)
  x_minus_mean = tf.where(dense_x_is_finite, dense_x - x_mean,
                          tf.zeros_like(dense_x))
  # Convert back to ragged so positions missing from a row are dropped from
  # the sum of squares rather than counted as (0 - mean)^2.
  x_minus_mean = tf.RaggedTensor.from_tensor(
      x_minus_mean, lengths=x.nested_row_lengths())
  sum_of_squares = tf.reduce_sum(input_tensor=tf.square(x_minus_mean), axis=0)
  x_variance = tf.math.divide_no_nan(sum_of_squares, x_count)
  return (dense_x_count, x_mean, x_variance.to_tensor())
def _reduce_batch_count_mean_and_var_dense(
    x: tf.Tensor,
    reduce_instance_dims: bool) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
  """Returns (count, mean, variance) of a dense tensor, ignoring non-finites.

  Args:
    x: A dense `Tensor`.
    reduce_instance_dims: If True, reduce over every dimension to scalars;
      otherwise reduce only over the batch (first) dimension.

  Returns:
    A 3-tuple (count, mean, variance) of `Tensor`s with dtype `x.dtype`.
  """
  reduction_axis = None if reduce_instance_dims else 0
  count = tf.cast(reduce_batch_count(x, reduce_instance_dims), x.dtype)
  finite_mask = _is_finite(x)
  zeros = tf.zeros_like(x)
  # NaN/inf entries are zeroed so they contribute nothing to the sums; they
  # are likewise excluded from `count` by reduce_batch_count.
  total = tf.reduce_sum(tf.where(finite_mask, x, zeros), axis=reduction_axis)
  mean = tf.math.divide_no_nan(total, count)
  centered = tf.where(finite_mask, x - mean, zeros)
  squared_deviation_sum = tf.reduce_sum(
      tf.square(centered), axis=reduction_axis)
  variance = tf.math.divide_no_nan(squared_deviation_sum, count)
  return (count, mean, variance)
def reduce_batch_count_mean_and_var(
    x: common_types.TensorType,
    reduce_instance_dims: bool) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
  """Computes element count, mean and var for the given tensor.

  Args:
    x: A `Tensor` or `CompositeTensor`.
    reduce_instance_dims: A bool, if True - collapses the batch and instance
        dimensions to arrive at a single scalar output. Otherwise, only
        collapses the batch dimension and outputs a `Tensor` of the same shape
        as the input.

  Returns:
    A 3-tuple containing the tensor's (count, mean, var). NaNs and infinite
    input values are ignored.
  """
  # When fully reducing, a composite tensor's statistics depend only on its
  # present values, so the dense implementation can be applied to them
  # directly; the composite-specific paths are only needed elementwise.
  if isinstance(x, tf.SparseTensor):
    if not reduce_instance_dims:
      return _reduce_batch_count_mean_and_var_sparse(x)
    return _reduce_batch_count_mean_and_var_dense(
        x.values, reduce_instance_dims=True)
  if isinstance(x, tf.RaggedTensor):
    if not reduce_instance_dims:
      return _reduce_batch_count_mean_and_var_ragged(x)
    return _reduce_batch_count_mean_and_var_dense(
        x.flat_values, reduce_instance_dims=True)
  return _reduce_batch_count_mean_and_var_dense(x, reduce_instance_dims)
def _num_terms_and_factors(num_samples, dtype):
  """Computes counts and sample multipliers for the given number of samples.

  The lk_factors returned here are per-order-statistic weights: multiplying
  the *sorted* samples by lk_factors and summing yields the k-th L-moment
  estimate (see the callers `_iteration_l_moments_*`).

  Args:
    num_samples: An integral type scalar `Tensor` containing the number of
      samples used to compute the L-moments. This must be non-negative.
    dtype: The dtype of the samples to process. This determines the output
      `Tensor`s dtype.

  Returns:
    The tuple (current_samples, current_pairs, current_triplets,
    current_quadruplets, l1_factors, l2_factors, l3_factors, l4_factors).
    Entries are `Tensor`s with the given dtype containing counters for each
    moment and the factors to use to compute the moments.
  """
  # The k-th L-moment needs at least k samples to be defined.
  has_pairs = tf.math.greater(num_samples, 1)
  has_triplets = tf.math.greater(num_samples, 2)
  has_quadruplets = tf.math.greater(num_samples, 3)
  current_samples = tf.cast(num_samples, dtype=dtype)
  # Number of distinct pairs/triplets/quadruplets: C(n, 2), C(n, 3), C(n, 4).
  current_pairs = tf.cast(
      current_samples * (current_samples - 1.0) / 2.0, dtype=dtype)
  current_triplets = tf.cast(
      current_pairs * (current_samples - 2.0) / 3.0, dtype=dtype)
  current_quadruplets = tf.cast(
      current_triplets * (current_samples - 3.0) / 4.0, dtype=dtype)
  # For the i-th order statistic (0-based), term_up is i (samples below it)
  # and term_down is n-1-i (samples above it); the delay variants shift those
  # counts by one and two positions for the higher-order moment formulas.
  term_up = tf.range(0, current_samples, 1, dtype=dtype)
  term_up_delay_1 = tf.range(-1, current_samples - 1, 1, dtype=dtype)
  term_up_delay_2 = tf.range(-2, current_samples - 2, 1, dtype=dtype)
  term_down = tf.range(current_samples - 1, -1, -1, dtype=dtype)
  term_down_delay_1 = tf.range(current_samples - 2, -2, -1, dtype=dtype)
  term_down_delay_2 = tf.range(current_samples - 3, -3, -1, dtype=dtype)
  # Each denominator falls back to 1 when there are too few samples for the
  # corresponding moment, avoiding a division by zero.
  l1_denominator = tf.cond(tf.math.greater(num_samples, 0),
                           lambda: current_samples,
                           lambda: tf.constant(1, dtype))
  l1_factors = tf.ones([num_samples], dtype=dtype) / l1_denominator
  l2_denominator = tf.cond(has_pairs,
                           lambda: tf.cast(current_pairs * 2.0, dtype=dtype),
                           lambda: tf.constant(1, dtype))
  l2_factors = (term_up - term_down) / l2_denominator
  l3_denominator = tf.cond(has_triplets,
                           lambda: tf.cast(current_triplets * 6, dtype=dtype),
                           lambda: tf.constant(1, dtype))
  l3_factors = ((term_up * term_up_delay_1 - 4.0 * term_up * term_down +
                 term_down * term_down_delay_1) / l3_denominator)
  l4_denominator = tf.cond(
      has_quadruplets,
      lambda: tf.cast(current_quadruplets * 24, dtype=dtype),
      lambda: tf.constant(1, dtype))
  l4_factors = ((term_up * term_up_delay_1 * term_up_delay_2 -
                 9.0 * term_up * term_up_delay_1 * term_down +
                 9.0 * term_up * term_down * term_down_delay_1 -
                 term_down * term_down_delay_1 * term_down_delay_2) /
                l4_denominator)
  return (current_samples, current_pairs, current_triplets, current_quadruplets,
          l1_factors, l2_factors, l3_factors, l4_factors)
@tf.function
def _condition_l_moments_sparse(
    current_index, unused_l1_sum, unused_l2_sum, unused_l3_sum, unused_l4_sum,
    unused_count_samples, unused_count_pairs, unused_count_triplets,
    unused_count_quadruplets, x_rank_2):
  """While-loop condition: keep iterating while sparse columns remain."""
  # Only the column cursor and the column count matter; the accumulator
  # arguments exist solely to match the loop-variable signature.
  return current_index < x_rank_2.dense_shape[1]
@tf.function
def _iteration_l_moments_sparse(
    current_index, l1_sum, l2_sum, l3_sum, l4_sum, count_samples,
    count_pairs, count_triplets, count_quadruplets, x_rank_2):
  """Process one column of a `SparseTensor` and updates L-moments variables.

  While-loop body for the sparse path of `reduce_batch_count_l_moments`:
  selects the values present in column `current_index`, sorts them, and
  scatters the factor-weighted sums and per-order counts into the per-column
  accumulators.
  """
  # Gather the values whose column (axis=1) index equals current_index.
  current_x = tf.boolean_mask(
      x_rank_2.values,
      tf.math.equal(x_rank_2.indices[:, 1], [current_index]))
  sorted_x = tf.sort(current_x, axis=0)
  num_samples = tf.shape(current_x)[0]
  # Sparse columns may each contain a different number of values, so the
  # counts and factors must be recomputed per column.
  (current_samples, current_pairs, current_triplets, current_quadruplets,
   l1_factors, l2_factors, l3_factors,
   l4_factors) = _num_terms_and_factors(num_samples, x_rank_2.values.dtype)
  dim_1 = x_rank_2.dense_shape[1]
  # Each L-moment estimate is a factor-weighted sum over the sorted column,
  # scattered into this column's slot of the accumulator vector.
  new_l1_sum = l1_sum + tf.scatter_nd(
      [[current_index]],
      [tf.reduce_sum(tf.multiply(sorted_x, l1_factors), axis=0)], [dim_1])
  new_l2_sum = l2_sum + tf.scatter_nd(
      [[current_index]],
      [tf.reduce_sum(tf.multiply(sorted_x, l2_factors), axis=0)], [dim_1])
  new_l3_sum = l3_sum + tf.scatter_nd(
      [[current_index]],
      [tf.reduce_sum(tf.multiply(sorted_x, l3_factors), axis=0)], [dim_1])
  new_l4_sum = l4_sum + tf.scatter_nd(
      [[current_index]],
      [tf.reduce_sum(tf.multiply(sorted_x, l4_factors), axis=0)], [dim_1])
  new_count_samples = count_samples + tf.scatter_nd(
      [[current_index]], [current_samples], [dim_1])
  new_count_pairs = count_pairs + tf.scatter_nd(
      [[current_index]], [current_pairs], [dim_1])
  new_count_triplets = count_triplets + tf.scatter_nd(
      [[current_index]], [current_triplets], [dim_1])
  new_count_quadruplets = count_quadruplets + tf.scatter_nd(
      [[current_index]], [current_quadruplets], [dim_1])
  return (tf.add(current_index, 1),
          new_l1_sum, new_l2_sum, new_l3_sum, new_l4_sum,
          new_count_samples, new_count_pairs, new_count_triplets,
          new_count_quadruplets, x_rank_2)
@tf.function
def _condition_l_moments_dense(
    current_index, unused_l1_sum, unused_l2_sum, unused_l3_sum, unused_l4_sum,
    unused_l1_factors, unused_l2_factors, unused_l3_factors, unused_l4_factors,
    x_rank_2):
  """While-loop condition: keep iterating while dense columns remain."""
  # Only the column cursor and the column count matter; the accumulator and
  # factor arguments exist solely to match the loop-variable signature.
  return current_index < tf.shape(x_rank_2)[1]
@tf.function
def _iteration_l_moments_dense(
    current_index, l1_sum, l2_sum, l3_sum, l4_sum, l1_factors, l2_factors,
    l3_factors, l4_factors, x_rank_2):
  """Accumulates the four L-moment sums for one column of a dense `Tensor`.

  While-loop body for the dense path of `reduce_batch_count_l_moments`: the
  factors are precomputed by the caller (every dense column has the same
  sample count), so this only sorts the column and scatters the weighted
  sums into the per-column accumulators.
  """
  sorted_column = tf.sort(x_rank_2[:, current_index])
  num_columns = tf.shape(x_rank_2)[1]

  def _accumulate(running_sum, factors):
    # Factor-weighted sum of the sorted column, placed in this column's slot.
    contribution = tf.reduce_sum(tf.multiply(sorted_column, factors), axis=0)
    return running_sum + tf.scatter_nd(
        [[current_index]], [contribution], [num_columns])

  return (tf.add(current_index, 1),
          _accumulate(l1_sum, l1_factors),
          _accumulate(l2_sum, l2_factors),
          _accumulate(l3_sum, l3_factors),
          _accumulate(l4_sum, l4_factors),
          l1_factors, l2_factors, l3_factors, l4_factors, x_rank_2)
def reduce_batch_count_l_moments(
    x: common_types.TensorType, reduce_instance_dims: bool
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor,
           tf.Tensor, tf.Tensor]:
  """Computes element first 4 L-moments and the corresponding counts.

  Computes the first 4 L-moments (https://en.wikipedia.org/wiki/L-moment) and
  the number of samples, pairs, etc. used to compute them.

  Args:
    x: A `Tensor` or `CompositeTensor`.
    reduce_instance_dims: A bool, if True - collapses the batch and instance
        dimensions to arrive at a single scalar output. Otherwise, only
        collapses the batch dimension and outputs a `Tensor` of the same shape
        as the input.

  Returns:
    The tuple (count_samples, l1, count_pairs, l2, count_triplets, l3,
    count_quadruplets, l4). Each entry is a `Tensor` with the same dtype as x.
    If reduce_instance_dims is True, the tensors are scalars; otherwise the
    shape is x.shape[1:], i.e. the batch dimension is removed.

  Raises:
    NotImplementedError: If x is a `RaggedTensor` and reduce_instance_dims is
      False.
  """
  # When fully reducing, only the present values matter, so composite inputs
  # collapse to a dense 1-D tensor of values.
  if isinstance(x, tf.SparseTensor) and reduce_instance_dims:
    x = x.values
  elif isinstance(x, tf.RaggedTensor):
    if reduce_instance_dims:
      x = x.flat_values
    else:
      raise NotImplementedError(
          'L-moments only support reduced dims for RaggedTensors')
  if isinstance(x, tf.SparseTensor):
    # Elementwise sparse path: columns may have different sample counts, so a
    # while_loop processes one column at a time and accumulates both the
    # weighted sums and per-column counts (see _iteration_l_moments_sparse).
    batch_size = x.dense_shape[0]
    x_rank_2 = tf.sparse.reshape(x, [batch_size, -1])
    dim_1 = x_rank_2.dense_shape[1]
    initial_values = tf.zeros([dim_1], dtype=x.dtype)
    (unused_current_index, l1_sum, l2_sum, l3_sum, l4_sum,
     count_samples, count_pairs, count_triplets,
     count_quadruplets, unused_x_rank_2) = tf.while_loop(
         _condition_l_moments_sparse,
         _iteration_l_moments_sparse,
         [tf.constant(0, dim_1.dtype)] + [initial_values] * 8 + [x_rank_2])
    # Prefer a static final shape when it is fully known; fall back to the
    # dynamic shape otherwise.
    if reduce_instance_dims:
      final_shape = ()
    elif x.get_shape().ndims and x.get_shape()[1:].is_fully_defined():
      final_shape = x.get_shape()[1:]
    else:
      final_shape = tf.shape(x)[1:]
    l1 = tf.reshape(l1_sum, final_shape)
    l2 = tf.reshape(l2_sum, final_shape)
    l3 = tf.reshape(l3_sum, final_shape)
    l4 = tf.reshape(l4_sum, final_shape)
    count_l1 = tf.reshape(count_samples, final_shape)
    count_l2 = tf.reshape(count_pairs, final_shape)
    count_l3 = tf.reshape(count_triplets, final_shape)
    count_l4 = tf.reshape(count_quadruplets, final_shape)
  else:
    # Dense path: every column has the same number of samples, so the counts
    # and per-order-statistic factors are computed once and shared across all
    # columns of the while_loop.
    num_samples = tf.size(x) if reduce_instance_dims else tf.shape(x)[0]
    (count_samples, count_pairs, count_triplets, count_quadruplets,
     l1_factors, l2_factors, l3_factors, l4_factors) = _num_terms_and_factors(
         num_samples, x.dtype)
    x_rank_2 = tf.reshape(x, [num_samples, -1])
    dim_1 = tf.shape(x_rank_2)[1]
    initial_moment_values = tf.zeros([dim_1], dtype=x.dtype)
    (unused_current_index, l1_sum, l2_sum, l3_sum, l4_sum, unused_l1_factors,
     unused_l2_factors, unused_l3_factors, unused_l4_factors,
     unused_x_rank_2) = tf.while_loop(
         _condition_l_moments_dense,
         _iteration_l_moments_dense,
         [tf.constant(0, dim_1.dtype)] + [initial_moment_values] * 4 +
         [l1_factors, l2_factors, l3_factors, l4_factors, x_rank_2])
    final_shape = (() if reduce_instance_dims else tf.shape(x)[1:])
    l1 = tf.reshape(l1_sum, final_shape)
    l2 = tf.reshape(l2_sum, final_shape)
    l3 = tf.reshape(l3_sum, final_shape)
    l4 = tf.reshape(l4_sum, final_shape)
    # Counts are scalars here (identical for every column), so broadcast
    # them to the output shape.
    count_l1 = tf.fill(final_shape, count_samples)
    count_l2 = tf.fill(final_shape, count_pairs)
    count_l3 = tf.fill(final_shape, count_triplets)
    count_l4 = tf.fill(final_shape, count_quadruplets)
  return (count_l1, l1, count_l2, l2, count_l3, l3, count_l4, l4)
def _validate_and_get_dense_value_key_inputs(
    x: common_types.TensorType,
    key: common_types.TensorType) -> Tuple[tf.Tensor, tf.Tensor]:
  """Validate x and key and returns dense representations if feasible.

  Check if sparse x and sparse key have identical indices, map key if dense.

  Args:
    x: A `Tensor` or `CompositeTensor`.
    key: A `Tensor` or `CompositeTensor`. Must be `Tensor` if x is `Tensor`.

  Returns:
    The values of x and key if both are composite, the values of x and a mapped
    key if only x is composite, or the original x and key if both are dense.

  Raises:
    ValueError: If the combination of x and key types is unsupported.
  """
  if isinstance(x, tf.Tensor) and isinstance(key, tf.Tensor):
    return x, key
  elif isinstance(x, tf.Tensor):
    raise ValueError('A dense key is required if x is dense')
  elif isinstance(x, tf.SparseTensor) and isinstance(key, tf.SparseTensor):
    # Sparse/sparse: x and key must be structurally identical so their values
    # align one-to-one. The asserts run before the values are returned.
    assert_shape = tf.debugging.assert_equal(x.dense_shape, key.dense_shape)
    assert_eq = tf.debugging.assert_equal(x.indices, key.indices)
    with tf.control_dependencies([assert_eq, assert_shape]):
      return tf.identity(x.values), tf.identity(key.values)
  elif isinstance(x, tf.SparseTensor) and isinstance(key, tf.Tensor):
    # In this case, the row of x corresponds to the key at that row.
    x_row_indices = x.indices[:, 0]
    # key must have at least one entry per batch row of x.
    assert_compatible = tf.debugging.assert_greater_equal(
        tf.shape(key, out_type=tf.int64)[0], x.dense_shape[0])
    with tf.control_dependencies([assert_compatible]):
      return x.values, tf.gather(key, x_row_indices)
  elif isinstance(x, tf.SparseTensor):
    raise ValueError('A sparse or dense key is required if x is sparse')
  elif isinstance(x, tf.RaggedTensor) and isinstance(key, tf.RaggedTensor):
    # Ragged/ragged: identical row partitioning implies one key per value.
    x.shape.assert_is_compatible_with(key.shape)
    assert_ops = [
        tf.debugging.assert_equal(x_split, key_split) for x_split, key_split in
        zip(x.nested_row_splits, key.nested_row_splits)
    ]
    with tf.control_dependencies(assert_ops):
      # ensure_shape pins the flat values to rank 1 for downstream ops.
      return (tf.ensure_shape(tf.identity(x.flat_values), [None]),
              tf.ensure_shape(tf.identity(key.flat_values), [None]))
  elif isinstance(x, tf.RaggedTensor) and isinstance(key, tf.Tensor):
    # Each batch instance in x corresponds to a single element in key.
    x_row_indices = _get_ragged_batch_value_rowids(x)
    assert_compatible = tf.debugging.assert_greater_equal(
        tf.shape(key, out_type=tf.int64)[0], x.bounding_shape(axis=0))
    with tf.control_dependencies([assert_compatible]):
      return (tf.ensure_shape(x.flat_values,
                              [None]), tf.gather(key, x_row_indices))
  else:
    raise ValueError('A ragged or dense key is required if x is ragged')
def lookup_key(query: tf.Tensor, key_vocab: tf.Tensor) -> tf.Tensor:
  """Look up the index of each element in query in key_vocab.

  Args:
    query: A `Tensor`.
    key_vocab: A 1-D `Tensor` of unique keys.

  Returns:
    An int64 `Tensor` with the shape of `query` holding, for each query
    element, its position in key_vocab, or -1 if it is not present.
  """

  def _lookup_key():
    # Broadcast-based lookup: compare every query element against every vocab
    # entry simultaneously instead of using a lookup table.
    # Obtain 0-indexed int64 positions for the keys in key_vocab.
    indices = tf.cast(tf.range(tf.size(key_vocab)), tf.int64)
    expanded_vocab_size = tf.expand_dims(tf.size(key_vocab), axis=0)
    # matrix_shape = [vocab_size] + shape(query).
    matrix_shape = tf.concat([expanded_vocab_size, tf.shape(query)], axis=0)
    # Expand dims of key_vocab to rank of query.
    vocab_shape = tf.concat(
        [expanded_vocab_size,
         tf.ones(tf.rank(query), dtype=tf.int32)], axis=0)
    # Make copies of key_vocab to fill matrix_shape.
    expand_vocab = tf.broadcast_to(
        tf.reshape(key_vocab, vocab_shape), matrix_shape)
    # Make copies of indices to fill matrix_shape.
    expand_indices = tf.broadcast_to(
        tf.reshape(indices, vocab_shape), matrix_shape)
    # Make copies of query to fill matrix_shape.
    expand_query = tf.broadcast_to(query, matrix_shape)
    # Indices where expand_query equals expand_vocab is set to the key's
    # index. All the other indices are -1.
    expand_result = tf.where(
        tf.math.equal(expand_query, expand_vocab), expand_indices,
        tf.cast(tf.fill(matrix_shape, -1), tf.int64))
    # Reduce matrix above to desired 1-D shape.
    # Keys are unique, so at most one entry along axis 0 is non-negative and
    # reduce_max selects it (or -1 when the query key is out of vocabulary).
    result = tf.math.reduce_max(expand_result, axis=0)
    result.set_shape(query.shape)
    return result

  def _check_vocab_size_and_lookup_key():
    # An empty vocabulary maps every query element to -1.
    return tf.cond(
        tf.math.equal(tf.size(key_vocab), 0),
        lambda: tf.cast(tf.fill(tf.shape(query), -1), tf.int64), _lookup_key)

  def _check_input_size_and_lookup_key():
    # An empty query short-circuits to an empty result.
    return tf.cond(
        tf.math.equal(tf.size(query),
                      0), lambda: tf.constant([], dtype=tf.int64),
        _check_vocab_size_and_lookup_key)

  return _check_input_size_and_lookup_key()
def _align_dims(tensor: tf.Tensor, target_ndims: int) -> tf.Tensor:
  """Appends trailing singleton dims until `tensor` reaches the target rank.

  Non-elementwise per-key reduce returns a tensor with rank 1 (batch). To
  broadcast each reduction against x, the rank must match x's; missing
  dimensions are added as trailing size-1 axes so TF broadcasts along the
  intended dimensions.

  Args:
    tensor: A `Tensor`.
    target_ndims: The count of dims we want the output to meet or exceed, or
      None to leave the tensor untouched.

  Returns:
    The input tensor, expanded so its rank is at least target_ndims.
  """
  current_ndims = tensor.get_shape().ndims
  if target_ndims is None or target_ndims <= current_ndims:
    return tensor
  result = tensor
  for _ in range(target_ndims - current_ndims):
    result = tf.expand_dims(result, -1)
  return result
def map_per_key_reductions(tensors_to_map: Tuple[tf.Tensor, ...],
                           key: common_types.TensorType, key_vocab: tf.Tensor,
                           original_input: common_types.TensorType,
                           reduce_instance_dims: bool) -> Tuple[tf.Tensor, ...]:
  """Rearrange the reduced per-key result to correspond to the original keys.

  Args:
    tensors_to_map: A tuple of 1-D `Tensor`s that are same shape as key_vocab,
        to be mapped to respective key.
    key: A `Tensor` or `CompositeTensor`.
    key_vocab: A 1-D `Tensor`.
    original_input: A `Tensor` or `CompositeTensor`.
    reduce_instance_dims: A `bool`. True if tensors_to_map are reduced in
        dimension, else False.

  Returns:
    A tuple same length as tensors_to_map, of `Tensor`s the same dimension as
    original_input. We are mapping using the key for each original_input,
    but output rank needs to match original_input in the dense case.
    For the sparse case, it is enough for output to match original_input.values.
    Any missing key would result in a mapping to 0.
  """
  # Densify key so it aligns one-to-one with original_input's values.
  _, key = _validate_and_get_dense_value_key_inputs(original_input, key)
  # -1 for keys absent from key_vocab (out of vocabulary).
  key_indices = lookup_key(key, key_vocab)
  # Composite inputs only need the mapped values, so no rank alignment there.
  ndims = (None if isinstance(original_input,
                              (tf.SparseTensor, tf.RaggedTensor)) else
           original_input.get_shape().ndims)
  # Append 0s to allow mapping OOVs to it.
  tensors_to_map = [
      tf.concat([t, tf.expand_dims(tf.zeros_like(t[0]), 0)], axis=0)
      for t in tensors_to_map
  ]
  # Replace `-1`s due to OOV with size of key_vocab.
  # (size of key_vocab is the index of the appended zero entry above.)
  adjusted_indices = tf.where(
      key_indices >= 0, key_indices,
      tf.cast(
          tf.fill(tf.shape(key_indices), tf.size(key_vocab)), dtype=tf.int64))
  # Reduced tensors index keys along their last axis; elementwise ones along
  # the first.
  axis = -1 if reduce_instance_dims else 0
  mapped_result = [
      _align_dims(tf.gather(t, adjusted_indices, axis=axis), ndims)
      for t in tensors_to_map
  ]
  return tuple(mapped_result)
def reduce_batch_count_mean_and_var_per_key(
    x: common_types.TensorType, key: common_types.TensorType,
    reduce_instance_dims: bool
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
  """Computes per-key element count, mean and var for the given tensor.

  Args:
    x: A `Tensor` or `CompositeTensor`.
    key: A `Tensor` or `CompositeTensor` (cannot be None).
        Must meet one of the following conditions:
        1. Both x and key are dense,
        2. Both x and key are composite and `key` must exactly match `x` in
           everything except values,
        3. The axis=1 index of each element of sparse x matches its index of
           dense key.
    reduce_instance_dims: A bool, if True - collapses the batch and instance
        dimensions to arrive at a single scalar output. Otherwise, only
        collapses the batch dimension and outputs a `Tensor` of the same shape
        as the input. Not supported for `CompositeTensor`s.

  Returns:
    A 4-tuple containing the `Tensor`s (key_vocab, count, mean, var). NaNs and
    infinite input values are ignored.

  Raises:
    NotImplementedError: If x is composite and reduce_instance_dims is False.
  """
  if isinstance(x, (tf.SparseTensor, tf.RaggedTensor)):
    if not reduce_instance_dims:
      raise NotImplementedError(
          'Mean and var per key only support reduced dims for CompositeTensors')
  # After this call x and key are aligned 1-D (composite) or dense tensors.
  x, key = _validate_and_get_dense_value_key_inputs(x, key)
  unique = tf.unique(key, out_idx=tf.int64)
  x_is_finite = _is_finite(x)
  # Non-finite values contribute 0 to the sums and are excluded from counts.
  finite_x = tf.where(x_is_finite, x, tf.zeros_like(x))
  if reduce_instance_dims:
    # Collapse instance dims first, then segment-sum per key.
    x_count = tf.cast(x_is_finite, x.dtype)
    if x.get_shape().ndims != 1:
      x_count = tf.reduce_sum(x_count, axis=1)
    x_count = tf.math.unsorted_segment_sum(x_count, unique.idx,
                                           tf.size(unique.y))
    sums = (
        tf.reduce_sum(finite_x, axis=1)
        if x.get_shape().ndims != 1 else finite_x)
    sums = tf.math.unsorted_segment_sum(sums, unique.idx, tf.size(unique.y))
  else:
    # Elementwise: segment-sum each position across the batch, per key.
    sums = tf.math.unsorted_segment_sum(finite_x, unique.idx, tf.size(unique.y))
    # NOTE(review): the count is accumulated as float32 regardless of x.dtype
    # in this branch — presumably elementwise inputs are float32; confirm
    # before relying on other float dtypes here.
    x_count = tf.math.unsorted_segment_sum(
        tf.cast(x_is_finite, tf.float32), unique.idx, tf.size(unique.y))
  means = tf.math.divide_no_nan(tf.cast(sums, x.dtype), x_count)
  # Variance via E[x^2] - (E[x])^2 over the finite values.
  sum_sqs = tf.math.unsorted_segment_sum(
      tf.square(finite_x), unique.idx, tf.size(input=unique.y))
  if sum_sqs.get_shape().ndims != 1 and reduce_instance_dims:
    sum_sqs = tf.reduce_sum(sum_sqs, axis=1)
  variances = tf.math.divide_no_nan(sum_sqs, x_count) - tf.square(means)
  return unique.y, tf.cast(x_count, tf.int64), means, variances
# Code for serializing an example proto.
# Default (padding) value used when densifying a SparseTensor of the given
# dtype before proto encoding (see _encode_proto).
_DEFAULT_VALUE_BY_DTYPE = {
    tf.string: '',
    tf.float32: 0,
    tf.int64: 0
}
def _encode_proto(values_dict, message_type, descriptor_source=''):
  """Encodes a batch of protos via `tf.raw_ops.EncodeProto`.

  Args:
    values_dict: Dict mapping a proto field name to a `Tensor` or
      `SparseTensor` holding that field's per-row values.
    message_type: Name of the proto message type to encode.
    descriptor_source: Where to find the message descriptors (passed through
      to `tf.raw_ops.EncodeProto`; defaults to the linked-in pool).

  Returns:
    A string `Tensor` of serialized protos, one per batch row.
  """
  field_names = []
  sizes = []
  values = []
  # Process fields in deterministic (sorted-by-name) order.
  for field_name in sorted(values_dict):
    value = values_dict[field_name]
    if isinstance(value, tf.SparseTensor):
      # Per-row value count is the number of sparse entries in that row.
      row_counts = tf.sparse.reduce_sum(
          tf.SparseTensor(value.indices,
                          tf.ones_like(value.values, dtype=tf.int32),
                          value.dense_shape),
          axis=1)
      size = row_counts
      value = tf.sparse.to_dense(value, _DEFAULT_VALUE_BY_DTYPE[value.dtype])
    else:
      # Flatten each row; every row then contributes the same value count.
      value = tf.reshape(value, [tf.shape(input=value)[0], -1])
      size = tf.fill((tf.shape(input=value)[0],), tf.shape(input=value)[1])
    field_names.append(field_name)
    values.append(value)
    sizes.append(size)
  return tf.raw_ops.EncodeProto(
      sizes=tf.stack(sizes, axis=1),
      values=values,
      field_names=field_names,
      message_type=message_type,
      descriptor_source=descriptor_source)
def _serialize_feature(values):
  """Serialize a Tensor or SparseTensor as `Feature` protos.

  `values` should be a Tensor of rank >=1 or SparseTensor of rank 2. We will
  refer to the size of the first dimension as batch_size.

  This function encodes each row of the `Tensor` as a list of values (flattening
  the other dimensions) and each row of the `SparseTensor` as a list of values,
  where the indices within each row are ignored and assumed to be 0, 1, ....

  Args:
    values: A `Tensor` or `SparseTensor`.

  Returns:
    A tensor of shape (batch_size,) and type `tf.string` where each element is
    a serialized `Feature` proto.

  Raises:
    ValueError: If the dtype is of `values` is not `tf.string`, `tf.float32`
        or `tf.int64`.
  """
  values = tf.compat.v1.convert_to_tensor_or_sparse_tensor(values)
  # Map each supported dtype to its Feature oneof field and list proto type.
  list_encoding_by_dtype = {
      tf.string: ('bytes_list', 'tensorflow.BytesList'),
      tf.float32: ('float_list', 'tensorflow.FloatList'),
      tf.int64: ('int64_list', 'tensorflow.Int64List'),
  }
  if values.dtype not in list_encoding_by_dtype:
    raise ValueError('Cannot encode values of dtype {}'.format(values.dtype))
  oneof_field, list_message_type = list_encoding_by_dtype[values.dtype]
  values_dict = {
      oneof_field: _encode_proto({'value': values}, list_message_type)
  }
  return _encode_proto(values_dict, 'tensorflow.Feature')
def serialize_example(features):
  """Serialized a dict of `Tensor` or `SparseTensor`s as example protos.

  `features` should be a dict where each value is a Tensor of rank >=1 or
  SparseTensor of rank 2. The sizes of the first dimension of each value should
  be the same, and we refer to this size as batch_size.

  Args:
    features: A dictionary whose values are `Tensor`s or `SparseTensor`s.

  Returns:
    A tensor of shape (batch_size,) and type `tf.string` where each element is
    a serialized `Example` proto.
  """
  entries = []
  # Encode each feature as a Features.FeatureEntry, in sorted key order.
  for key in sorted(features):
    serialized_value = _serialize_feature(features[key])
    batch_size = tf.shape(input=serialized_value)[0]
    entries.append(
        _encode_proto(
            {
                'key': tf.fill((batch_size,), key),
                'value': serialized_value,
            }, 'tensorflow.Features.FeatureEntry'))
  feature_map = _encode_proto(
      {'feature': tf.stack(entries, axis=1)}, 'tensorflow.Features')
  return _encode_proto({'features': feature_map}, 'tensorflow.Example')
def _get_missing_value(dtype: tf.DType) -> tf.Tensor:
  """Returns the sentinel marking all-missing positions: NaN for floating
  dtypes, dtype.min + 1 for integral ones."""
  sentinel = _FLOATING_NAN if dtype.is_floating else dtype.min + 1
  return tf.constant(sentinel, dtype)
def _sparse_minus_reduce_min_and_reduce_max(
    x: tf.SparseTensor) -> Tuple[tf.Tensor, tf.Tensor]:
  """Computes the -min and max of a SparseTensor x.

  It differs from sparse_reduce_max in that sparse_reduce_max returns 0 when all
  elements are missing along axis 0.
  We replace the 0 with NaN when x's dtype is float and dtype.min+1 when it's
  int.

  Args:
    x: A `SparseTensor`.

  Returns:
    Two `Tensors' which are the -min and max.

  Raises:
    TypeError: If the type of `x` is not supported.
  """
  # -min(x) == max(-x), so both reductions can use sparse reduce_max.
  minus_x = tf.SparseTensor(
      indices=x.indices, values=0 - x.values, dense_shape=x.dense_shape)
  x_count = reduce_batch_count(x, reduce_instance_dims=False)
  # Positions where no batch instance had a value; their reduce_max of 0 is
  # replaced with the missing-value sentinel below.
  batch_has_no_values = tf.equal(x_count, tf.constant(0, dtype=tf.int64))
  x_batch_max = _sparse_reduce_batch_keep_shape(tf.sparse.reduce_max, x)
  x_batch_minus_min = _sparse_reduce_batch_keep_shape(tf.sparse.reduce_max,
                                                      minus_x)
  missing_value = _get_missing_value(x.dtype)
  x_batch_max = tf.where(batch_has_no_values,
                         tf.fill(tf.shape(input=x_batch_max), missing_value),
                         x_batch_max)
  x_batch_minus_min = tf.where(
      batch_has_no_values,
      tf.fill(tf.shape(input=x_batch_minus_min), missing_value),
      x_batch_minus_min)
  return x_batch_minus_min, x_batch_max
def reduce_batch_minus_min_and_max(
    x: common_types.TensorType,
    reduce_instance_dims: bool) -> Tuple[tf.Tensor, tf.Tensor]:
  """Computes the -min and max of a tensor x.

  NOTE: For TF versions < 2.4, if all feature values are NaNs, the -min and max
  will both be -inf (consistent with`tf.reduce_max`).

  Args:
    x: A `Tensor` or `CompositeTensor`.
    reduce_instance_dims: A bool indicating whether this should collapse the
      batch and instance dimensions to arrive at a single scalar output, or only
      collapse the batch dimension and outputs a vector of the same shape as the
      input.

  Returns:
    The computed tensor's (batch -min, batch max) pair.

  Raises:
    TypeError: If x has dtype tf.uint32 or tf.uint64.
  """
  # In TF < 2.3, neg(x) would throw an exception, if x was tf.int16. Hence, cast
  # to tf.int32.
  if x.dtype in (tf.uint8, tf.uint16, tf.int16):
    x = tf.cast(x, tf.int32)
  elif x.dtype == tf.uint32 or x.dtype == tf.uint64:
    raise TypeError('Tensor type %r is not supported' % x.dtype)
  if reduce_instance_dims:
    # Full reduction only depends on the values present in composite inputs.
    if isinstance(x, tf.SparseTensor):
      x = x.values
    elif isinstance(x, tf.RaggedTensor):
      x = x.flat_values
    # -min(x) is computed as max(-x) so a single kernel serves both.
    x_batch_max = tf.reduce_max(input_tensor=x)
    x_batch_minus_min = tf.reduce_max(input_tensor=tf.zeros_like(x) - x)
    return assert_same_shape(x_batch_minus_min, x_batch_max)
  elif isinstance(x, tf.SparseTensor):
    # Sparse elementwise path needs sentinel handling for all-missing columns.
    return _sparse_minus_reduce_min_and_reduce_max(x)
  x_batch_max = tf.reduce_max(input_tensor=x, axis=0)
  if isinstance(x, tf.RaggedTensor):
    x_batch_minus_min = tf.reduce_max(input_tensor=tf.math.negative(x), axis=0)
    # Positions absent from every row densify to the missing-value sentinel.
    missing_value = _get_missing_value(x.dtype)
    return (x_batch_minus_min.to_tensor(default_value=missing_value),
            x_batch_max.to_tensor(default_value=missing_value))
  else:
    # TODO(iindyk): switch to `tf.math.negative` when analyzer cache will get
    # invalidated next time.
    return (tf.reduce_max(input_tensor=0 - x, axis=0), x_batch_max)
def reduce_batch_minus_min_and_max_per_key(
    x: common_types.TensorType,
    key: common_types.TensorType,
    reduce_instance_dims: bool = True
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
  """Computes the -min and max of a tensor x.

  Args:
    x: A `Tensor` or `CompositeTensor`.
    key: A `Tensor` or `CompositeTensor`.
        Must meet one of the following conditions:
        1. Both x and key are dense,
        2. Both x and key are composite and `key` must exactly match `x` in
           everything except values,
        3. The axis=1 index of each element of sparse x matches its index of
           dense key.
    reduce_instance_dims: A bool indicating whether this should collapse the
      batch and instance dimensions to arrive at a single scalar output, or only
      collapse the batch dimension and outputs a vector of the same shape as the
      input.

  Returns:
    A 3-tuple containing the `Tensor`s (key_vocab, min_per_key, max_per_key).

  Raises:
    TypeError: If x has dtype tf.uint32 or tf.uint64.
    NotImplementedError: If x is composite and reduce_instance_dims is False.
  """
  # NOTE(review): unlike reduce_batch_minus_min_and_max, tf.int16 is not cast
  # to int32 here — presumably -x on int16 is fine for the TF versions this
  # function targets; confirm if int16 inputs ever reach this path.
  if x.dtype == tf.uint8 or x.dtype == tf.uint16:
    x = tf.cast(x, tf.int32)
  elif x.dtype == tf.uint32 or x.dtype == tf.uint64:
    raise TypeError('Tensor type %r is not supported' % x.dtype)
  if not reduce_instance_dims and isinstance(
      x, (tf.SparseTensor, tf.RaggedTensor)):
    raise NotImplementedError(
        'Elementwise reduction of composite tensors is not supported'
    )
  # Align key one-to-one with x's values (or batch rows for dense x).
  x, key = _validate_and_get_dense_value_key_inputs(x, key)

  def get_batch_max_per_key(tensor, key_uniques):  # pylint: disable=missing-docstring
    # Collapse instance dims to one value per row, then segment-max per key.
    if not reduce_instance_dims or tensor.get_shape().ndims < 2:
      row_maxes = tensor
    else:
      row_maxes = tf.reduce_max(
          tensor, axis=tf.range(1, tensor.get_shape().ndims))
    return tf.math.unsorted_segment_max(row_maxes, key_uniques.idx,
                                        tf.size(input=key_uniques.y))

  unique = tf.unique_with_counts(key, out_idx=tf.int64)
  x_batch_maxes = get_batch_max_per_key(x, unique)
  # -min(x) == max(-x), reusing the same per-key segment-max helper.
  x_batch_minus_mins = get_batch_max_per_key(-x, unique)
  x_batch_minus_mins, x_batch_maxes = assert_same_shape(x_batch_minus_mins,
                                                        x_batch_maxes)
  return (unique.y, x_batch_minus_mins, x_batch_maxes)
def track_asset_analyzer_output(eager_asset_path: ops.EagerTensor,
                                graph_tensor: tf.Tensor):
  """Track `graph_tensor` representing analyzer output written to `eager_asset_path`.

  The pair is recorded in the default graph's `_ASSET_REPLACEMENTS`
  collection so later lookups can swap the graph tensor for the eager asset
  path (see `_get_asset_analyzer_output_and_control_dependency`).
  """
  entry = (hashable_tensor_or_op(graph_tensor), eager_asset_path)
  ops.get_default_graph().add_to_collection(_ASSET_REPLACEMENTS, entry)
def _get_asset_analyzer_output_and_control_dependency(
    asset_filepath: _AssetFileType
) -> Tuple[_AssetFileType, Optional[tf.Tensor]]:
  """Resolves `asset_filepath` against tracked eager asset replacements.

  If the default graph tracks an eager replacement for `asset_filepath`
  (see `track_asset_analyzer_output`), returns the eager path together with
  the original graph tensor to be used as a control dependency. Otherwise
  returns the input unchanged with no dependency.

  Args:
    asset_filepath: Path to an asset used to construct a lookup table.

  Returns:
    A tuple of (asset filepath, control dependency).

  Raises:
    ValueError: If replacements are tracked but asset_filepath is not a
      `tf.Tensor`.
  """
  replacements = ops.get_default_graph().get_collection(_ASSET_REPLACEMENTS)
  if not replacements:
    return asset_filepath, None
  if not isinstance(asset_filepath, tf.Tensor):
    raise ValueError('Expected asset_filepath ({}) to be a tf.Tensor.'.format(
        asset_filepath))
  eager_path = dict(replacements).get(
      hashable_tensor_or_op(asset_filepath), None)
  if eager_path:
    # The graph tensor becomes the control dependency for the lookup result.
    return eager_path, asset_filepath
  return asset_filepath, None
def _lookup_table(table: lookup_ops.LookupInterface, x: tf.Tensor,
                  control_dependency: Optional[tf.Tensor]) -> tf.Tensor:
  """Looks `x` up in `table`, optionally depending on `control_dependency`."""
  if control_dependency is None:
    return table.lookup(x)
  # tf.control_dependencies([tensor]) adds a dependency to tensor.op. Wrap the
  # tensor in an identity op to ensure that walking the graph from the result
  # encounters the control_dependency tensor.
  with tf.control_dependencies([tf.identity(control_dependency)]):
    return table.lookup(x)
def construct_and_lookup_table(construct_table_callable: Callable[
    [_AssetFileType], lookup_ops.LookupInterface],
                               asset_filepath: _AssetFileType,
                               x: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
  """Construct a table and look x up in it.

  Args:
    construct_table_callable: A Callable that takes a path to an asset file and
      constructs a lookup table.
    asset_filepath: Path to an asset used to construct the table. Can be a
      python string, a `tf.Tensor`, a `tf.Placeholder`.
    x: A categorical `Tensor` of type tf.string or tf.int[8|16|32|64] to which
      the table lookup should be applied.

  Returns:
    A tuple of the result from looking x up in a table and the table's size.
  """
  # If table is lifted into an initialization scope, add a control dependency
  # on the graph tensor used to track this analyzer in
  # `analyzer_nodes.TENSOR_REPLACEMENTS`.
  asset_filepath, control_dependency = (
      _get_asset_analyzer_output_and_control_dependency(asset_filepath))
  with contextlib.ExitStack() as stack:
    if (tf.inside_function() and
        isinstance(asset_filepath, (ops.EagerTensor, str))):
      # Lift the table initialization out of graph construction to avoid
      # repeated initialization in TF2.
      stack.enter_context(tf.init_scope())
    table = construct_table_callable(asset_filepath)
    # Size is read inside the same (possibly lifted) scope as the table.
    table_size = table.size()
  return _lookup_table(table, x, control_dependency), table_size
def lookup_table(lookup_fn: Callable[[common_types.TensorType, tf.Tensor],
                                     Tuple[tf.Tensor, tf.Tensor]],
                 asset_filepath: _AssetFileType, x: common_types.TensorType):
  """Takes a `lookup_fn` and invokes it on `x` and `asset_filepath`.

  If an eager tensor is being tracked by `asset_filepath`, `lookup_fn` is
  invoked on it instead.

  Args:
    lookup_fn: A Callable that should take a tensor and a deferred vocab
      filename as an input and return a lookup `op` along with the table size.
    asset_filepath: Path to an asset used to construct the table. Can be a
      python string, a `tf.Tensor`, a `tf.Placeholder`.
    x: A categorical `Tensor` or `SparseTensor` of type tf.string or
      tf.int[8|16|32|64] to which the table lookup should be applied.

  Returns:
    A tuple of the result from looking x up and the table size.
  """
  # If table is lifted into an initialization scope, add a control dependency
  # on the graph tensor used to track this analyzer in
  # `analyzer_nodes.TENSOR_REPLACEMENTS`.
  asset_filepath, control_dependency = (
      _get_asset_analyzer_output_and_control_dependency(asset_filepath))
  lookup_result, table_size = lookup_fn(x, asset_filepath)
  with contextlib.ExitStack() as stack:
    # tf.control_dependencies([tensor]) adds a dependency to tensor.op. Wrap the
    # `lookup_result` in an identity op to ensure that walking the graph from
    # it encounters the `control_dependency` tensor. The table size should not
    # have the `control_dependency` tensor as its parent, hence it is returned
    # as is.
    if control_dependency is not None:
      stack.enter_context(
          tf.control_dependencies([tf.identity(control_dependency)]))
    return tf.identity(lookup_result), table_size
def to_vocab_range(x: tf.SparseTensor,
                   vocab_size: Union[int, tf.Tensor]) -> tf.SparseTensor:
  """Mods x's int values to enforce that the vocab_ids in x are in range.

  Args:
    x: A int-valued SparseTensor typically representing the vocab indices of
      terms. This is usually the output of tft.compute_and_apply_vocabulary.
    vocab_size: An int or scalar tensor representing the size of vocab. Values
      in x will be mod by this size to avoid negative or out-of-vocab indices.

  Returns:
    A sparse tensor of the same size as x with negative or out-of-vocab values
    normalized.
  """
  # Only the values change; sparsity structure is carried over unchanged.
  in_range_values = tf.math.mod(x.values, vocab_size)
  return tf.SparseTensor(
      indices=x.indices,
      values=in_range_values,
      dense_shape=x.dense_shape)
def document_frequency_to_idf(document_frequency: tf.Tensor,
                              corpus_size: Union[int, tf.Tensor],
                              smooth: bool = True,
                              add_baseline: bool = True) -> tf.Tensor:
  """Computes inverse document frequency given document frequency.

  The inverse document frequency of a term, by default, is calculated as
  1 + log ((corpus size + 1) / (document frequency + 1)), where document
  frequency is the number of documents that contain this term.

  Args:
    document_frequency: A tensor storing the document frequency of each term.
    corpus_size: An int or int scalar tensor representing the size of the entire
      dataset, i.e., number of examples.
    smooth: A bool indicating if the inverse document frequency should be
      smoothed. If True, which is the default, then the idf is calculated as 1 +
      log((corpus size + 1) / (document frequency of term + 1)). Otherwise, the
      idf is 1 + log((corpus size) / (document frequency of term)), which could
      result in a division by zero error.
    add_baseline: A bool indicating if the inverse document frequency should be
      added with a constant baseline 1.0. If True, which is the default, then
      the idf is calculated as 1 + log(*). Otherwise, the idf is log(*) without
      the constant 1 baseline. Keeping the baseline reduces the discrepancy in
      idf between commonly seen terms and rare terms.

  Returns:
    A tensor of the inverse document frequency of input document frequency.
  """
  baseline = 1.0 if add_baseline else 0.0
  df_float = tf.cast(document_frequency, dtype=tf.float32)
  corpus_float = tf.cast(corpus_size, dtype=tf.float32)
  if smooth:
    # Add-one smoothing keeps the ratio finite even for zero document
    # frequency.
    ratio = (corpus_float + 1.0) / (df_float + 1.0)
  else:
    ratio = corpus_float / df_float
  return tf.math.log(ratio) + baseline
def register_vocabulary_reserved_tokens(
    name: str, reserved_tokens: Union[Sequence[str], tf.Tensor]
) -> tf.Tensor:
  """Registers a reserved_tokens tensor to a vocabulary.

  Returns the number of reserved tokens as an int64 scalar tensor.
  """
  tokens_tensor = (
      reserved_tokens if isinstance(reserved_tokens, tf.Tensor) else
      tf.constant(reserved_tokens, dtype=tf.string))
  # Names and tensors go into parallel collections; index alignment is what
  # fetch_vocabulary_reserved_tokens relies on.
  tf.compat.v1.add_to_collection(_VOCABULARY_RESERVED_TOKENS_IDS, name)
  tf.compat.v1.add_to_collection(_VOCABULARY_RESERVED_TOKENS, tokens_tensor)
  return tf.size(tokens_tensor, out_type=tf.int64)
def fetch_vocabulary_reserved_tokens(graph, name: str) -> Sequence[str]:
  """Fetches an evaluated reserved_tokens tensor for a vocabulary."""
  names = graph.get_collection(_VOCABULARY_RESERVED_TOKENS_IDS)
  token_tensors = graph.get_collection(_VOCABULARY_RESERVED_TOKENS)
  # The two collections are written in lockstep by
  # register_vocabulary_reserved_tokens, so they must stay parallel.
  assert len(names) == len(token_tensors)
  reserved_tokens_tensor = token_tensors[names.index(name)]
  with tf.compat.v1.Session(graph=graph) as session:
    return session.run(reserved_tokens_tensor)
|
3c09cd787a283535bc1004982026424bb114060e
|
0dddc0508138396c740901be4a0f9eebefb8fded
|
/ax/modelbridge/tests/test_metrics_as_task_transform.py
|
447452888910dc78f212a1efabae2a75f397a6ee
|
[
"MIT"
] |
permissive
|
facebook/Ax
|
473beb143016f95f4ec381ed1bd95b32c1ca31f8
|
6443cee30cbf8cec290200a7420a3db08e4b5445
|
refs/heads/main
| 2023-09-01T09:29:13.684709
| 2023-08-31T21:49:30
| 2023-08-31T21:49:30
| 169,880,381
| 2,207
| 315
|
MIT
| 2023-09-14T21:26:51
| 2019-02-09T15:23:44
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,101
|
py
|
test_metrics_as_task_transform.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from copy import deepcopy
import numpy as np
from ax.core.observation import Observation, ObservationData, ObservationFeatures
from ax.core.parameter import ChoiceParameter
from ax.modelbridge.transforms.metrics_as_task import MetricsAsTask
from ax.utils.common.testutils import TestCase
from ax.utils.testing.core_stubs import get_search_space_for_range_values
class MetricsAsTaskTransformTest(TestCase):
    """Tests for the MetricsAsTask transform.

    The transform maps metrics onto a "METRIC_TASK" task parameter: each
    observation is replicated once per source metric in ``metric_task_map``
    (with the mapped metrics' values) plus an unchanged copy tagged "TARGET".
    """

    def setUp(self) -> None:
        # metric1's value feeds tasks for metric2/metric3; metric2's feeds
        # metric3.
        self.metric_task_map = {
            "metric1": ["metric2", "metric3"],
            "metric2": ["metric3"],
        }
        self.observations = [
            Observation(
                data=ObservationData(
                    metric_names=["metric1", "metric2", "metric3"],
                    means=np.array([1.0, 2.0, 3.0]),
                    covariance=np.diag([1.0, 2.0, 3.0]),
                ),
                features=ObservationFeatures(parameters={"x": 5.0, "y": 2.0}),
                arm_name="0_0",
            ),
            # Second observation only reports metric3, so it should produce no
            # extra task copies.
            Observation(
                data=ObservationData(
                    metric_names=["metric3"],
                    means=np.array([30.0]),
                    covariance=np.array([[30.0]]),
                ),
                features=ObservationFeatures(parameters={"x": 10.0, "y": 4.0}),
            ),
        ]
        self.search_space = get_search_space_for_range_values(min=0.0, max=20.0)
        # Expected output: original rows tagged METRIC_TASK="TARGET", plus one
        # row per mapped source metric carrying that metric's mean/variance.
        self.expected_new_observations = [
            Observation(
                data=ObservationData(
                    metric_names=["metric1", "metric2", "metric3"],
                    means=np.array([1.0, 2.0, 3.0]),
                    covariance=np.diag([1.0, 2.0, 3.0]),
                ),
                features=ObservationFeatures(
                    parameters={"x": 5.0, "y": 2.0, "METRIC_TASK": "TARGET"}
                ),
                arm_name="0_0",
            ),
            Observation(
                data=ObservationData(
                    metric_names=["metric2", "metric3"],
                    means=np.array([1.0, 1.0]),
                    covariance=np.diag([1.0, 1.0]),
                ),
                features=ObservationFeatures(
                    parameters={"x": 5.0, "y": 2.0, "METRIC_TASK": "metric1"}
                ),
                arm_name="0_0",
            ),
            Observation(
                data=ObservationData(
                    metric_names=["metric3"],
                    means=np.array([2.0]),
                    covariance=np.array([[2.0]]),
                ),
                features=ObservationFeatures(
                    parameters={"x": 5.0, "y": 2.0, "METRIC_TASK": "metric2"}
                ),
                arm_name="0_0",
            ),
            Observation(
                data=ObservationData(
                    metric_names=["metric3"],
                    means=np.array([30.0]),
                    covariance=np.array([[30.0]]),
                ),
                features=ObservationFeatures(
                    parameters={"x": 10.0, "y": 4.0, "METRIC_TASK": "TARGET"}
                ),
            ),
        ]
        self.t = MetricsAsTask(
            search_space=self.search_space,
            observations=self.observations,
            config={"metric_task_map": self.metric_task_map},
        )

    def testInit(self) -> None:
        # metric_task_map is mandatory; omitting the config must raise.
        with self.assertRaises(ValueError):
            MetricsAsTask(
                search_space=self.search_space, observations=self.observations
            )

    def testTransformObservations(self) -> None:
        # Forward transform expands observations; untransform round-trips.
        new_obs = self.t.transform_observations(deepcopy(self.observations))
        self.assertEqual(new_obs, self.expected_new_observations)
        new_obs = self.t.untransform_observations(new_obs)
        self.assertEqual(new_obs, self.observations)

    def testTransformObservationFeatures(self) -> None:
        # Plain features are all tagged as the TARGET task on the way in, and
        # the tag is stripped again on the way out.
        obsfs_t = self.t.transform_observation_features(
            deepcopy([obs.features for obs in self.observations])
        )
        for obsf in obsfs_t:
            assert obsf.parameters["METRIC_TASK"] == "TARGET"
        obsfs_t = self.t.untransform_observation_features(obsfs_t)
        for obsf in obsfs_t:
            assert "METRIC_TASK" not in obsf.parameters
        # Untransforming features tagged with a non-TARGET task is invalid.
        with self.assertRaises(ValueError):
            self.t.untransform_observation_features(
                deepcopy([obs.features for obs in self.expected_new_observations])
            )

    def testTransformSearchSpace(self) -> None:
        # The search space gains one ChoiceParameter with TARGET plus the
        # mapped source metrics as task levels.
        new_ss = self.t._transform_search_space(deepcopy(self.search_space))
        self.assertEqual(len(new_ss.parameters), 3)
        new_param = new_ss.parameters["METRIC_TASK"]
        self.assertIsInstance(new_param, ChoiceParameter)
        self.assertEqual(
            new_param.values, ["TARGET", "metric1", "metric2"]  # pyre-ignore
        )
        self.assertTrue(new_param.is_task)  # pyre-ignore
|
8d29dc1a91c1969bebf51d2aded2fcfaf382e406
|
2ae0b8d95d439ccfd55ea7933ad4a2994ad0f6c5
|
/tools/mo/openvino/tools/mo/utils/import_extensions.py
|
039ff2c2fa21e967c13e3af3fcef6c63930b15f6
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/openvino
|
38ea745a247887a4e14580dbc9fc68005e2149f9
|
e4bed7a31c9f00d8afbfcabee3f64f55496ae56a
|
refs/heads/master
| 2023-08-18T03:47:44.572979
| 2023-08-17T21:24:59
| 2023-08-17T21:24:59
| 153,097,643
| 3,953
| 1,492
|
Apache-2.0
| 2023-09-14T21:42:24
| 2018-10-15T10:54:40
|
C++
|
UTF-8
|
Python
| false
| false
| 3,788
|
py
|
import_extensions.py
|
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import importlib
import logging as log
import os
import pkgutil
import sys
from openvino.tools.mo.back.replacement import BackReplacementPattern
from openvino.tools.mo.load.loader import Loader
from openvino.tools.mo.middle.replacement import MiddleReplacementPattern
from openvino.tools.mo.ops.op import Op
from openvino.tools.mo.utils.class_registration import _check_unique_ids, update_registration, \
get_enabled_and_disabled_transforms, clear_registered_classes_dict
from openvino.tools.mo.utils.model_analysis import AnalyzeAction
def get_internal_dirs(framework: str, get_front_classes: callable):
    """Map extension sub-directory path tuples to their registration classes.

    Insertion order matters: load_dir() iterates this dict's keys to decide
    import order, and later registrations override earlier ones.
    """
    front_classes = get_front_classes()
    dirs_map = {}
    dirs_map[('ops',)] = [Op]
    dirs_map[('analysis',)] = [AnalyzeAction]
    dirs_map[('load', framework)] = [Loader]
    # Every 'front' location shares the same set of front classes.
    for front_path in (('front',),
                       ('front', framework),
                       ('front', framework, 'extractors')):
        dirs_map[front_path] = front_classes
    dirs_map[('middle',)] = [MiddleReplacementPattern]
    dirs_map[('back',)] = [BackReplacementPattern]
    return dirs_map
def import_by_path(path: str, middle_names: list = (), prefix: str = ''):
    """Import every module located directly under `path`.

    The imported dotted name is `prefix` + '.'-joined `middle_names` + '.' +
    the module's file name.
    """
    dotted_base = '{}{}'.format(prefix, '.'.join(middle_names))
    for _, module_name, _ in pkgutil.iter_modules([path]):
        importlib.import_module('{}.{}'.format(dotted_base, module_name))
def default_path():
    """Return the absolute path of the default extensions directory (cwd).

    Fix: the previous implementation called ``os.getcwd().join(EXT_DIR_NAME)``,
    which is ``str.join`` (joining the characters of '.') and yields just '.',
    not a joined path; it only worked because ``os.path.abspath('.')`` is the
    cwd anyway. Use ``os.path.join`` as intended.
    """
    EXT_DIR_NAME = '.'
    return os.path.abspath(os.path.join(os.getcwd(), EXT_DIR_NAME))
def load_dir(framework: str, path: str, get_front_classes: callable):
    """
    Assuming the following sub-directory structure for path:
        front/
            <framework>/
                <other_files>.py
            <other_directories>/
            <other_files>.py
        ops/
            <ops_files>.py
        middle/
            <other_files>.py
        back/
            <other_files>.py

    This function loads modules in the following order:
        1. ops/<ops_files>.py
        2. front/<other_files>.py
        3. front/<framework>/<other_files>.py
        4. middle/<other_files>.py
        5. back/<other_files>.py

    Handlers loaded later override earlier registered handlers for an op.
    1, 2, 3 can concur for the same op, but 4 registers a transformation pass
    and it shouldn't conflict with any stuff loaded by 1, 2 or 3.
    It doesn't load files from front/<other_directories>
    """
    log.info("Importing extensions from: {}".format(path))
    root_dir, ext = os.path.split(path)
    # Make `path`'s parent importable so modules can be imported as
    # '<ext>.<subdir>.<name>'.
    # NOTE(review): if any import below raises, root_dir is never removed from
    # sys.path (no try/finally) — confirm whether that is acceptable here.
    sys.path.insert(0, root_dir)
    enabled_transforms, disabled_transforms = get_enabled_and_disabled_transforms()
    internal_dirs = get_internal_dirs(framework, get_front_classes)
    # Modules under the bundled 'mo' tree live in the openvino.tools namespace.
    prefix = 'openvino.tools.' if ext == 'mo' else ''
    # Skip extensions belonging to every framework except the requested one.
    exclude_modules = {'tf', 'onnx', 'kaldi', 'mxnet', 'caffe'}
    exclude_modules.remove(framework)
    for p in internal_dirs.keys():
        import_by_path(os.path.join(path, *p), [ext, *p], prefix)
        update_registration(internal_dirs[p], enabled_transforms, disabled_transforms, exclude_modules)
    sys.path.remove(root_dir)
def load_dirs(framework: str, dirs: list, get_front_classes: callable):
    """Load extensions for `framework` from each directory in `dirs`.

    The bundled 'mo' extensions directory and the default (cwd) path are
    prepended, then each directory is loaded via load_dir(). Note: `dirs` is
    mutated in place by the first insert before being rebound.
    """
    if dirs is None:
        return

    # Reset registration state on every registration base class so repeated
    # loads start from a clean slate.
    internal_dirs = get_internal_dirs(framework, get_front_classes)
    for p, dir_names in internal_dirs.items():
        for d in dir_names:
            d.registered_cls = []
            d.registered_ops = {}
    clear_registered_classes_dict()

    # The built-in 'mo' extensions live two levels above this file.
    mo_inner_extensions = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, 'mo'))
    dirs.insert(0, mo_inner_extensions)
    dirs = [os.path.abspath(e) for e in dirs]
    if default_path() not in dirs:
        dirs.insert(0, default_path())
    for path in dirs:
        load_dir(framework, path, get_front_classes)
    # Fail if two registered transformations share an id.
    _check_unique_ids()
|
0ac669874295dd62205700092c3f7153fa21a492
|
7e6f1d74ae86125990d6f61cb926d7352cfb3dba
|
/custom_components/browser_mod/mod_view.py
|
bb6cedf7087a82e70458cbfe2080cbc02a81a0fe
|
[
"MIT"
] |
permissive
|
thomasloven/hass-browser_mod
|
92402981c923e5d3a24d142548bab34d0bb42065
|
89bec37383bc307bec4b302af19d32120e684641
|
refs/heads/master
| 2023-08-23T04:21:02.652814
| 2023-07-14T23:32:52
| 2023-07-14T23:32:52
| 194,140,521
| 1,053
| 190
|
MIT
| 2023-07-25T05:52:38
| 2019-06-27T17:56:02
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 2,409
|
py
|
mod_view.py
|
from homeassistant.components.frontend import add_extra_js_url
from .const import FRONTEND_SCRIPT_URL, SETTINGS_PANEL_URL
import logging
_LOGGER = logging.getLogger(__name__)
async def async_setup_view(hass):
    """Register browser_mod's frontend assets with Home Assistant.

    Serves the controller script and settings panel as static paths, registers
    the settings panel in the sidebar, and ensures the controller is present in
    the lovelace resource list (so Cast sessions load it too).
    """
    # Serve the Browser Mod controller and add it as extra_module_url
    hass.http.register_static_path(
        FRONTEND_SCRIPT_URL,
        hass.config.path("custom_components/browser_mod/browser_mod.js"),
    )
    add_extra_js_url(hass, FRONTEND_SCRIPT_URL)

    # Serve the Browser Mod Settings panel and register it as a panel
    hass.http.register_static_path(
        SETTINGS_PANEL_URL,
        hass.config.path("custom_components/browser_mod/browser_mod_panel.js"),
    )
    hass.components.frontend.async_register_built_in_panel(
        component_name="custom",
        sidebar_title="Browser Mod",
        sidebar_icon="mdi:server",
        frontend_url_path="browser-mod",
        require_admin=False,
        config={
            "_panel_custom": {
                "name": "browser-mod-panel",
                "js_url": SETTINGS_PANEL_URL,
            }
        },
    )

    # Also load Browser Mod as a lovelace resource so it's accessible to Cast
    resources = hass.data["lovelace"]["resources"]
    if resources:
        if not resources.loaded:
            await resources.async_load()
            resources.loaded = True

        frontend_added = False
        for r in resources.async_items():
            # Match with startswith so the "?automatically-added" variant
            # added below is also recognized.
            if r["url"].startswith(FRONTEND_SCRIPT_URL):
                frontend_added = True
                continue
            # While going through the resources, also preload card-mod if it is found
            if "card-mod.js" in r["url"]:
                add_extra_js_url(hass, r["url"])

        if not frontend_added:
            # Prefer the storage-backed API; fall back to appending to the
            # raw YAML-backed data list when it is not available.
            # NOTE(review): the two branches use different keys ("res_type"
            # vs "type") — presumably matching each backend's schema; verify.
            if getattr(resources, "async_create_item", None):
                await resources.async_create_item(
                    {
                        "res_type": "module",
                        "url": FRONTEND_SCRIPT_URL + "?automatically-added",
                    }
                )
            elif getattr(resources, "data", None) and getattr(
                resources.data, "append", None
            ):
                resources.data.append(
                    {
                        "type": "module",
                        "url": FRONTEND_SCRIPT_URL + "?automatically-added",
                    }
                )
|
9c7cc1911ba22e0c8b85ff1afe4962d67b80d394
|
2b64c18cd4e415568039b00efaf8b633c1d7e91e
|
/tests/test_vector.py
|
ede4a3d0cb6e04e1fd842117fe44fccf7f832925
|
[
"Apache-2.0",
"Python-2.0"
] |
permissive
|
edgedb/edgedb-python
|
3181294588f2904431200967303066fe01cd3b81
|
717cd76b76c44b3d0472f2895be56fca74970e96
|
refs/heads/master
| 2023-08-30T18:03:43.569471
| 2023-07-25T03:14:35
| 2023-07-25T03:14:35
| 139,201,883
| 320
| 43
|
Apache-2.0
| 2023-09-14T21:57:15
| 2018-06-29T22:37:25
|
Python
|
UTF-8
|
Python
| false
| false
| 4,065
|
py
|
test_vector.py
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2019-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from edgedb import _testbase as tb
import edgedb
import array
# An array.array subtype where indexing doesn't work.
# We use this to verify that the non-boxing memoryview based
# fast path works, since the slow path won't work on this object.
class brokenarray(array.array):
    """An array.array whose element access always raises.

    Any call to __getitem__ means the encoder fell back to per-element
    access instead of the memoryview-based fast path, so the test fails.
    """
    def __getitem__(self, i):
        raise AssertionError("the fast path wasn't used!")
class TestVector(tb.SyncQueryTestCase):
    """End-to-end tests for encoding/decoding ext::pgvector::vector values."""

    def setUp(self):
        super().setUp()
        # Skip when the server does not ship the pgvector extension package;
        # otherwise enable it for this test.
        if not self.client.query_required_single('''
            select exists (
                select sys::ExtensionPackage filter .name = 'pgvector'
            )
        '''):
            self.skipTest("feature not implemented")
        self.client.execute('''
            create extension pgvector;
        ''')

    def tearDown(self):
        # Always drop the extension, even if the test body failed.
        try:
            self.client.execute('''
                drop extension pgvector;
            ''')
        finally:
            super().tearDown()

    async def test_vector_01(self):
        # Decoding: a vector literal comes back as a float ('f') array.array.
        val = self.client.query_single('''
            select <ext::pgvector::vector>[1.5,2.0,3.8]
        ''')
        self.assertTrue(isinstance(val, array.array))
        self.assertEqual(val, array.array('f', [1.5, 2.0, 3.8]))

        # Encoding: a plain list of floats is accepted as a vector argument.
        val = self.client.query_single(
            '''
                select <json><ext::pgvector::vector>$0
            ''',
            [3.0, 9.0, -42.5],
        )
        self.assertEqual(val, '[3, 9, -42.5]')

        # Encoding: a float array.array is accepted directly.
        val = self.client.query_single(
            '''
                select <json><ext::pgvector::vector>$0
            ''',
            array.array('f', [3.0, 9.0, -42.5])
        )
        self.assertEqual(val, '[3, 9, -42.5]')

        # Encoding: an int array.array is converted element-wise.
        val = self.client.query_single(
            '''
                select <json><ext::pgvector::vector>$0
            ''',
            array.array('i', [1, 2, 3]),
        )
        self.assertEqual(val, '[1, 2, 3]')

        # Test that the fast-path works: if the encoder tries to
        # call __getitem__ on this brokenarray, it will fail.
        val = self.client.query_single(
            '''
                select <json><ext::pgvector::vector>$0
            ''',
            brokenarray('f', [3.0, 9.0, -42.5])
        )
        self.assertEqual(val, '[3, 9, -42.5]')

        # I don't think it's worth adding a dependency to test this,
        # but this works too:
        # import numpy as np
        # val = self.client.query_single(
        #     '''
        #         select <json><ext::pgvector::vector>$0
        #     ''',
        #     np.asarray([3.0, 9.0, -42.5], dtype=np.float32),
        # )
        # self.assertEqual(val, '[3,9,-42.5]')

        # Some sad path tests: non-numeric elements and non-sequence inputs
        # must be rejected client-side.
        with self.assertRaises(edgedb.InvalidArgumentError):
            self.client.query_single(
                '''
                    select <ext::pgvector::vector>$0
                ''',
                [3.0, None, -42.5],
            )

        with self.assertRaises(edgedb.InvalidArgumentError):
            self.client.query_single(
                '''
                    select <ext::pgvector::vector>$0
                ''',
                [3.0, 'x', -42.5],
            )

        with self.assertRaises(edgedb.InvalidArgumentError):
            self.client.query_single(
                '''
                    select <ext::pgvector::vector>$0
                ''',
                'foo',
            )
|
d380ffef36a2fe81f031509bb77592c70121dc30
|
c30c2a2f2b8cf648175e1ae6f37c7834a08a8c51
|
/scripts/corehelper.py
|
edc115e7543191748ac2aaf43084582dd117b9f0
|
[
"BSD-3-Clause"
] |
permissive
|
sdnfv/openNetVM
|
bb411bf10075697a5f276cb517df7b8c631b56cf
|
20e01501355129bfa8f46d44f39ca2a8bbb516d0
|
refs/heads/master
| 2022-09-02T15:45:00.358618
| 2022-02-01T02:53:19
| 2022-02-01T02:53:19
| 56,240,513
| 276
| 153
|
NOASSERTION
| 2022-08-04T19:16:59
| 2016-04-14T13:40:21
|
C
|
UTF-8
|
Python
| false
| false
| 8,731
|
py
|
corehelper.py
|
#! /usr/bin/python3
# openNetVM
# https://github.com/sdnfv/openNetVM
#
# BSD LICENSE
#
# Copyright(c)
# 2015-2018 George Washington University
# 2015-2018 University of California Riverside
# 2010-2014 Intel Corporation.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Setup and display ONVM core list info """
import sys
import argparse
import os
# Number of cores reserved for the ONVM manager (1x RX, 1x TX, 1x stats).
ONVM_CONST_MGR_THRD = 3

# Populated by dpdk_cpu_info() from /proc/cpuinfo:
sockets = []     # physical socket ids seen
cores = []       # core ids seen
core_map = {}    # (physical id, core id) -> [logical processor ids]

# Populated by onvm_corelist():
onvm_mgr_corelist = []   # cores reserved for the manager threads
onvm_nfs_corelist = []   # one entry (core list) per runnable NF
### Code from Intel DPDK ###
"""
This function reads the /proc/cpuinfo file and determines the CPU
architecture which will be used to determine corelists for each
openNetVM NF and the manager.
From Intel DPDK usertools/cpu_layout.py
"""
def dpdk_cpu_info():
    """Parse /proc/cpuinfo and populate the module globals `cores`,
    `sockets` and `core_map`.

    Adapted from Intel DPDK usertools/cpu_layout.py.

    Fixes: the global statement listed `core_map` twice (and never `sockets`,
    which was presumably intended); the cpuinfo file handle was never closed.
    The declarations are informational only — the containers are mutated in
    place, not rebound — but each is now listed exactly once.
    """
    global cores
    global sockets
    global core_map
    with open("/proc/cpuinfo") as fd:
        lines = fd.readlines()

    # /proc/cpuinfo is a sequence of "name : value" records, one per logical
    # processor, separated by blank lines.
    core_details = []
    core_lines = {}
    for line in lines:
        if len(line.strip()) != 0:
            name, value = line.split(":", 1)
            core_lines[name.strip()] = value.strip()
        else:
            # Blank line ends one processor's record.
            core_details.append(core_lines)
            core_lines = {}

    for core in core_details:
        for field in ["processor", "core id", "physical id"]:
            if field not in core:
                print("Error getting '%s' value from /proc/cpuinfo" % field)
                sys.exit(1)
            core[field] = int(core[field])

        if core["core id"] not in cores:
            cores.append(core["core id"])
        if core["physical id"] not in sockets:
            sockets.append(core["physical id"])
        # Hyperthread siblings share the same (socket, core id) key.
        key = (core["physical id"], core["core id"])
        if key not in core_map:
            core_map[key] = []
        core_map[key].append(core["processor"])
"""
Print out CPU architecture info.
From Intel DPDK usertools/cpu_layout.py
"""
def dpdk_cpu_info_print():
    """Print the CPU socket/core/processor layout collected by dpdk_cpu_info().

    Adapted from Intel DPDK usertools/cpu_layout.py.

    Fix: the global statement listed `core_map` twice; each name is now
    declared once. NOTE(review): every table cell is emitted by its own
    print() call, so the "table" renders one cell per line — this looks like
    lost Python 2 trailing-comma prints. Restoring single-row output would
    change the script's visible output, so it is only flagged here.
    """
    global cores
    global sockets
    global core_map
    # Column widths derived from the largest processor/core id values.
    max_processor_len = len(str(len(cores) * len(sockets) * 2 - 1))
    max_core_map_len = max_processor_len * 2 + len('[, ]') + len('Socket ')
    max_core_id_len = len(str(max(cores)))

    print("")
    print("============================================================")
    print("Core and Socket Information (as reported by '/proc/cpuinfo')")
    print("============================================================\n")
    print("cores = ", cores)
    print("sockets = ", sockets)
    print("")

    # Header row: one "Socket N" label per socket.
    print(" ".ljust(max_core_id_len + len('Core ')))
    for s in sockets:
        print("Socket %s" % str(s).ljust(max_core_map_len - len('Socket ')))
    print("")
    print(" ".ljust(max_core_id_len + len('Core ')))
    for s in sockets:
        print("--------".ljust(max_core_map_len))
    print("")

    # One row per core id: the logical processors on each socket.
    for c in cores:
        print("Core %s" % str(c).ljust(max_core_id_len))
        for s in sockets:
            print(str(core_map[(s, c)]).ljust(max_core_map_len))
        print("\n")
### End Intel DPDK Codeblock ###
"""
This function uses the information read from /proc/cpuinfo and determines
the corelists for each openNetVM process: the manager and each NF.
"""
def onvm_corelist():
    """Split the discovered cores into manager and NF corelists.

    Populates `onvm_mgr_corelist` (ONVM_CONST_MGR_THRD cores for the manager)
    and `onvm_nfs_corelist` (one core entry per runnable NF), using socket 0.

    Fix: the global statement listed `core_map` twice; names are now declared
    once each (the lists written here are also declared for clarity).
    """
    global core_map
    global cores
    global onvm_mgr_corelist
    global onvm_nfs_corelist
    core_index = 0
    total_cores = len(cores)

    # Run calculations for openNetVM corelists
    # ONVM Manager defaults to three threads 1x RX, 1x TX, 1x stat
    while core_index < ONVM_CONST_MGR_THRD:
        core = core_map.get((0, core_index), None)
        if core is None:
            print("Not enough cores: onvm requires {} to run manager. (You have {})".format(ONVM_CONST_MGR_THRD, len(core_map)))
            sys.exit(1)
        onvm_mgr_corelist.append(core)
        core_index += 1

    # Every remaining core on socket 0 hosts one NF.
    # NOTE(review): this loop indexes core_map with cores[core_index], while
    # the manager loop above used core_index directly as the core id. The two
    # agree only when core ids are contiguous from 0 — confirm behavior on
    # CPUs with sparse core ids before changing either.
    while core_index < total_cores:
        onvm_nfs_corelist.append(core_map[(0, cores[core_index])])
        core_index += 1
"""
Reads the output of lscpu to determine if hyperthreading is enabled.
"""
def onvm_ht_isEnabled():
    """Return True when `lscpu -p` shows hyperthreading is enabled.

    In `lscpu -p` output the first two comma-separated fields are the logical
    CPU number and its physical core; a mismatch means two logical CPUs share
    a core, i.e. hyperthreading is on. Comment lines fail int() parsing and
    are skipped.
    """
    for line in os.popen('lscpu -p').readlines():
        fields = line.split(',')
        try:
            if int(fields[0]) != int(fields[1]):
                return True
        except ValueError:
            # Header/comment line — ignore it.
            continue
    return False
"""
Print out openNetVM corelist info
"""
def onvm_corelist_print():
    """Print the computed manager/NF corelists, refusing to run when
    hyperthreading is enabled."""
    global onvm_mgr_corelist
    global onvm_nfs_corelist
    onvm_print_header()

    # Corelist math assumes one logical CPU per core, so bail out under HT.
    if onvm_ht_isEnabled():
        print("This script only works if hyperthreading is disabled.")
        print("Run no_hyperthread.sh to disable hyperthreading before ")
        print("running this script again.")
        print("")
        sys.exit(1)

    print("** MAKE SURE HYPERTHREADING IS DISABLED **")
    print("")
    print("openNetVM requires at least three cores for the manager:")
    print("one for NIC RX, one for statistics, and one for NIC TX.")
    print("For rates beyond 10Gbps it may be necessary to run multiple TX")
    print("or RX threads.")
    print("")
    print("Each NF running on openNetVM needs its own core too.")
    print("")
    print("Use the following information to run openNetVM on this system:")
    print("")

    # Flatten the manager corelist into "a,b,c" (the slice drops the
    # trailing comma).
    mgr_corelist=""
    for c in onvm_mgr_corelist:
        for i in c:
            mgr_corelist += "%s," %(i)
    print("\t- openNetVM Manager corelist: %s" %(mgr_corelist[:len(mgr_corelist)-1]))
    print("")
    print("\t- openNetVM can handle %d NFs on this system" %(len(onvm_nfs_corelist)))
    # NOTE(review): the loop variable `cores` shadows the module-level global
    # of the same name inside this function — harmless here, but confusing.
    for i, cores in enumerate(onvm_nfs_corelist, 1):
        print("\t\t- NF %d:" %i)
        for c in cores:
            print("%s" %(c))
def onvm_print_header():
    """Print the script's banner: a title framed by two '=' rules."""
    rule = "==============================================================="
    for text in (rule, "\t\t openNetVM CPU Corelist Helper", rule, ""):
        print(text)
"""
Function contains program execution sequence
"""
def run():
    """Dispatch to the info/corelist routines based on the parsed flags.

    Precedence is --all > --onvm > --cpu; with no flags the script behaves
    as if --onvm was given (and says so). Reads the module-global `args`.
    """
    if args.all:
        dpdk_cpu_info()
        onvm_corelist()
        dpdk_cpu_info_print()
        onvm_corelist_print()
    elif args.onvm:
        dpdk_cpu_info()
        onvm_corelist()
        onvm_corelist_print()
    elif args.cpu:
        dpdk_cpu_info()
        dpdk_cpu_info_print()
    else:
        # No flags: default to the --onvm behavior.
        print("You supplied 0 arguments, running with flag --onvm")
        print("")
        dpdk_cpu_info()
        onvm_corelist()
        onvm_corelist_print()
if __name__ == "__main__":
    ### Set up arg parsing: every option is a simple boolean flag.
    parser = argparse.ArgumentParser(description='openNetVM corelist helper script')
    flag_specs = (
        ("-o", "--onvm",
         "[Default option] Display openNetVM corelist information."),
        ("-c", "--cpu",
         "Display CPU architecture only."),
        ("-a", "--all",
         "Display all CPU information."),
        ("-v", "--verbose",
         "Verbose mode displays detailed corelist info."),
    )
    for short_flag, long_flag, help_text in flag_specs:
        parser.add_argument(short_flag, long_flag,
                            action="store_true",
                            help=help_text)
    args = parser.parse_args()

    # Function call to run program
    run()
|
3c67659b49196882f82c4106a832ba28d29f050c
|
bbd69601912a3361d788efd03a47f9d4e3bac09e
|
/demo/agw/ShortcutEditor.py
|
d4007e94f813d32e68a3a5b02a86b5f57fa2e72d
|
[] |
no_license
|
wxWidgets/Phoenix
|
56929484460a0399a8f1d9582bc77c20aa14748d
|
a1184286703cf24c4b88e5bc14cf2979c1b1ea00
|
refs/heads/master
| 2023-09-01T07:10:17.437093
| 2023-08-31T05:38:01
| 2023-08-31T05:38:01
| 5,078,061
| 2,268
| 677
| null | 2023-09-09T17:06:59
| 2012-07-17T06:22:25
|
Python
|
UTF-8
|
Python
| false
| false
| 13,965
|
py
|
ShortcutEditor.py
|
#!/usr/bin/env python
import wx
import os
import sys
import string
import random
from images import catalog
import images
from wx.lib.embeddedimage import PyEmbeddedImage
try:
dirName = os.path.dirname(os.path.abspath(__file__))
except:
dirName = os.path.dirname(os.path.abspath(sys.argv[0]))
sys.path.append(os.path.split(dirName)[0])
try:
from agw import shortcuteditor as SE
except ImportError: # if it's not there locally, try the wxPython lib.
import wx.lib.agw.shortcuteditor as SE
# Directory containing the shortcuteditor module, and its bundled help page.
SE_DIR = os.path.split(SE.__file__)[0]
HTML_HELP = os.path.join(SE_DIR, 'data', 'default_help_text.html')

# Titles of the demo frame's top-level menus.
TOP_MENUS = ['File', 'Edit', 'View', 'Options', 'Window', 'Help']

# Candidate keys for randomly generated shortcuts: A-Z and 0-9, plus the
# special key names from SE.KEYMAP (the first assignment is immediately
# rebuilt into a list by the second).
COMBINATIONS = string.ascii_uppercase + string.digits
COMBINATIONS = [c for c in COMBINATIONS] + list(SE.KEYMAP.values())

# Six pre-allocated wx ids, one per random accelerator entry.
ACCEL_IDS = wx.NewIdRef(6)

_ = wx.GetTranslation
#----------------------------------------------------------------------
_accelerators = PyEmbeddedImage(
b"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9i"
b"ZSBJbWFnZVJlYWR5ccllPAAAAUNJREFUeNqkkrFqg1AUho+idLKCEgSnvoBkUiwBoU4tGXyJ"
b"Lpmy+QI+gUuzFbK5OhSKQzZBfIlACCEiplAHkQj2XFNDaYOJ6YHPH7n85z/ncqm6ruE/xZDP"
b"eDzu66ORELmnrwy+Q9K2U9+6RZ6Rt+MKvw6nyCNy09HkHXk91cBGPpAn5PPiS/wuFnlATKS8"
b"dB9qPp+/oE6uvMwZU1XVxDRNyLIMBEGA3W53VkmJogiLxWJC7/d7WK/XMBwOYbVanVVFURqI"
b"h3gp13VrXdchTdNesw8GA4iiCOiyLIHneSiKAgzD6NTRaASqqgIJ3G63QLyU4zi1pmmQJEln"
b"IsdxkOf58V+SJIjjGBjShZgtywLP8xolu/0slmXB9/3mrNUgCA4T2LbdTLDZbP6knJqgVVmW"
b"DxNg2iwMw97vYLlcNu/gS4ABALx5qLfCRWM1AAAAAElFTkSuQmCC")
#----------------------------------------------------------------------
_edit = PyEmbeddedImage(
b"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABmJLR0QAAAAAAAD5Q7t/AAAA"
b"CXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH1QsOEh8bQbXRcgAAAhxJREFUKM91ks1rE1EU"
b"xc99H5l8m2lolKRuqtamKohFChahYF0IfoBSF4IbwY1/gv+DK/cuXLgTxaWIXSjSjWKEFgm4"
b"aGvaJo1NJpm0eTPvzXPRmrZgz+7C+d3DPVz6WvmVTMTwT9tKK2W9le8Ta08VTXei0fL9R8nC"
b"iYFBJJPO2VPFwRyEkbLMazwrnvvJTM5frP3+MDz24PHAwGAtDohLZpt113nHNoH5he6nL+mx"
b"8wcNDIdFAG++imMVLaCFbzT+vtrdanWOBEyny9sveQjssLUa712aq7e9xaXqkQC8ea4r6Mds"
b"nTZKV07O3iu4KSJ7JGCXn8tMAn685WX98dtDI0UOwxj9HzAbCzxeRS+NjrOVnnBn5hJWZdOZ"
b"Rr0BIAiCw4Ax0doLFmdQWb+dbo3ccIslwRhx8trtMAybzT+9Xk/shikLbFSEs0zaBZzGjjN8"
b"92Hgt8NAdbyOUkprDaDb9cXu9igCTB9RCfGMv15r5qfjEVebTUYUGUNEAIhICM4G9euQuJw1"
b"+nplyQ3PzGirTWSFFAQCkRAilzuWz+f3EiTHcn37x8fPpUSKrj5JjZZ1fxuwXLBkKiV4KKWU"
b"Uu4dvVtyf0evV9fl5J38hSkE/ZiQrpstFI4LyYul0v7zEWcEcKA8OXX64mWeG9L9MMxlpBOL"
b"jI3FOOeCs/0yxepK7c3rt1wwYhJEkQlguYWxFjayjJHndW/dvDYA/gKtTuQVCWY6EAAAAABJ"
b"RU5ErkJgggo=")
#----------------------------------------------------------------------
_file = PyEmbeddedImage(
b"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQBAMAAADt3eJSAAAAB3RJTUUH0AwNAR44FPFuJQAA"
b"AAlwSFlzAAAK8AAACvABQqw0mAAAADBQTFRFAP8Af39/UFBQz8/P4ODg////8PDw///wn5+f"
b"MDAwgICAAAAAAAAAAAAAAAAAAAAAZbZ2KAAAAAF0Uk5TAEDm2GYAAABRSURBVHjaY2BcBQQL"
b"GBgYGEOBoAnECC8vL29XgDI0Z0IY6RZCYEZZmXMTRCTFxQjMSHeBMNLdoIySFCgDKABmhKVB"
b"GS4uUIYxCIAYSmDAAAcA0SwdEWLFwocAAAAASUVORK5CYII=")
#----------------------------------------------------------------------
_help = PyEmbeddedImage(
b"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAC+ElEQVR4nGWTP2xbVRjFf9+9"
b"9/01TgwJCQkMbkMKqCkEKGoXBpiQEqkpZUL8y8BeRqYwMMBQRMWMGtQgJCSKYWCohARIVCCo"
b"MJUgLbSipVERTuzYcexn+713L4OlirafdLZzznC+c4TbbnZxrSTIEkoeQ9S8iICob0WkevHM"
b"C5Xb+XKLeOH08WIxXnloZqoUhSFRFDHIcmr1XerNDts7navWuTfWPz1SucNgduH0qfm58mt7"
b"y/ezfq1LrZmR2SHFaAg9QTtLo1WnnybLv3+yuHrTYHZh7a1DT8ysFEfH+eVyh73TEa8vTvL0"
b"o0WsdXzz6w6nzm5x5cYALdDtNMgG3aO/ffxcRWYX18pTE6W/Dj7+CN9daDM17lN5+2GsteS5"
b"w1qLc44b9ZSXTlxHRHDOkrRqTWvzPXp837GVw0/OHl7fyOiljt2eJQ4U9VbGiTM1HLBn0iP2"
b"hR8v92n1QGmNaB3m6eCS8QNvSZmI7XYXRECED76skTshs6C18OyBGOccm7uOTjrMLNQRottH"
b"zOhIoVxrpsM0BPqpo9vJEa15YMLnzWNjWGs590efRg/8yABQUJB0dclYB71BjnWwvZORI3i+"
b"RnuKd16ZIA6EK/9mnPy6QxB7KDV8XDFw1BsGM0hzBMfmdooTwfgKZRQLB+9iZtJgrePD7xNS"
b"ZQgChdIKgJGCRZRGdZJBpd1OsM4hSlB6iKl7DM45nHNc2nQEoSGIPMLYY2TEIwxAtKkaRH3R"
b"au8uFcNRulZQaojKzwn7pn22EjC+xgs0fuhhfE15DP5cbyFKf6Qufvb8atJPqpHOMQKIIEo4"
b"+lTMoRmfhTmfuWmD9jReqJm+10ORs/FPv3L+/QNVBeBwy4O01QzE3uz2hesp3QFs7MDfTYdR"
b"cN+oUPIyzv3QqIrSy7dsYf+LX82jzOe5GS3rsEgcGeKCR6FouLvkMVYybDV6XNtIqoNMnvnp"
b"3Qebd6xx7uWzJZQ6Ltp71XhBOS7EhJEhzS27SV4VbU6ef2//6v81/wH6bjI8fK9HXAAAAABJ"
b"RU5ErkJggg==")
#----------------------------------------------------------------------
_options = PyEmbeddedImage(
b"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABmJLR0QA/wD/AP+gvaeTAAAA"
b"CXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH1QkaDBM5i2PCSAAAAfBJREFUOMulkktoE2EU"
b"hb+Z+EyKTRQKgkqwzMaFtt1FrC40FGJm60JwIVSkqLUtElICFQNDQqBrQXRlQIriwomN0GJX"
b"gtI2iUkXFYJVadOXhiBERDozbmaGMR3rwrP7ueece++5P/wnBOcjnVGigArI8Vgi9xdNNJ1R"
b"bI7YUlT7r/YDqKaZq/j6tQHNbLQd6YxiNBp1I51RDPdaw6pFAcR0RolaZKur19vmZhwFePDw"
b"PvFYQgZyACKgDt4cMp4+mzAA9fatETbX15A6Jer1r/das4ndGRUsMYBgFW8MDBqatiXoum7o"
b"ukZhfk4ovC8CyDsFK7R0sBHpu0i5UmG59gUgGY8l7v7zjE68yr80SpUS3Sd7KJYLmBNMArqr"
b"QTCSOgzUrPeVkE7XCYmjR47RbDZ5N/cWtzU8TvH4cJi+UCcdAS/ZmU2Ot39LLn1eOtd9qoeA"
b"P8BKbfnyhfD5+emp11XAABCDkVQXUHs0JjNbXmS2vEjHQR8A5t5yLv8CSZI4e7rX+mR2HiJQ"
b"HB8OM/WmxJamI+7zs1Fv2iOaI8vZJ4850O7nTKgXYMxpAMDuXR72+A7x88cvsvkFgHCrSS6v"
b"Uv1Y/SNsEWBl4zv7fQHa9np4PvMBIPxpcnTaSTRNkmvrqwtA0r5CMJK6BEw4uNvEO+E3N+LV"
b"9uq8VLwAAAAASUVORK5CYII=")
#----------------------------------------------------------------------
_view = PyEmbeddedImage(
b"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAAB3RJTUUH2QMSDgcQHD2pKgAA"
b"AAlwSFlzAAALEgAACxIB0t1+/AAAAARnQU1BAACxjwv8YQUAAACWUExURb7Dxvz8/ObXxs+0"
b"mK6UZb6JXMGZf9rBpezn39PT07unkOq0iuvBotSdZ/7+/trZ2eng1uvr5O7u7v///ubMsNDe"
b"7ae+23Og32aX3Y+z58va6vHx8fT09Nzm8Zm66nql4+Dg3/j4+LXN8IOs5vf39/v7+6fE7g8U"
b"G8iUWdXGwMqldOvUtr/Jzunp6ayfnaSps5qhs9/NuBN0LcUAAAABdFJOUwBA5thmAAAAqUlE"
b"QVR42o2PSxaCMBAEx4kQZIKGGAQEE8NXRUHufzkRL2Dt6vWmGuAfXIzzrLWW7ucKh2HgjAsh"
b"gq+/cGDhOE3v0SfCdQ+fXWdMXlecKACp723Vmb6vbd0iPUDLpu1M3vc2tW0jb3BVpavy2to0"
b"M65UDFAW5cUsmtmkLJAg1hio5JRm+bmIdcQADlygjNUCUuQfl5BdSLRECYo8tl9TN8i2nuf5"
b"PPjr6Qc/LA45I8MgVQAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
_window = PyEmbeddedImage(
b"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAKnRFWHRDcmVhdGlvbiBUaW1l"
b"AERpIDQgTXJ6IDIwMDMgMDA6MjQ6MDQgKzAxMDDdSQ6OAAAAB3RJTUUH0woGFAwIjuRPzgAA"
b"AAlwSFlzAAAK8AAACvABQqw0mAAAAARnQU1BAACxjwv8YQUAAAEVSURBVHjaY2SY/tuDgYll"
b"OwMp4OqOKIZJntuArM+MDDP//59pTLzeJQ8ZGA6/ATLSGT2A5AkWkGD6WSDx5g1xJvDwMDBw"
b"cIBYlkB8C2zANdvrKGr+/mVg+P0bQv/5g8AgsT9/XjN4PdYEKRMEYjawAZqamigGgBT/+gXR"
b"AKORMcNjsDJGEMFEUuBhAdQ34N8/VEwIsMD8jGwAiA8KQBgbn6FgA0ABhWwAsub//xF8nAaA"
b"QxbNCzBXwFwAYoMMA2G8LoB5CaQQZggMwwzD6wJ0b6C7AoSh4D/EgGu7IqUZ3JaTFHcdFkuA"
b"5HuQ40GpiR+ILRggaRuUPBkJaP8P1XwciE8wQtMCLxALAzErkW4AefotEH8GAEJMrcAWjkHy"
b"AAAAAElFTkSuQmCC")
#----------------------------------------------------------------------
def GetValidMenuImages():
    """Return every bitmap in the art ``catalog`` that is exactly 16x16 pixels.

    Menu items can only display 16x16 bitmaps, so any catalog entry with a
    different size is filtered out.
    """
    # NOTE: the original implementation also kept an unused `counter` local;
    # it has been removed.
    valid_images = []
    for key in catalog:
        bmp = catalog[key].GetBitmap()
        # Keep only bitmaps usable as menu-item images.
        if bmp.GetWidth() == 16 and bmp.GetHeight() == 16:
            valid_images.append(bmp)
    return valid_images
#----------------------------------------------------------------------
class ShortcutEditorDemo(wx.Frame):
    """Demo frame for the AGW ShortcutEditor dialog.

    Builds a randomized menu bar and accelerator table, then opens a
    ShortcutEditor dialog pre-populated from both so the user can edit
    the shortcuts interactively.
    """

    def __init__(self, parent, log):
        wx.Frame.__init__(self, parent, -1, 'ShortcutEditor wxPython Demo :-D', size=(900, 800))

        self.log = log
        # Bitmaps from the demo art catalog that are usable as menu images.
        self.valid_images = GetValidMenuImages()
        # Shortcut suffixes already handed out, to avoid duplicates.
        self.used_shortcuts = []

        self.SetIcon(images.Mondrian.GetIcon())

        self.MakeMenuBar()
        self.MakeAcceleratorTable()

        # Populate the editor from the live menu bar and accelerator table.
        dlg = SE.ShortcutEditor(self)
        dlg.FromMenuBar(self)
        dlg.FromAcceleratorTable(self.accelTable)
        self.AddTopMenuBitmaps(dlg)

        dlg.Bind(SE.EVT_SHORTCUT_CHANGING, self.OnShortcutChanging)
        dlg.Bind(SE.EVT_SHORTCUT_CHANGED, self.OnShortcutChanged)

        self.CenterOnScreen()
        self.Show()
        self.Raise()

        # Show the dialog only after the frame is fully displayed.
        wx.CallAfter(self.ShowDialog, dlg)

    def MakeMenuBar(self):
        """Create a menu bar with one randomly populated menu per TOP_MENUS title."""
        bar = wx.MenuBar()
        top_menus = []
        for title in TOP_MENUS:
            menu = wx.Menu()
            self.AppendMenus(menu, title)
            bar.Append(menu, title)
            top_menus.append(menu)
        self.SetMenuBar(bar)

    def MakeAcceleratorTable(self):
        """Build six random accelerators and install them on the frame.

        ``self.accelTable`` keeps a parallel list of (name, modifier, key, id)
        tuples so the ShortcutEditor can show labelled entries.
        """
        table = []
        saved_table = []
        for i in range(6):
            name = 'Accelerator %d'%(i+1)
            choice = random.choice(list(SE.ACCELERATORS))
            if choice == wx.ACCEL_ALT:
                letter = random.choice(COMBINATIONS)
                if len(letter) > 1:
                    # Multi-character entry is a key name: map it back to its
                    # wx key code by inverting SE.KEYMAP.
                    inv_keyMap = dict(zip(SE.KEYMAP.values(), SE.KEYMAP.keys()))
                    wxk = inv_keyMap[letter]
                else:
                    wxk = ord(letter)
            else:
                wxk = random.choice(list(SE.KEYMAP))
            accel = (choice, wxk, ACCEL_IDS[i])
            saved_accel = (name, choice, wxk, ACCEL_IDS[i])
            self.Bind(wx.EVT_MENU, self.OnAcceleratorShortcuts, id=ACCEL_IDS[i])
            table.append(accel)
            saved_table.append(saved_accel)
        self.accelTable = saved_table
        self.SetAcceleratorTable(wx.AcceleratorTable(table))

    def AppendMenus(self, top_menu, title, recursive=''):
        """Append 2-7 random items (and occasionally a submenu) to top_menu."""
        num_menus = random.randint(2, 7)
        for index in range(num_menus):
            shortcut = self.CreateShortcut()
            sub_menu = wx.MenuItem(top_menu, -1, '%s%sItem %d%s'%(recursive, title, index+1, shortcut),
                                   'Help for %s%sItem %d'%(recursive, title, index+1))
            if random.randint(0, 1) == 1:
                # Get a random image for the menu
                bmp = random.choice(self.valid_images)
                sub_menu.SetBitmap(bmp)
            self.Bind(wx.EVT_MENU, self.OnMenuShortcuts, id=sub_menu.GetId())
            if random.randint(0, 10) == 5 and not recursive:
                # Append a sub-sub-menu (one level of recursion only)
                dummy_menu = wx.Menu()
                recursive = 'Sub-'
                self.AppendMenus(dummy_menu, title, recursive)
                dummy_item = top_menu.AppendSubMenu(dummy_menu, 'Sub ' + title)
                if random.randint(0, 1) == 1:
                    # Get a random image for the menu
                    bmp = random.choice(self.valid_images)
                    dummy_item.SetBitmap(bmp)
                recursive = ''
            top_menu.Append(sub_menu)
            if random.randint(0, 1) == 1 and index < num_menus - 1:
                # Append a separator
                top_menu.AppendSeparator()

    def CreateShortcut(self):
        """Return a random unused shortcut suffix (e.g. tab + 'Ctrl+A'), or '' for none."""
        rand = random.randint(0, 3)
        if rand == 0:
            # No shortcut
            return ''
        letter = random.choice(COMBINATIONS)
        shortcut = '\t%s+' + letter
        if rand == 1:
            # Ctrl + character
            modifier = 'Ctrl'
        elif rand == 2:
            # Shift + character
            modifier = 'Shift'
        else:
            # Ctrl + Shift + character
            modifier = 'Ctrl+Shift'
        shortcut = shortcut % modifier
        if shortcut in self.used_shortcuts:
            # Already taken: retry until a free combination is found.
            return self.CreateShortcut()
        self.used_shortcuts.append(shortcut)
        return shortcut

    def AddTopMenuBitmaps(self, dlg):
        """Attach a bitmap to each top-level menu entry shown in the editor."""
        manager = dlg.GetShortcutManager()
        for child in manager.children:
            name = child.label.lower()
            # Look up the module-level PyEmbeddedImage named _<label>.
            bitmap = eval('_%s'%name).GetBitmap()
            child.SetBitmap(bitmap)

    def ShowDialog(self, dlg):
        """Run the editor modally; apply the edited shortcuts unless cancelled."""
        answer = dlg.ShowModal()
        if answer == wx.ID_CANCEL:
            dlg.Destroy()
            return
        # Write the (possibly modified) shortcuts back to the frame's
        # menu bar and accelerator table.
        dlg.ToMenuBar(self)
        dlg.ToAcceleratorTable(self)
        dlg.Destroy()

    def OnMenuShortcuts(self, event):
        """Log which menu item was activated (label and its accelerator)."""
        itemId = event.GetId()
        menu = event.GetEventObject()
        menuItem = menu.FindItemById(itemId)
        label = menuItem.GetItemLabel()
        # Label and shortcut are separated by a tab in the item label.
        label, accel = label.split('\t')
        self.log.write('You have selected the shortcut for %s (%s)'%(label, accel))

    def OnAcceleratorShortcuts(self, event):
        """Log which accelerator-table entry was triggered."""
        itemId = event.GetId()
        for label, choice, accel, ids in self.accelTable:
            if ids == itemId:
                self.log.write('You have selected the accelerator for %s (%s)'%(label, accel))
                break

    def OnShortcutChanging(self, event):
        """Log a pending shortcut change; Skip() lets the change proceed."""
        shortcut = event.GetShortcut()
        oldAccel = event.GetOldAccelerator()
        newAccel = event.GetAccelerator()
        self.log.write('Shortcut for "%s" changing from "%s" to "%s"'%(shortcut.label, oldAccel, newAccel))
        event.Skip()

    def OnShortcutChanged(self, event):
        """Log a committed shortcut change."""
        shortcut = event.GetShortcut()
        newAccel = event.GetAccelerator()
        self.log.write('Shortcut for "%s" changed to "%s"'%(shortcut.label, newAccel))
#---------------------------------------------------------------------------
class TestPanel(wx.Panel):
    """Hosts the single button that launches the ShortcutEditor demo frame."""

    def __init__(self, parent, log):
        self.log = log
        wx.Panel.__init__(self, parent, -1)

        launch_btn = wx.Button(self, -1, " Run ShortcutEditor ", (50, 50))
        self.Bind(wx.EVT_BUTTON, self.OnButton1, launch_btn)

    def OnButton1(self, event):
        # Keep a reference so the demo frame is not garbage collected.
        self.win = ShortcutEditorDemo(self, self.log)
#----------------------------------------------------------------------
def runTest(frame, nb, log):
    """Entry point called by the wxPython demo framework."""
    return TestPanel(nb, log)
#----------------------------------------------------------------------
# Module-level help text loaded from HTML_HELP — presumably displayed by the
# demo framework's overview tab (TODO confirm against the demo runner).
with open(HTML_HELP, 'rt') as fid:
    overview = fid.read()

if __name__ == '__main__':
    import run
    # Run this demo standalone through the wxPython demo runner.
    run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
|
e8fdd79b491b0c9d91ef20c78a333279496fecf2
|
0e76737364820dd3c90e6a60725a837aed2a435e
|
/usb3_pipe/core.py
|
0ff2abb556bbdbfcec1cc466f7796841e2bf0921
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
enjoy-digital/usb3_pipe
|
4f40de8cdc443a1a60a844d25ebbb0a2c1c6ea6d
|
d2847d9501aa2c4995449362b99d503663e6b192
|
refs/heads/master
| 2022-05-28T09:38:32.624262
| 2022-05-03T07:10:00
| 2022-05-03T07:10:00
| 208,852,968
| 148
| 30
|
BSD-2-Clause
| 2022-05-03T07:10:01
| 2019-09-16T17:00:16
|
Verilog
|
UTF-8
|
Python
| false
| false
| 3,356
|
py
|
core.py
|
#
# This file is part of USB3-PIPE project.
#
# Copyright (c) 2019-2020 Florent Kermarrec <florent@enjoy-digital.fr>
# SPDX-License-Identifier: BSD-2-Clause
from migen import *
from litex.soc.interconnect import stream
from usb3_pipe.common import *
from usb3_pipe.lfps import LFPSUnit
from usb3_pipe.training import TSUnit
from usb3_pipe.ltssm import LTSSM
from usb3_pipe.scrambling import Scrambler, Descrambler
# USB3 PIPE ----------------------------------------------------------------------------------------
@ResetInserter()
class USB3PIPE(Module):
    """USB3.0 PIPE Core

    Wrap an FPGA transceiver exposing 2 TX and RX data/ctrl streams into a USB3.0 PIPE by adding:
    - LFPS detection/generation.
    - Training Sequence Ordered Sets detection/generation.
    - Clock compensation Ordered Sets removing/insertion.
    - Conversion to/from a 32-bit/4-bit data/ctrl stream.
    - Clock domain crossing to/from sys_clk (>=125MHz).
    - RX words alignment.
    - TX scrambling/RX descrambling.
    - Link Training State Machine.
    """
    def __init__(self, serdes, sys_clk_freq, with_endianness_swap=True):
        # The core requires a system clock of at least 125MHz.
        assert sys_clk_freq >= 125e6
        self.ready  = Signal()  # o: high while LTSSM is in Polling.Idle or Polling.Recovery
        self.sink   = stream.Endpoint([("data", 32), ("ctrl", 4)])
        self.source = stream.Endpoint([("data", 32), ("ctrl", 4)])

        # # #

        # Endianness Swap --------------------------------------------------------------------------
        # Optionally byte-swap both datapaths; otherwise use the user endpoints directly.
        if with_endianness_swap:
            sink        = stream.Endpoint([("data", 32), ("ctrl", 4)])
            source      = stream.Endpoint([("data", 32), ("ctrl", 4)])
            sink_swap   = EndiannessSwap(self.sink, sink)
            source_swap = EndiannessSwap(source, self.source)
            self.submodules += sink_swap, source_swap
        else:
            sink   = self.sink
            source = self.source

        # LFPS -------------------------------------------------------------------------------------
        # Low-Frequency Periodic Signaling detection/generation unit.
        lfps = LFPSUnit(serdes=serdes, sys_clk_freq=sys_clk_freq)
        self.submodules.lfps = lfps

        # TS----------------------------------------------------------------------------------------
        # Training Sequence Ordered Sets detection/generation unit.
        ts = TSUnit(serdes=serdes)
        self.submodules.ts = ts

        # LTSSM ------------------------------------------------------------------------------------
        # Link Training State Machine; `ready` tracks its Polling.Idle/Recovery states.
        ltssm = LTSSM(serdes=serdes, lfps_unit=lfps, ts_unit=ts, sys_clk_freq=sys_clk_freq)
        self.submodules.ltssm = ltssm
        self.comb += self.ready.eq(ltssm.polling.idle | ltssm.polling.recovery)

        # Scrambling -------------------------------------------------------------------------------
        # TX scrambler is held in reset until the LTSSM reports tx_ready, and its
        # output is only connected to the serdes once tx_ready is asserted.
        scrambler = Scrambler()
        scrambler = ResetInserter()(scrambler)
        self.comb += scrambler.reset.eq(~ltssm.polling.tx_ready)
        self.submodules.scrambler = scrambler
        self.comb += [
            sink.connect(scrambler.sink),
            If(ltssm.polling.tx_ready, scrambler.source.connect(serdes.sink))
        ]

        # RX descrambler: data/ctrl payload is always connected; the remaining
        # (handshake) signals are only connected once the LTSSM reports rx_ready.
        descrambler = Descrambler()
        self.submodules.descrambler = descrambler
        self.comb += [
            serdes.source.connect(descrambler.sink, keep={"data", "ctrl"}),
            If(ltssm.polling.rx_ready, serdes.source.connect(descrambler.sink, omit={"data", "ctrl"})),
            descrambler.source.connect(source),
        ]
|
6a32aea70b5fe9834b6b72348b7a723e4a039728
|
8743cb01e02a5cf45d5e40d59fd04a40e68139b6
|
/generated_code/memlayout.py
|
393c313066ff6a8d8f3f88da89c2f91b401b08d1
|
[
"BSD-3-Clause"
] |
permissive
|
SeisSol/SeisSol
|
a2c9ae29021251db5f811c343762e0699756b39d
|
7fde47786f10aebbb4225f4c5125829ea9b541a1
|
refs/heads/master
| 2023-09-01T08:57:51.863085
| 2023-08-31T07:29:26
| 2023-08-31T07:29:26
| 21,459,883
| 227
| 129
|
BSD-3-Clause
| 2023-09-13T10:06:09
| 2014-07-03T11:17:47
|
C++
|
UTF-8
|
Python
| false
| false
| 4,439
|
py
|
memlayout.py
|
#! /usr/bin/env python
##
# @file
# This file is part of SeisSol.
#
# @author Carsten Uphoff (c.uphoff AT tum.de, http://www5.in.tum.de/wiki/index.php/Carsten_Uphoff,_M.Sc.)
#
# @section LICENSE
# Copyright (c) 2016, SeisSol Group
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# @section DESCRIPTION
#
import os
import arch
import re
class Candidate(object):
    """Measures how well a memory-layout file suits a build configuration.

    Each shared attribute between a build configuration and this Candidate
    raises the score; any mismatching attribute disqualifies it entirely.
    Scores are sums of distinct powers of two (2**importance), so the best
    scoring Candidate is always unique.
    """

    # Attribute weights, from least to most important.
    IMPORTANCE = {'precision': 1, 'equations': 2, 'order': 3, 'pe': 4, 'multipleSimulations': 5}

    def __init__(self, atts):
        self.atts = atts

    def score(self, reqs):
        """Return this Candidate's score against the requirements, 0 on any mismatch."""
        total = 0
        for key, required in reqs.items():
            if key not in self.atts:
                # Attribute not constrained by this candidate: neutral.
                continue
            if self.atts[key] != required:
                # Requirement not satisfied: candidate is disqualified.
                return 0
            total += 2 ** self.IMPORTANCE[key]
        return total

    def __repr__(self):
        return repr(self.atts)
def findCandidates(search_path):
    """Scan search_path and build a Candidate per file, parsing attributes
    out of each underscore-separated file name."""
    known_pes = [arch.getCpu(a) for a in arch.getArchitectures()]
    candidates = dict()
    for entry in os.listdir(search_path):
        stem, _ext = os.path.splitext(entry)
        atts = dict()
        for token in stem.split('_'):
            ms_match = re.match('ms([0-9]+)', token)
            order_match = re.match('O([0-9]+)', token)
            if ms_match:
                atts['multipleSimulations'] = int(ms_match.group(1))
            elif order_match:
                atts['order'] = int(order_match.group(1))
            elif token.lower() in ['s', 'd']:
                atts['precision'] = token.lower()
            elif token.lower() in known_pes:
                atts['pe'] = token.lower()
            else:
                # Anything unrecognized is treated as the equations name.
                atts['equations'] = token
        candidates[entry] = Candidate(atts)
    return candidates
def guessMemoryLayout(env):
    """Return the path of the memory-layout config best matching the build.

    Scores every candidate file in auto_tuning/config against the build
    configuration `env`; falls back to the all-dense layout for GPU targets
    or when no candidate matches at all.
    """
    script_dir = os.path.dirname(os.path.abspath(__file__))
    path = os.path.join(script_dir, '..', 'auto_tuning', 'config')

    if 'gpu' in env['targets']:
        print('INFO: Found gpu as a target. Memory layout will fall back to all dense')
        return os.path.join(path, 'dense.xml')

    # Build-configuration attributes to match against file-name attributes.
    # Their relative weight (from least to most important) is encoded in
    # Candidate.IMPORTANCE. (An unused local `importance` list duplicating
    # that ordering has been removed.)
    values = {
        'precision': env['arch'][0].lower(),
        'equations': env['equations'].lower(),
        'order': int(env['order']),
        'pe': arch.getCpu(env['arch']),
        'multipleSimulations': int(env['multipleSimulations'])
    }

    candidates = findCandidates(search_path=path)
    bestFit = max(candidates.keys(), key=lambda key: candidates[key].score(values))
    bestScore = candidates[bestFit].score(values)
    if bestScore == 0:
        print('WARNING: No suitable memory layout found. (Will fall back to all dense.)')
        bestFit = 'dense.xml'
    print('Using memory layout {}'.format(bestFit))
    return os.path.join(path, bestFit)
|
cb01979c4ae2328cf1964c73986aeb2ec897080b
|
39568e19301a7a112398be542154950af25591de
|
/util/dvsim/qsubopts.py
|
c94c1c1e8055eb1c0700d276113ff0a377ee280e
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
lowRISC/opentitan
|
493995bc7cf7cb3aee486a5203af3fd62bba3bfc
|
51f6017b8425b14d5a4aa9abace8fe5a25ef08c8
|
refs/heads/master
| 2023-08-31T22:05:09.425796
| 2023-08-14T14:52:15
| 2023-08-31T20:31:13
| 204,516,692
| 2,077
| 634
|
Apache-2.0
| 2023-09-14T21:16:21
| 2019-08-26T16:30:16
|
SystemVerilog
|
UTF-8
|
Python
| false
| false
| 96,486
|
py
|
qsubopts.py
|
#!/usr/bin/env python
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
# -*- coding: utf-8 -*-
# ----------------------------------
# qsubOptions Class
# ----------------------------------
"""A helper class designed to handle the managment of options and
positional arguments to qsub and related Grid Engine executables.
Contains functions to write the requested execution string either
to the command line or to a script file.
"""
import argparse
class qsubOptions():
"A data type meant to collect qsub options. See man qsub for information"
def __init__(self, optstring='', prog='qsub'):
# Which SGE command are we going to work with?
self.prog = prog
sge_program_names = [
'qsub', 'qrsh', 'qsh', 'qlogin', 'qalter', 'qresub', 'qmake'
]
assert self.prog in sge_program_names, 'Unsupported SGE command: ' + prog + \
'not one of ' + ', '.join(sge_program_names)
if prog == 'qmake' and '-pe' in optstring:
prog = 'qsub'
else:
prog = 'qrsh'
# SUPPRESS = If not specified, do not generate variable in namespace
self.parser = argparse.ArgumentParser(
description='Options to pass to qsub',
formatter_class=argparse.RawTextHelpFormatter,
argument_default=argparse.SUPPRESS,
epilog="""The following is scraped from the qsub manpage for GE \
6.2u5 dated 2009/12/01 12:24:06""")
# BEGIN SGE OPTION PARSER
# BUG if help still begins with a line with -option, have cosmetic bug where
# metavar cannot be specified correctly
yesno = ['y', 'yes', 'n', 'no']
if prog in ['qsub', 'qrsh', 'qsh', 'qlogin']:
self.parser.add_argument('-@',
metavar='optionfile',
help="""\
Forces qsub, qrsh, qsh, or qlogin to use the options contained
in optionfile. The indicated file may contain all
valid options. Comment lines must start with a "#" sign.""")
if prog in ['qsub', 'qalter']:
self.parser.add_argument('-a',
metavar='date_time',
help="""\
Available for qsub and qalter only.
Defines or redefines the time and date at which a job is eligible
for execution. Date_time conforms to [[CC]]YY]MMDDhhmm[.SS],
for the details, please see Date_time in: sge_types(1).
If this option is used with qsub or if a corresponding value is specified
in qmon then a parameter named a and the value in the format CCYYMMDDhhmm.SS
will be passed to the defined JSV instances (see -jsv option below or
find more information concerning JSV in jsv(1))""")
if prog in ['qsub', 'qsh', 'qrsh', 'qlogin', 'qalter']:
self.parser.add_argument('-ac',
metavar='variable[=value]',
action='append',
help=""" -ac variable[=value],...
Available for qsub, qsh, qrsh, qlogin and qalter only.
Adds the given name/value pair(s) to the job's context. Value may be omitted.
Grid Engine appends the given argument to the list of context variables for the job.
Multiple -ac, -dc, and -sc options may be given. The order is important here.
The outcome of the evaluation of all -ac, -dc, and -sc options or
corresponding values in qmon is passed to defined JSV instances as parameter
with the name ac. (see -jsv option below or find more information concerning
JSV in jsv(1)) QALTER allows changing this option even while the job executes."""
)
if prog in ['qsub', 'qalter', 'qrsh', 'qsh', 'qlogin']:
self.parser.add_argument('-ar',
metavar='ar_id',
help="""\
Available for qsub, qalter, qrsh, qsh, or qlogin only.
Assigns the submitted job to be a part of an existing Advance Reservation.
The complete list of existing
Advance Reservations can be obtained using the qrstat(1) command.
Note that the -ar option adds implicitly the -w e option if not otherwise requested.
Qalter allows changing this option even while the job executes.
The modified parameter will only be in effect
after a restart or migration of the job however.
If this option or a corresponding value in qmon is specified
then this value will be passed to defined JSV instances as parameter
with the name ar. (see -jsv option below or find more information
concerning JSV in jsv(1))""")
if prog in ['qsub', 'qsh', 'qrsh', 'qlogin', 'qalter']:
self.parser.add_argument('-A',
metavar='account_string',
help="""\
Available for qsub, qsh, qrsh, qlogin and qalter only.
Identifies the account to which the resource consumption of the
job should be charged. The account_string should
conform to the name definition in M sge_types 1 .
In the absence of this parameter Grid Engine will place the
default account string "ge" in the accounting record of the job.
Qalter allows changing this option even while the job executes.
If this option or a corresponding value in qmon is specified
then this value will be passed to defined JSV instances as parameter with the name A.
(see -jsv option below or find more information concerning JSV in jsv(1))"""
)
self.parser.add_argument('-binding',
nargs='+',
metavar=('binding_instance',
'binding_strategy'),
help="""\
-binding [ binding_instance ] binding_strategy
A job can request a specific processor core binding (processor affinity)
with this parameter. This request is neither a hard nor a soft request,
it is a hint for the execution host to do this if possible. Please note that
the requested binding strategy is not used for resource selection within
Grid Engine. As a result an execution host might be selected where Grid Engine
does not even know the hardware topology and therefore is not able
to apply the requested binding.
To enforce Grid Engine to select hardware on which the binding can be applied
please use the -l switch in combination with the complex attribute m_topology.
binding_instance is an optional parameter.
It might either be env, pe or set depending on which instance should
accomplish the job to core binding. If the value for binding_instance
is not specified then set will be used.
env means that the environment variable SGE_BINDING will be exported
to the job environment of the job. This variable contains the selected
operating system internal processor numbers. They might be more than selected
cores in presence of SMT or CMT because each core could be represented
by multiple processor identifiers. The processor numbers are space separated.
pe means that the information about the selected cores appears in
the fourth column of the pe_hostfile. Here the logical core and
socket numbers are printed (they start at 0 and have no holes)
in colon separated pairs (i.e. 0,0:1,0 which means core 0 on socket 0 and
core 0 on socket 1). For more information about the $pe_hostfile
check ge_pe(5)
set (default if nothing else is specified). The binding strategy is applied
by Grid Engine. How this is achieved depends on the underlying hardware
architecture of the execution host where the submitted job will be started.
On Solaris 10 hosts a processor set will be created where the job can
exclusively run in. Because of operating system limitations at least
one core must remain unbound. This resource could of course used by an unbound job.
On Linux hosts a processor affinity mask will be set to restrict the job
to run exclusively on the selected cores.
The operating system allows other unbound processes to use these cores.
Please note that on Linux the binding requires a Linux kernel
version of 2.6.16 or greater. It might be even possible to use a kernel with
lower version number but in that case additional kernel patches have to be
applied. The loadcheck tool in the utilbin directory can be used to check
if the hosts capabilities. You can also use the -sep in combination with
-cb of qconf(5) command to identify if Grid Engine is able to recognize the
hardware topology.
Possible values for binding_strategy are as follows:
linear:<amount>[:<socket>,<core>]
striding:<amount>:<n>[:<socket>,<core>]
explicit:[<socket>,<core>;...]<socket>,<core>
For the binding strategy linear and striding there is an optional
socket and core pair attached.
These denotes the mandatory starting point for the first core to bind on.
linear means that Grid Engine tries to bind the job on amount successive cores.
If socket and core is omitted then Grid Engine first allocates successive cores
on the first empty socket found. Empty means that there are
no jobs bound to the socket by Grid Engine. If this is not possible or is
not sufficient Grid Engine tries to
find (further) cores on the socket with the most unbound cores and so on.
If the amount of allocated cores is
lower than requested cores, no binding is done for the job.
If socket and core is specified then Grid Engine
tries to find amount of empty cores beginning with this starting point.
If this is not possible then binding is not done.
striding means that Grid Engine tries to find cores with a certain offset.
It will select amount of empty cores with a offset of n -1 cores in between.
Start point for the search algorithm is socket 0 core 0. As soon as
amount cores are found they will be used to do the job binding.
If there are not enough empty cores or if correct offset cannot be
achieved then there will be no binding done.
explicit binds the specified sockets and cores that are mentioned
in the provided socket/core list. Each socket/core pair has to
be specified only once.If a socket/core pair is already in use by a different job the
whole binding request will be ignored.
Qalter allows changing this option even while the job executes.
The modified parameter will only be in effect
after a restart or migration of the job, however.
If this option or a corresponding value in qmon is specified then these values
will be passe to defined JSV instances as parameters with the names binding_strategy,
binding_type, binding_amount, binding_step, binding_socket,
binding_core, binding_exp_n, binding_exp_socket<id>, binding_exp_core<id>.
Please note that the length of the socket/core value list of the explicit binding is
reported as binding_exp_n.
<id> will be replaced by the position of the socket/core pair within the explicit
list (0 <= id < binding_exp_n). The first socket/core pair of the explicit
binding will be reported with the parameter names bind-
ing_exp_socket0 and binding_exp_core0.
Values that do not apply for the specified binding will not be reported to JSV.
E.g. binding_step will only be
reported for the striding binding and all binding_exp_* values will passed to
JSV if explicit binding was speci‐
fied. (see -jsv option below or find more information concerning
JSV in jsv(1))""")
if prog in ['qsub', 'qrsh']:
self.parser.add_argument('-b',
choices=yesno,
help="""\
Available for qsub, qrsh only. Qalter does not allow changing this option.
This option cannot be embedded in the script file itself.
Gives the user the possibility to indicate explicitly whether command should be
treated as binary or script. If the value of -b is 'y', then command may be a
binary or script. The command might not be accessible from the
submission host. Nothing except the path of the command will be
transferred from the submission host to the
execution host. Path aliasing will be applied to the path of command
before command will be executed.
If the value of -b is 'n' then command needs to be a script and it will
be handled as script. The script file
has to be accessible by the submission host.
It will be transferred to the execution host. qsub/qrsh will search
directive prefixes within script.
qsub will implicitly use -b n whereas qrsh will apply the -b y option
if nothing else is specified.
The value specified with this option or the corresponding value
specified in qmon will only be passed to defined
JSV instances if the value is yes. The name of the parameter will be b.
The value will be y also when then long
form yes was specified during submission.
(see -jsv option below or find more information concerning JSV in
jsv(1))
Please note that submission of command as script (-b n) can have a
significant performance impact,especially for short running jobs and big job scripts.
Script submission adds a number of operations to the submission
process: The job script needs to be
- parsed at client side (for special comments)
- transferred from submit client to qmaster
- spooled in qmaster
- transferred to execd at job execution
- spooled in execd
- removed from spooling both in execd and qmaster once the job is done
If job scripts are available on the execution nodes, e.g. via NFS, binary
submission can be the better choice.""")
if prog in ['qsub', 'qalter']:
self.parser.add_argument('-c',
metavar='occasion_specifier',
help="""\
Available for qsub and qalter only.
Defines or redefines whether the job should be checkpointed, and if so,
under what circumstances. The specifica‐
tion of the checkpointing occasions with this option overwrites the
definitions of the when parameter in the
checkpointing environment (see checkpoint(5)) referenced by the qsub
-ckpt switch. Possible values for occa‐
sion_specifier are
n no checkpoint is performed.
s checkpoint when batch server is shut down.
m checkpoint at minimum CPU interval.
x checkpoint when job gets suspended.
<interval> checkpoint in the specified time interval.
The minimum CPU interval is defined in the queue configuration (see
queue_conf(5) for details). <interval> has
to be specified in the format hh:mm:ss.
The maximum of <interval> and the queue's minimum CPU interval is used
if <interval> is specified. This is done to ensure that a machine is not
overloaded by checkpoints being generated too frequently.
The value specified with this option or the corresponding value specified
in qmon will be passed to defined JSV
instances. The <interval> will be available as parameter with the name c_interval.
The character sequence
specified will be available as parameter with the name c_occasion.
Please note that if you change c_occasion via
JSV then the last setting of c_interval will be overwritten and vice versa.
(see -jsv option below or find more
information concerning JSV in jsv(1))""")
if prog in ['qsub', 'qalter']:
self.parser.add_argument('-ckpt',
metavar='ckpt_name',
help="""\
Available for qsub and qalter only.
Selects the checkpointing environment (see checkpoint(5)) to be used
for checkpointing the job. Also declares the job to be a checkpointing job.
If this option or a corresponding value in qmon is specified then this
value will be passed to defined JSV
instances as parameter with the name ckpt. (see -jsv option below or
find more information concerning JSV in jsv(1))""")
if prog in ['qsub', 'qsh', 'qrsh', 'qlogin']:
self.parser.add_argument('-clear',
action='store_true',
help="""\
Available for qsub, qsh, qrsh, and qlogin only.
Causes all elements of the job to be reset to the initial default
status prior to applying any modifications (if
any) appearing in this specific command.""")
if prog in ['qsub', 'qsh', 'qrsh', 'qalter']:
self.parser.add_argument('-cwd',
action='store_true',
help="""\
Available for qsub, qsh, qrsh and qalter only.
Execute the job from the current working directory.
This switch will activate Grid Engine's path aliasing
facility, if the corresponding configuration files are present (see ge_aliases(5)).
In the case of qalter, the previous definition of the current working
directory will be overwritten if qalter is
executed from a different directory than the preceding qsub or qalter.
Qalter allows changing this option even while the job executes.
The modified parameter will only be in effect
after a restart or migration of the job, however.
If this option or a corresponding value in qmon is specified
then this value will be passed to defined JSV
instances as parameter with the name cwd. The value of this parameter
will be the absolute path to the current
working directory. JSV scripts can remove the path from jobs during the
verification process by setting the
value of this parameter to an empty string.
As a result the job behaves as if -cwd was not specified during job
submission. (see -jsv option below or find more information
concerning JSV in jsv(1))""")
if prog in ['qsub', 'qrsh']:
self.parser.add_argument('-C',
metavar='prefix_string',
help="""\
Available for qsub and qrsh with script submission (-b n).
Prefix_string defines the prefix that declares a directive in the job's command.
The prefix is not a job
attribute, but affects the behavior of qsub and qrsh.
If prefix is a null string, the command will not be
scanned for embedded directives.
The directive prefix consists of two ASCII characters which,
when appearing in the first two bytes of a script
line, indicate that what follows is an Grid Engine command. The default is "#$".
The user should be aware that changing the first delimiting character
can produce unforeseen side effects. If
the script file contains anything other than a "#" character in the first byte
position of the line, the shell
processor for the job will reject the line and may exit the job prematurely.
If the -C option is present in the script file, it is ignored."""
)
if prog in ['qsub', 'qsh', 'qrsh', 'qlogin', 'qalter']:
self.parser.add_argument('-dc',
action='append',
metavar='variable',
help="""\
-dc variable,...
Available for qsub, qsh, qrsh, qlogin and qalter only.
Removes the given variable(s) from the job's context. Multiple -ac, -dc, and
-sc options may be given. The order is important.
Qalter allows changing this option even while the job executes.
The outcome of the evaluation of all -ac, -dc, and -sc options or corresponding
values in qmon is passed to
defined JSV instances as parameter with the name ac. (see -jsv option below or
find more information concerning
JSV in jsv(1))""")
if prog in ['qsh', 'qrsh']:
self.parser.add_argument('-display',
metavar='display_specifier',
help="""\
Available for qsh and qrsh.
Directs xterm(1) to use display_specifier in order to contact the X server.
The display_specifier has to con‐
tain the hostname part of the display name (e.g. myhost:1).
Local display names (e.g. :0) cannot be used in
grid environments. Values set with the -display option overwrite settings
from the submission environment and
from -v command line options.
If this option or a corresponding value in qmon is specified then this
value will be passed to defined JSV
instances as parameter with the name display. This value will also be available
in the job environment which
might optionally be passed to JSV scripts. The variable name will be DISPLAY.
(see -jsv option below or find
more information concerning JSV in jsv(1))""")
if prog in ['qsub', 'qsh', 'qrsh', 'qlogin', 'qalter']:
self.parser.add_argument('-dl',
metavar='date_time',
help="""\
Available for qsub, qsh, qrsh, qlogin and qalter only.
Specifies the deadline initiation time in [[CC]YY]MMDDhhmm[.SS] format (see -a
option above). The deadline ini‐
tiation time is the time at which a deadline job has to reach top priority to be
able to complete within a given
deadline. Before the deadline initiation time the priority of a deadline job
will be raised steadily until it
reaches the maximum as configured by the Grid Engine administrator.
This option is applicable only for users allowed to submit deadline jobs.
If this option or a corresponding value in qmon is specified then this
value will be passed to defined JSV
instances as parameter with the name dl. The format for the date_time value
is CCYYMMDDhhmm.SS (see -jsv option
below or find more information concerning JSV in jsv(1))""")
if prog in ['qsub', 'qsh', 'qrsh', 'qlogin', 'qalter']:
self.parser.add_argument('-e',
metavar='path',
help="""\
-e [[hostname]:]path,...
Available for qsub, qsh, qrsh, qlogin and qalter only.
Defines or redefines the path used for the standard error stream of the job.
For qsh, qrsh and qlogin only the
standard error stream of prolog and epilog is redirected. If the path
constitutes an absolute path name, the
error-path attribute of the job is set to path, including the hostname.
If the path name is relative, Grid
Engine expands path either with the current working directory path
(if the -cwd switch (see above) is also spec‐
ified) or with the home directory path. If hostname is present,
the standard error stream will be placed in the
corresponding location only if the job runs on the specified host.
If the path contains a ":" without a host‐
name, a leading ":" has to be specified.
By default the file name for interactive jobs is /dev/null.
For batch jobs the default file name has the form
job_name.ejob_id and job_name.ejob_id.task_id for array job tasks (see -t option
below).
If path is a directory, the standard error stream of the job will be put
in this directory under the default
file name. If the pathname contains certain pseudo environment variables,
their value will be expanded at run‐
time of the job and will be used to constitute the standard error stream path name.
The following pseudo envi‐
ronment variables are supported currently:
$HOME home directory on execution machine
$USER user ID of job owner
$JOB_ID current job ID
$JOB_NAME current job name (see -N option)
$HOSTNAME name of the execution host
$TASK_ID array job task index number
Alternatively to $HOME the tilde sign "~" can be used as common in csh(1)
or ksh(1). Note, that the "~" sign
also works in combination with user names, so that "~<user>" expands to the
home directory of <user>. Using
another user ID than that of the job owner requires corresponding permissions,
of course.
Qalter allows changing this option even while the job executes.
The modified parameter will only be in effect
after a restart or migration of the job, however.
If this option or a corresponding value in qmon is specified then this value
will be passed to defined JSV
instances as parameter with the name e. (see -jsv option below or
find more information concerning JSV in
jsv(1))""")
if prog in ['qsub', 'qsh', 'qrsh', 'qlogin', 'qalter']:
self.parser.add_argument('-hard',
action='store_true',
help="""\
Available for qsub, qsh, qrsh, qlogin and qalter only.
Signifies that all -q and -l resource requirements following in the command
line will be hard requirements and
must be satisfied in full before a job can be scheduled.
As Grid Engine scans the command line and script file for Grid Engine options
and parameters it builds a list of
resources required by a job. All such resource requests are considered as
absolutely essential for the job to
commence. If the -soft option (see below) is encountered during the scan then
all following resources are desig‐
nated as "soft requirements" for execution, or "nice-to-have, but not essential".
If the -hard flag is encoun‐
tered at a later stage of the scan, all resource requests following it once again
become "essential". The -hard
and -soft options in effect act as "toggles" during the scan.
If this option or a corresponding value in qmon is specified then the corresponding
-q and -l resource require‐
ments will be passed to defined JSV instances as parameter with the names
q_hard and l_hard. Find for informa‐
tion in the sections describing -q and -l. (see -jsv option below or find
more information concerning JSV in
jsv(1))""")
if prog in ['qsub', 'qrsh', 'qalter', 'qresub']:
# NOTE in SGE this is -h, here I have renamed it to -hold
# TODO check if multiple holds are parsed correctly
self.parser.add_argument('-hold',
choices='usonUOS',
help="""\
NOTE: Originally defined as -h, but changed to -hold here.
Available for qsub (only -h), qrsh, qalter and qresub (hold state is
removed when not set explicitly).
List of holds to place on a job, a task or some tasks of a job.
`u' denotes a user hold.
`s' denotes a system hold.
`o' denotes a operator hold.
`n' denotes no hold (requires manager privileges).
As long as any hold other than `n' is assigned to the job the job is
not eligible for execution. Holds can be
released via qalter and qrls(1). In case of qalter this is supported
by the following additional option speci‐
fiers for the -h switch:
`U' removes a user hold.
`S' removes a system hold.
`O' removes a operator hold.
Grid Engine managers can assign and remove all hold types,
Grid Engine operators can assign and remove user and
operator holds, and users can only assign or remove user holds.
In the case of qsub only user holds can be placed on a job and thus
only the first form of the option with the
-h switch alone is allowed. As opposed to this, qalter requires
the second form described above.
An alternate means to assign hold is provided by the qhold(1) facility.
If the job is a array job (see the -t option below), all tasks specified via
-t are affected by the -h operation
simultaneously.
Qalter allows changing this option even while the job executes.
The modified parameter will only be in effect
after a restart or migration of the job, however.
If this option is specified with qsub or during the submission
of a job in qmon then the parameter h with the
value u will be passed to the defined JSV instances indicating that
the job will be in user hold after the sub‐
mission finishes. (see -jsv option below or find more information
concerning JSV in jsv(1))""")
if prog in ['qsub', 'qrsh', 'qalter']:
self.parser.add_argument('-hold_jid',
nargs='+',
metavar='wc_job_list',
help="""\
Available for qsub, qrsh, and qalter only. See sge_types(1).
for wc_job_list definition.
Defines or redefines the job dependency list of the submitted job.
A reference by job name or pattern is only
accepted if the referenced job is owned by the same user as the referring job.
The submitted job is not eligible
for execution unless all jobs referenced in the comma-separated job id and/or
job name list have completed. If
any of the referenced jobs exits with exit code 100, the submitted
job will remain ineligible for execution.
With the help of job names or regular pattern one can specify a job
dependency on multiple jobs satisfying the
regular pattern or on all jobs with the requested name.
The name dependencies are resolved at submit time and
can only be changed via qalter. New jobs or name changes
of other jobs will not be taken into account.
Qalter allows changing this option even while the job executes.
The modified parameter will only be in effect
after a restart or migration of the job, however.
If this option or a corresponding value in qmon is specified
then this value will be passed to defined JSV
instances as parameter with the name hold_jid.
(see -jsv option below or find more information concerning JSV
in jsv(1))""")
if prog in ['qsub', 'qrsh', 'qalter']:
self.parser.add_argument('-hold_jid_ad',
nargs='+',
metavar='wc_job_list',
help="""\
Available for qsub, qrsh, and qalter only. See sge_types(1).
for wc_job_list definition.
Defines or redefines the job array dependency list of
the submitted job. A reference by job name or pattern is
only accepted if the referenced job is owned by the same
user as the referring job. Each sub-task of the submit‐
ted job is not eligible for execution unless the corresponding
sub-tasks of all jobs referenced in the comma-
separated job id and/or job name list have completed.
If any array task of the referenced jobs exits with exit
code 100, the dependent tasks of the submitted job will remain
ineligible for execution.
With the help of job names or regular pattern one can specify
a job dependency on multiple jobs satisfying the
regular pattern or on all jobs with the requested name.
The name dependencies are resolved at submit time and
can only be changed via qalter. New jobs or name changes of other
jobs will not be taken into account.
If either the submitted job or any job in wc_job_list are
not array jobs with the same range of sub-tasks (see
-t option below), the request list will be rejected and the
job create or modify operation will error.
qalter allows changing this option even while the job executes.
The modified parameter will only be in effect
after a restart or migration of the job, however.
If this option or a corresponding value in qmon is
specified then this value will be passed to defined JSV
instances as parameter with the name hold_jid_ad.
(see -jsv option below or find more information concerning
JSV in jsv(1))""")
if prog in ['qsub', 'qalter']:
self.parser.add_argument('-i',
metavar='file',
help="""\
-i [[hostname]:]file,...
Available for qsub, and qalter only.
Defines or redefines the file used for the standard input stream of
the job. If the file constitutes an absolute
filename, the input-path attribute of the job is set to path,
including the hostname. If the path name is rela‐
tive, Grid Engine expands path either with the current working
directory path (if the -cwd switch (see above) is
also specified) or with the home directory path. If hostname is present,
the standard input stream will be
placed in the corresponding location only if the job runs
on the specified host. If the path contains a ":"
without a hostname, a leading ":" has to be specified.
By default /dev/null is the input stream for the job.
It is possible to use certain pseudo variables, whose values
will be expanded at runtime of the job and will be
used to express the standard input stream as described in
the -e option for the standard error stream.
Qalter allows changing this option even while the job executes.
The modified parameter will only be in effect
after a restart or migration of the job, however.
If this option or a corresponding value in qmon is specified then
this value will be passed to defined JSV
instances as parameter with the name i.
(see -jsv option below or find more information concerning JSV in
jsv(1))""")
if prog in ['qrsh', 'qmake']:
self.parser.add_argument('-inherit',
action='store_true',
help="""\
Available only for qrsh and qmake(1).
qrsh allows the user to start a task in an already scheduled parallel job.
The option -inherit tells qrsh to
read a job id from the environment variable JOB_ID and start the
specified command as a task in this job. Please
note that in this case, the hostname of the host where the command
will be executed must precede the command to
execute; the syntax changes to
qrsh -inherit [ other options ] hostname command [ command_args ]
Note also, that in combination with -inherit, most other command line
options will be ignored. Only the options
-verbose, -v and -V will be interpreted. As a replacement to option
-cwd please use -v PWD.
Usually a task should have the same environment (including the
current working directory) as the corresponding
job, so specifying the option -V should be suitable for most applications.
Note: If in your system the qmaster tcp port is not configured as a service,
but rather via the environment
variable GE_QMASTER_PORT, make sure that this variable is set in the
environment when calling qrsh or qmake with
the -inherit option. If you call qrsh or qmake with the
-inherit option from within a job script, export
GE_QMASTER_PORT with the option "-v GE_QMASTER_PORT" either as
a command argument or an embedded directive.
This parameter is not available in the JSV context.
(see -jsv option below or find more information concerning
JSV in jsv(1))""")
if prog in ['qsub', 'qsh', 'qrsh', 'qlogin', 'qalter']:
self.parser.add_argument('-j',
choices=yesno,
help="""\
Available for qsub, qsh, qrsh, qlogin and qalter only.
Specifies whether or not the standard error stream of the job
is merged into the standard output stream.
If both the -j y and the -e options are present,
Grid Engine sets but ignores the error-path attribute.
Qalter allows changing this option even while the job executes.
The modified parameter will only be in effect
after a restart or migration of the job, however.
The value specified with this option or the corresponding
value specified in qmon will only be passed to defined
JSV instances if the value is yes. The name of the parameter will be j.
The value will be y also when then long
form yes was specified during submission.
(see -jsv option below or find more information concerning JSV in
jsv(1))""")
if prog in ['qsub', 'qsh', 'qrsh', 'qlogin', 'qalter']:
self.parser.add_argument('-js',
nargs='?',
type=int,
metavar='job_share',
help="""\
Available for qsub, qsh, qrsh, qlogin and qalter only.
Defines or redefines the job share of the job relative to other jobs.
Job share is an unsigned integer value.
The default job share value for jobs is 0.
The job share influences the Share Tree Policy and the Functional Policy.
It has no effect on the Urgency and
Override Policies (see share_tree(5), sched_conf(5) and the
Grid Engine Installation and Administration Guide
for further information on the resource management policies supported
by Grid Engine).
In case of the Share Tree Policy, users can distribute the tickets to
which they are currently entitled among
their jobs using different shares assigned via -js.
If all jobs have the same job share value, the tickets are
distributed evenly. Otherwise, jobs receive tickets relative
to the different job shares. Job shares are treated
like an additional level in the share tree in the latter case.
In connection with the Functional Policy, the job share can be
used to weight jobs within the functional job
category. Tickets are distributed relative to any uneven
job share distribution treated as a virtual share dis‐
tribution level underneath the functional job category.
If both the Share Tree and the Functional Policy are active,
the job shares will have an effect in both poli‐
cies, and the tickets independently derived in each of them are
added to the total number of tickets for each
job.
If this option or a corresponding value in qmon is specified
then this value will be passed to defined JSV
instances as parameter with the name js. (see -jsv option below or
find more information concerning JSV in
jsv(1))""")
if prog in ['qsub', 'qsh', 'qrsh', 'qlogin']:
self.parser.add_argument('-jsv',
metavar='jsv_url',
help="""\
Available for qsub, qsh, qrsh and qlogin only.
Defines a client JSV instance which will be executed to
verify the job specification before the job is sent to
qmaster.
In contrast to other options this switch will not be overwritten
if it is also used in sge_request files.
Instead all specified JSV instances will be executed to verify
the job to be submitted.
The JSV instance which is directly passed with the commandline
of a client is executed as first to verify the
job specification. After that the JSV instance which might have
been defined in various sge_request files will
be triggered to check the job. Find more details
in man page jsv(1) and sge_request(5).
The syntax of the jsv_url is specified in sge_types(1).()""")
if prog in ['qsub', 'qsh', 'qrsh', 'qlogin', 'qalter']:
self.parser.add_argument('-l',
metavar='keywords',
help="""\
-l resource=value,...
Available for qsub, qsh, qrsh, qlogin and qalter only.
Launch the job in a Grid Engine queue meeting the given resource
request list. In case of qalter the previous
definition is replaced by the specified one.
complex(5) describes how a list of available resources and their
associated valid value specifiers can be
obtained.
There may be multiple -l switches in a single command.
You may request multiple -l options to be soft or hard
both in the same command line. In case of a serial job multiple
-l switches refine the definition for the sought
queue.
Qalter allows changing the value of this option even while the
job is running, but only if the initial list of
resources does not contain a resource that is marked as consumable.
However the modification will only be effec‐
tive after a restart or migration of the job.
If this option or a corresponding value in qmon is specified the
these hard and soft resource requirements will
be passed to defined JSV instances as parameter with the names
l_hard and l_soft. If regular expressions will be
used for resource requests, then these expressions will
be passed as they are. Also shortcut names will not be
expanded. (see -jsv option above or find more information
concerning JSV in jsv(1))""")
if prog in ['qsub', 'qsh', 'qrsh', 'qlogin', 'qalter']:
# TODO check if multiple arguments are parsed correctly
self.parser.add_argument('-m',
nargs='+',
choices='beasn',
help="""\
Available for qsub, qsh, qrsh, qlogin and qalter only.
Defines or redefines under which circumstances mail
is to be sent to the job owner or to the users defined with
the -M option described below. The option arguments
have the following meaning:
`b' Mail is sent at the beginning of the job.
`e' Mail is sent at the end of the job.
`a' Mail is sent when the job is aborted or
rescheduled.
`s' Mail is sent when the job is suspended.
`n' No mail is sent.
Currently no mail is sent when a job is suspended.
Qalter allows changing the b, e, and a option arguments
even while the job executes. The modification of the b
option argument will only be in effect after a restart
or migration of the job, however.
If this option or a corresponding value in qmon is
specified then this value will be passed to defined JSV
instances as parameter with the name m. (see -jsv option
above or find more information concerning JSV in""")
if prog in ['qsub', 'qsh', 'qrsh', 'qlogin', 'qalter']:
self.parser.add_argument('-M',
metavar='user[@host]',
help="""\
-M user[@host],...
Available for qsub, qsh, qrsh, qlogin and qalter only.
Defines or redefines the list of users to which the server
that executes the job has to send mail, if the server
sends mail about the job. Default is the job owner at the originating host.
Qalter allows changing this option even while the job executes.
If this option or a corresponding value in qmon is specified then
this value will be passed to defined JSV
instances as parameter with the name M. (see -jsv option above or
find more information concerning JSV in
jsv(1))""")
if prog in ['qsub', 'qsh', 'qrsh', 'qlogin', 'qalter']:
self.parser.add_argument('-masterq',
nargs='+',
metavar='wc_queue_list',
help="""\
Available for qsub, qrsh, qsh, qlogin and qalter. Only meaningful
for parallel jobs, i.e. together with the -pe option.
Defines or redefines a list of cluster queues, queue domains and
queue instances which may be used to become the
so called master queue of this parallel job. A more detailed
description of wc_queue_list can be found in
sge_types(1). The master queue is defined as the queue where
the parallel job is started. The other queues to
which the parallel job spawns tasks are called slave queues.
A parallel job only has one master queue.
This parameter has all the properties of a resource request
and will be merged with requirements derived from
the -l option described above.
Qalter allows changing this option even while the job executes.
The modified parameter will only be in effect
after a restart or migration of the job, however.
If this option or a corresponding value in qmon is specified
the this hard resource requirement will be passed
to defined JSV instances as parameter with the name masterq.
(see -jsv option above or find more information
concerning JSV in jsv(1))""")
if prog in ['qsub', 'qrsh', 'qalter']:
self.parser.add_argument('-notify',
action='store_true',
help="""\
Available for qsub, qrsh (with command) and qalter only.
This flag, when set causes Grid Engine to send "warning" signals
to a running job prior to sending the signals
themselves. If a SIGSTOP is pending, the job will receive
a SIGUSR1 several seconds before the SIGSTOP. If a
SIGKILL is pending, the job will receive a SIGUSR2 several
seconds before the SIGKILL. This option provides the
running job, before receiving the SIGSTOP or SIGKILL,
a configured time interval to do e.g. cleanup operations.
The amount of time delay is controlled by the notify parameter
in each queue configuration (see queue_conf(5)).
Note that the Linux operating system "misused" the user
signals SIGUSR1 and SIGUSR2 in some early Posix thread
implementations. You might not want to use the
-notify option if you are running multi-threaded applications in
your jobs under Linux, particularly on 2.0 or earlier kernels.
Qalter allows changing this option even while the job executes.
Only if this option is used the parameter named notify with
the value y will be passed to defined JSV instances.
(see -jsv option above or find more information concerning
JSV in jsv(1))""")
if prog in ['qsub', 'qsh', 'qrsh', 'qlogin']:
self.parser.add_argument('-now',
choices=yesno,
help="""\
Available for qsub, qsh, qlogin and qrsh.
-now y tries to start the job immediately or not at all.
The command returns 0 on success, or 1 on failure (also
if the job could not be scheduled immediately).
For array jobs submitted with the -now option, if all tasks
cannot be immediately scheduled, no tasks are scheduled.
-now y is default for qsh, qlogin and qrsh
With the -now n option, the job will be put into the pending
queue if it cannot be executed immediately. -now n
is default for qsub.
The value specified with this option or the corresponding
value specified in qmon will only be passed to defined
JSV instances if the value is yes. The name of the
parameter will be now. The value will be y also when then
long form yes was specified during submission.
(see -jsv option above or find more information concerning JSV
in jsv(1))""")
if prog in ['qsub', 'qsh', 'qrsh', 'qlogin', 'qalter']:
self.parser.add_argument('-N',
metavar='name',
help="""\
Available for qsub, qsh, qrsh, qlogin and qalter only.
The name of the job. The name should follow the "name"
definition in sge_types(1). Invalid job names will be
denied at submit time.
If the -N option is not present, Grid Engine assigns
the name of the job script to the job after any directory
pathname has been removed from the script-name.
If the script is read from standard input, the job name defaults
to STDIN.
In the case of qsh or qlogin with the -N option is absent,
the string `INTERACT' is assigned to the job.
In the case of qrsh if the -N option is absent, the resulting
job name is determined from the qrsh command line
by using the argument string up to the first
occurrence of a semicolon or whitespace and removing the directory
pathname.
Qalter allows changing this option even while the job executes.
The value specified with this option or the corresponding value
specified in qmon will be passed to defined JSV
instances as parameter with the name N. (see -jsv
option above or find more information concerning JSV in
jsv(1))""")
if prog in ['qrsh']:
self.parser.add_argument('-noshell',
action='store_true',
help="""\
Available only for qrsh with a command line.
Do not start the command line given to qrsh in a user's login shell,
i.e. execute it without the wrapping
shell.
This option can be used to speed up execution as some overhead,
like the shell startup and sourcing the shell
resource files, is avoided.
This option can only be used if no shell-specific command line
parsing is required. If the command line contains
shell syntax like environment variable substitution or (back) quoting,
a shell must be started. In this case,
either do not use the -noshell option or include the shell call in the command line.
Example:
qrsh echo '$HOSTNAME'
Alternative call with the -noshell option
qrsh -noshell /bin/tcsh -f -c 'echo $HOSTNAME'""")
if prog in ['qrsh']:
self.parser.add_argument('-nostdin',
action='store_true',
help="""\
Available only for qrsh.
Suppress the input stream STDIN - qrsh will pass the option -n
to the rsh(1) command. This is especially useful,
if multiple tasks are executed in parallel using qrsh, e.g.
in a make(1) process - it would be undefined, which
process would get the input.""")
if prog in ['qsub', 'qsh', 'qrsh', 'qlogin', 'qalter']:
self.parser.add_argument('-o',
metavar='path',
help="""\
-o [[hostname]:]path,...
Available for qsub, qsh, qrsh, qlogin and qalter only.
The path used for the standard output stream of the job.
The path is handled as described in the -e option for
the standard error stream.
By default the file name for standard output has the
form job_name.ojob_id and job_name.ojob_id.task_id for
array job tasks (see -t option below).
Qalter allows changing this option even while the job executes.
The modified parameter will only be in effect
after a restart or migration of the job, however.
If this option or a corresponding value in qmon is
specified then this value will be passed to defined JSV
instances as parameter with the name o. (see -jsv option
above or find more information concerning JSV in
jsv(1))""")
if prog in ['qalter']:
self.parser.add_argument('-ot',
metavar='override_tickets',
help="""\
Available for qalter only.
Changes the number of override tickets for the specified job.
Requires manager/operator privileges.""")
if prog in ['qsub', 'qsh', 'qrsh', 'qlogin', 'qalter']:
self.parser.add_argument('-P',
metavar='project_name',
help="""\
Available for qsub, qsh, qrsh, qlogin and qalter only.
Specifies the project to which this job is assigned.
The administrator needs to give permission to individual
users to submit jobs to a specific project. (see -aprj option to qconf(1)).
If this option or a corresponding value in qmon is specified then
this value will be passed to defined JSV
instances as parameter with the name ot. (see -jsv option
above or find more information concerning JSV in
jsv(1))""")
if prog in ['qsub', 'qsh', 'qrsh', 'qlogin', 'qalter']:
self.parser.add_argument('-p',
metavar='priority',
help="""\
Available for qsub, qsh, qrsh, qlogin and qalter only.
Defines or redefines the priority of the job relative to other jobs.
Priority is an integer in the range -1023
to 1024. The default priority value for jobs is 0.
Users may only decrease the priority of their jobs.
Grid Engine managers and administrators may also increase
the priority associated with jobs. If a pending job has higher priority,
it is earlier eligible for being dis‐
patched by the Grid Engine scheduler.
If this option or a corresponding value in qmon is specified and
the priority is not 0 then this value will be
passed to defined JSV instances as parameter with the name p.
(see -jsv option above or find more information
concerning JSV in jsv(1))""")
        if prog in ['qsub', 'qsh', 'qrsh', 'qlogin', 'qalter']:
            # NOTE(review): '-slot' is not a standard Grid Engine submit
            # option, and its help text below is copied verbatim from '-p'
            # (it describes job priority, not slots). TODO: confirm what
            # '-slot' actually controls in this tool and rewrite the help
            # text accordingly.
            self.parser.add_argument('-slot',
                                     metavar='slot',
                                     help="""\
Available for qsub, qsh, qrsh, qlogin and qalter only.
Defines or redefines the priority of the job relative to other jobs.
Priority is an integer in the range -1023
to 1024. The default priority value for jobs is 0.
Users may only decrease the priority of their jobs.
Grid Engine managers and administrators may also increase
the priority associated with jobs. If a pending job has higher priority,
it is earlier eligible for being dis‐
patched by the Grid Engine scheduler.
If this option or a corresponding value in qmon is specified and
the priority is not 0 then this value will be
passed to defined JSV instances as parameter with the name p.
(see -jsv option above or find more information
concerning JSV in jsv(1))""")
if prog in ['qsub', 'qsh', 'qrsh', 'qlogin', 'qalter']:
self.parser.add_argument('-pe',
nargs=2,
metavar=('parallel_environment', 'n'),
help="""\
-pe parallel_environment n[-[m]]|[-]m,...
Available for qsub, qsh, qrsh, qlogin and qalter only.
Parallel programming environment (PE) to instantiate.
For more detail about PEs, please see the sge_types(1).
Qalter allows changing this option even while the job executes.
The modified parameter will only be in effect
after a restart or migration of the job, however.
If this option or a corresponding value in qmon is specified
then the parameters pe_name, pe_min and pe_max will
be passed to configured JSV instances where pe_name will be the
name of the parallel environment and the values
pe_min and pe_max represent the values n and m which have been
provided with the -pe option. A missing specifi‐
cation of m will be expanded as value 9999999 in JSV scripts
and it represents the value infinity. (see -jsv
option above or find more information concerning JSV in jsv(1))"""
)
if prog in ['qrsh', 'qlogin']:
self.parser.add_argument('-pty',
choices=yesno,
help="""\
Available for qrsh and qlogin only.
-pty yes enforces the job to be started in a pseudo terminal (pty).
If no pty is available, the job start fails.
-pty no enforces the job to be started without a pty.
By default, qrsh without a command and qlogin start the
job in a pty, qrsh with a command starts the job without a pty.
This parameter is not available in the JSV context.
(see -jsv option above or find more information concerning
JSV in jsv(1))""")
if prog in ['qsub', 'qrsh', 'qsh', 'qlogin', 'qalter']:
self.parser.add_argument('-q',
nargs='+',
metavar='wc_queue_list',
help="""\
Available for qsub, qrsh, qsh, qlogin and qalter.
Defines or redefines a list of cluster queues,
queue domains or queue instances which may be used to execute
this job. Please find a description of wc_queue_list in sge_types(1).
This parameter has all the properties of
a resource request and will be merged with requirements derived from the
-l option described above.
Qalter allows changing this option even while the job executes.
The modified parameter will only be in effect
after a restart or migration of the job, however.
If this option or a corresponding value in qmon is specified
the these hard and soft resource requirements will
be passed to defined JSV instances as parameters with the
names q_hard and q_soft. If regular expressions will
be used for resource requests, then these expressions will
be passed as they are. Also shortcut names will not
be expanded. (see -jsv option above or find more information
concerning JSV in jsv(1))""")
if prog in ['qsub', 'qrsh', 'qsh', 'qlogin', 'qalter']:
self.parser.add_argument('-R',
choices=yesno,
help="""\
Available for qsub, qrsh, qsh, qlogin and qalter.
Indicates whether a reservation for this job should be done.
Reservation is never done for immediate jobs, i.e.
jobs submitted using the -now yes option. Please note that
regardless of the reservation request, job reserva‐
tion might be disabled using max_reservation in sched_conf(5)
and might be limited only to a certain number of
high priority jobs.
By default jobs are submitted with the -R n option.
The value specified with this option or the corresponding value
specified in qmon will only be passed to defined
JSV instances if the value is yes. The name of the parameter will be R.
The value will be y also when then long
form yes was specified during submission.
(see -jsv option above or find more information concerning JSV in
jsv(1))""")
if prog in ['qsub', 'qalter']:
self.parser.add_argument('-r',
choices=yesno,
help="""\
Available for qsub and qalter only.
Identifies the ability of a job to be rerun or not.
If the value of -r is 'yes', the job will be rerun if the
job was aborted without leaving a consistent exit state.
(This is typically the case if the node on which the
job is running crashes). If -r is 'no',
the job will not be rerun under any circumstances.
Interactive jobs submitted with qsh, qrsh or qlogin are not rerunnable.
Qalter allows changing this option even while the job executes.
The value specified with this option or the corresponding value specified
in qmon will only be passed to defined
JSV instances if the value is yes. The name of the parameter will be r.
The value will be y also when then long
form yes was specified during submission. (see -jsv option above or
find more information concerning JSV in
jsv(1))""")
if prog in ['qsub', 'qrsh', 'qsh', 'qlogin', 'qalter']:
self.parser.add_argument('-sc',
action='append',
metavar='variable[=value]',
help="""\
-sc variable[=value],...
Available for qsub, qsh, qrsh, qlogin and qalter only.
Sets the given name/value pairs as the job's context. Value may be omitted.
Grid Engine replaces the job's pre‐
viously defined context with the one given as the argument.
Multiple -ac, -dc, and -sc options may be given.
The order is important.
Contexts provide a way to dynamically attach and remove meta-information
to and from a job. The context vari‐
ables are not passed to the job's execution context in its environment.
Qalter allows changing this option even while the job executes.
The outcome of the evaluation of all -ac, -dc, and -sc options
or corresponding values in qmon is passed to
defined JSV instances as parameter with the name ac.
(see -jsv option above or find more information concerning
JSV in jsv(1))""")
if prog in ['qsub']:
self.parser.add_argument('-shell',
choices=yesno,
help="""\
Available only for qsub.
-shell n causes qsub to execute the command line directly,
as if by exec(2). No command shell will be executed
for the job. This option only applies when -b y is also used.
Without -b y, -shell n has no effect.
This option can be used to speed up execution as some overhead,
like the shell startup and sourcing the shell
resource files is avoided.
This option can only be used if no shell-specific command line parsing
is required. If the command line contains
shell syntax, like environment variable substitution or (back) quoting,
a shell must be started. In this case
either do not use the -shell n option or execute the shell as the
command line and pass the path to the exe‐
cutable as a parameter.
If a job executed with the -shell n option fails due to a user error,
such as an invalid path to the executable,
the job will enter the error state.
-shell y cancels the effect of a previous -shell n. Otherwise, it has no effect.
See -b and -noshell for more information.
The value specified with this option or the corresponding value
specified in qmon will only be passed to defined
JSV instances if the value is yes. The name of the parameter
will be shell. The value will be y also when then
long form yes was specified during submission.
(see -jsv option above or find more information concerning JSV
in jsv(1))""")
if prog in ['qsub', 'qrsh', 'qsh', 'qlogin', 'qalter']:
self.parser.add_argument('-soft',
action='store_true',
help="""\
Available for qsub, qsh, qrsh, qlogin and qalter only.
Signifies that all resource requirements following in the command
line will be soft requirements and are to be
filled on an "as available" basis.
As Grid Engine scans the command line and script file for
Grid Engine options and parameters, it builds a list
of resources required by the job. All such resource requests are
considered as absolutely essential for the job
to commence. If the -soft option is encountered during the
scan then all following resources are designated as
"soft requirements" for execution, or "nice-to-have, but not essential".
If the -hard flag (see above) is
encountered at a later stage of the scan, all resource requests following
it once again become "essential". The
-hard and -soft options in effect act as "toggles" during the scan.
If this option or a corresponding value in qmon is
specified then the corresponding -q and -l resource require‐
ments will be passed to defined JSV instances as parameter
with the names q_soft and l_soft. Find for informa‐
tion in the sections describing -q and -l. (see -jsv option
above or find more information concerning JSV in
jsv(1))""")
if prog in ['qsub']:
self.parser.add_argument('-sync',
choices=yesno,
help="""\
Available for qsub.
-sync y causes qsub to wait for the job to complete before exiting.
If the job completes successfully, qsub's
exit code will be that of the completed job.
If the job fails to complete successfully, qsub will print out a
error message indicating why the job failed and will have an exit code of 1.
If qsub is interrupted, e.g. with
CTRL-C, before the job completes, the job will be canceled.
With the -sync n option, qsub will exit with an exit code of 0 as soon as the
job is submitted successfully.
-sync n is default for qsub.
If -sync y is used in conjunction with -now y, qsub will behave
as though only -now y were given until the job
has been successfully scheduled, after which time qsub will behave
as though only -sync y were given.
If -sync y is used in conjunction with -t n[-m[:i]], qsub will
wait for all the job's tasks to complete before
exiting. If all the job's tasks complete successfully, qsub's
exit code will be that of the first completed job
tasks with a non-zero exit code, or 0 if all job tasks exited
with an exit code of 0. If any of the job's tasks
fail to complete successfully, qsub will print out an
error message indicating why the job task(s) failed and
will have an exit code of 1. If qsub is interrupted,
e.g. with CTRL-C, before the job completes, all of the
job's tasks will be canceled.
Information that this switch was specified during
submission is not available in the JSV context. (see -jsv
option above or find more information concerning JSV in jsv(1))"""
)
if prog in ['qsub', 'qsh', 'qalter']:
self.parser.add_argument('-S',
metavar='pathname',
help="""\
-S [[hostname]:]pathname,...
Available for qsub, qsh and qalter.
Specifies the interpreting shell for the job.
Only one pathname component without a host specifier is valid and
only one path name for a given host is allowed.
Shell paths with host assignments define the interpreting shell
for the job if the host is the execution host.
The shell path without host specification is used if the execu‐
tion host matches none of the hosts in the list.
Furthermore, the pathname can be constructed with pseudo
environment variables as described for the -e option
above.
In the case of qsh the specified shell path is used to
execute the corresponding command interpreter in the
xterm(1) (via its -e option) started on behalf of the interactive job.
Qalter allows changing this option even
while the job executes. The modified parameter will only be in effect
after a restart or migration of the job,
however.
If this option or a corresponding value in qmon is
specified then this value will be passed to defined JSV
instances as parameter with the name S. (see -jsv option
above or find more information concerning JSV in
jsv(1))""")
if True or prog in ['qsub', 'qalter']:
self.parser.add_argument('-t',
metavar='n[-m[:s]]',
help="""\
Available for qsub and qalter only.
Submits a so called Array Job, i.e. an array of identical
tasks being differentiated only by an index number and
being treated by Grid Engine almost like a series of jobs.
The option argument to -t specifies the number of
array job tasks and the index number which will be associated with the tasks.
The index numbers will be exported
to the job tasks via the environment variable GE_TASK_ID.
The option arguments n, m and s will be available
through the environment variables GE_TASK_FIRST,
GE_TASK_LAST and GE_TASK_STEPSIZE.
Following restrictions apply to the values n and m:
1 <= n <= MIN(2^31-1, max_aj_tasks)
1 <= m <= MIN(2^31-1, max_aj_tasks)
n <= m
max_aj_tasks is defined in the cluster configuration (see sge_conf(5))
The task id range specified in the option argument may be a single
number, a simple range of the form n-m or a
range with a step size. Hence, the task id range specified by
2-10:2 would result in the task id indexes 2, 4,
6, 8, and 10, for a total of 5 identical tasks, each with
the environment variable GE_TASK_ID containing one of
the 5 index numbers.
All array job tasks inherit the same resource requests and
attribute definitions as specified in the qsub or
qalter command line, except for the -t option.
The tasks are scheduled independently and, provided enough
resources exist, concurrently, very much like separate jobs.
However, an array job or a sub-array there of can
be accessed as a single unit by commands like qmod(1) or qdel(1).
See the corresponding manual pages for fur‐
ther detail.
Array jobs are commonly used to execute the same type of operation
on varying input data sets correlated with
the task index number. The number of tasks in a array job is unlimited.
STDOUT and STDERR of array job tasks will be written into different
files with the default location
<jobname>.['e'|'o']<job_id>'.'<task_id>
In order to change this default, the -e and -o options (see above)
can be used together with the pseudo environ‐
ment variables $HOME, $USER, $JOB_ID, $JOB_NAME, $HOSTNAME, and $GE_TASK_ID.
Note, that you can use the output redirection to divert the output
of all tasks into the same file, but the
result of this is undefined.
If this option or a corresponding value in qmon is specified
then this value will be passed to defined JSV
instances as parameters with the name t_min, t_max and t_step
(see -jsv option above or find more information
concerning JSV in jsv(1))""")
if prog in ['qsub', 'qalter']:
self.parser.add_argument('-tc',
type=int,
metavar='max_running_tasks',
help="""\
-allow users to limit concurrent array job task
execution. Parameter max_running_tasks specifies maximum number
of simultaneously running tasks. For example we have
running SGE with 10 free slots. We call qsub -t 1-100 -tc
2 jobscript. Then only 2 tasks will be scheduled to run even
when 8 slots are free.""")
if prog in ['qsub']:
self.parser.add_argument('-terse',
action='store_true',
help="""\
Available for qsub only.
-terse causes the qsub to display only the job-id of the
job being submitted rather than the regular "Your job
..." string. In case of an error the error is reported on stderr as usual.
This can be helpful for scripts which need to parse qsub output to get the job-id.
Information that this switch was specified during submission
is not available in the JSV context. (see -jsv
option above or find more information concerning JSV in jsv(1))"""
)
if prog in ['qalter']:
self.parser.add_argument('-u',
metavar='username',
help="""\
-u username,...
Available for qalter only. Changes are only made
on those jobs which were submitted by users specified in the
list of usernames. For managers it is possible to use
the qalter -u '*' command to modify all jobs of all
users.
If you use the -u switch it is not permitted to
specify an additional wc_job_range_list.""")
if prog in ['qsub', 'qrsh', 'qalter']:
self.parser.add_argument('-v',
metavar='variable[=value]',
help="""\
-v variable[=value],...
Available for qsub, qrsh (with command argument) and qalter.
Defines or redefines the environment
variables to be exported to the execution context of the job. If the -v
option is present Grid Engine will add the
environment variables defined as arguments to the switch and, option‐
ally, values of specified variables, to the execution context of the job.
Qalter allows changing this option even while the job executes.
The modified parameter will only be in effect
after a restart or migration of the job, however.
All environment variables specified with -v, -V or the
DISPLAY variable provided with -display will be exported
to the defined JSV instances only optionally when this is
requested explicitly during the job submission verifi‐
cation. (see -jsv option above or find more information concerning JSV in jsv(1))"""
)
if prog in ['qrsh', 'qmake']:
self.parser.add_argument('-verbose',
action='store_true',
help="""\
Available only for qrsh and qmake(1).
Unlike qsh and qlogin, qrsh does not output any
informational messages while establishing the session, compliant
with the standard rsh(1) and rlogin(1) system calls.
If the option -verbose is set, qrsh behaves like the qsh
and qlogin commands, printing information about the
process of establishing the rsh(1) or rlogin(1) session.""")
if prog in ['qsub', 'qrsh', 'qsh', 'qlogin', 'qalter']:
self.parser.add_argument('-verify',
action='store_true',
help="""\
Available for qsub, qsh, qrsh, qlogin and qalter.
Instead of submitting a job, prints detailed information
about the would-be job as though qstat(1) -j were used,
including the effects of command-line parameters and
the external environment.""")
if prog in ['qsub', 'qrsh', 'qsh', 'qlogin', 'qalter']:
# TODO parse acceptability of qrsh argument properly
self.parser.add_argument('-V',
action='store_true',
help="""\
Available for qsub, qsh, qrsh with command and qalter.
Specifies that all environment variables active within
the qsub utility be exported to the context of the job.
All environment variables specified with -v, -V or the DISPLAY
variable provided with -display will be exported
to the defined JSV instances only optionally when this is
requested explicitly during the job submission verifi‐
cation. (see -jsv option above or find more information
concerning JSV in jsv(1))""")
if prog in ['qsub', 'qrsh', 'qsh', 'qlogin', 'qalter']:
self.parser.add_argument('-w',
choices='ewnpv',
help="""\
Available for qsub, qsh, qrsh, qlogin and qalter.
Specifies a validation level applied to the job to be submitted
(qsub, qlogin, and qsh) or the specified queued
job (qalter). The information displayed indicates whether the
job can possibly be scheduled assuming an empty
system with no other jobs. Resource requests exceeding the
configured maximal thresholds or requesting unavail‐
able resource attributes are possible causes for jobs to fail this validation.
The specifiers e, w, n and v define the following validation modes:
`e' error - jobs with invalid requests will be
rejected.
`w' warning - only a warning will be displayed
for invalid requests.
`n' none - switches off validation; the default for
qsub, qalter, qrsh, qsh
and qlogin.
`p' poke - does not submit the job but prints a
validation report based on a cluster as is with
all resource utilizations in place.
`v' verify - does not submit the job but prints a
validation report based on an empty cluster.
Note, that the necessary checks are performance consuming
and hence the checking is switched off by default. It
should also be noted that load values are not taken
into account with the verification since they are assumed to
be too volatile. To cause -w e verification to be passed
at submission time, it is possible to specify non-
volatile values (non-consumables) or maximum values
(consumables) in complex_values.""")
if prog in ['qsub', 'qrsh', 'qsh', 'qalter']:
self.parser.add_argument('-wd',
metavar='working_dir',
help="""\
Available for qsub, qsh, qrsh and qalter only.
Execute the job from the directory specified in working_dir.
This switch will activate Grid Engine's path
aliasing facility, if the corresponding configuration files are present
(see ge_aliases(5)).
Qalter allows changing this option even while the job executes.
The modified parameter will only be in effect
after a restart or migration of the job, however.
The parameter value will be available in defined JSV
instances as parameter with the name cwd (see -cwd switch above or
find more information concerning JSV in
jsv(1))""")
if prog in ['qsub', 'qrsh']:
self.parser.add_argument('command',
help="""\
Available for qsub and qrsh only.
The job's scriptfile or binary. If not present or if the operand
is the single-character string '-', qsub reads
the script from standard input.
The command will be available in defined JSV instances as parameter
with the name CMDNAME (see -jsv option above
or find more information concerning JSV in jsv(1))""")
if prog in ['qsub', 'qrsh', 'qalter']:
self.parser.add_argument('command_args',
nargs='*',
help="""\
Available for qsub, qrsh and qalter only.
Arguments to the job. Not valid if the script is entered from standard input.
Qalter allows changing this option even while the job executes.
The modified parameter will only be in effect
after a restart or migration of the job, however.
The number of command arguments is provided to configured
JSV instances as parameter with the name CMDARGS. Also
the argument values can by accessed. Argument names
have the format CMDARG<number> where <number> is a integer
between 0 and CMDARGS - 1. (see -jsv option above or
find more information concerning JSV in jsv(1))""")
if prog in ['qsh']:
self.parser.add_argument('xterm_args',
nargs='*',
help="""\
Available for qsh only.
Arguments to the xterm(1) executable, as defined in the configuration.
For details, refer to ge_conf(5)).
Information concerning xterm_args will be available in JSV context as
parameters with the name CMDARGS and
CMDARG<number>. Find more information above in section command_args.
(see -jsv option above or find more infor‐
mation concerning JSV in jsv(1))""")
# END SGE OPTION PARSER
# Initialize with defaults
self.parse('-cwd -V -j y -terse -pe lammpi 1 echo')
def parse(self, inputstring=''):
"""Helper method: parses a string"""
return self.parse_args(inputstring.split())
def parse_args(self, args=None):
"""Helper method: parses a list"""
if args is None:
self.args = self.parser.parse_args() # default is sys.argv[1:]
else:
self.args = self.parser.parse_args(args)
return self.args
def write_qsub_script(self, filename, echo=False):
"""
Writes the entire command line to a qsub script
filename: name of file to write
echo : echo contents of script to stdout. Default: False
"""
buf = ['#!/usr/bin/env qsub', '# Written using SGE module']
for option, value in self.args.__dict__.items():
if value is True:
value = ''
if option not in ['command', 'command_args', 'xterm_args']:
if isinstance(value, list):
val = ' '.join(value)
else:
val = str(value)
buf.append(' '.join(['#', '-' + option, val]))
args = getattr(self.args, 'command_args', [])
args = getattr(self.args, 'xterm_args', args)
buf.append(' '.join([self.args.command] + args))
if echo:
print('\n'.join(buf))
f = open(filename, 'w')
f.write('\n'.join(buf))
f.close()
def execute(self, mode='local', path=''):
"""
Executes qsub
known modes: local - run locally
echo - echoes out execution string only
path: path to qsub/... executable: Default = nothing
"""
# Form execution string
import random
test_id = ''
if "build.log" in self.args.o:
test_id = self.args.o.split("/")[-2]
elif "run.log" in self.args.o:
test_id = self.args.o.split("/")[-3]
if test_id == '':
test_id = str(random.randint(1, 9999))
import os
program = os.path.join(path, self.prog)
options = []
for option, value in self.args.__dict__.items():
if value is True:
value = ''
if isinstance(value, list):
val = ' '.join(value)
else:
val = str(value)
if option not in ['command', 'command_args', 'xterm_args']:
options.append('-' + option + ' ' + val)
args = getattr(self.args, 'command_args', [])
args = getattr(self.args, 'xterm_args', args)
# ---------------- command file -------------
cwd = os.getcwd()
command_file = cwd + '/command_file_' + str(os.getpid()) + '_' + test_id
try:
with open(command_file, 'w') as f_command:
command_temp = str(self.args.command)
command_temp = command_temp.replace('"', '')
f_command.write(command_temp + "\n/bin/rm -f " + command_file)
except IOError:
error_msg = 'Error: problem with open File: ' + str(f_command)
raise IOError(error_msg)
os.chmod(command_file, 0o0777)
exestring = ' '.join([program] + options + [command_file] + args)
exestring = exestring.replace('-pe lammpi 1', '')
exestring = exestring.replace('-slot', '-pe make')
exestring = exestring.replace('-ll ', '-l ')
exestring = exestring.replace('-t 0', '')
# exestring = exestring.replace('-j y','')
print('INFO: sge command file = ' + command_file)
if mode == 'echo':
return (exestring)
elif mode == 'local':
import subprocess
p = subprocess.Popen(command_file,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True)
print(p.stdout.read())
if __name__ == '__main__':
    # Smoke test: parse sys.argv, then show the script and the command line.
    print('Attempting to validate qsub arguments using argparse')
    opts = qsubOptions()
    opts.parse_args()
    opts.args.t = '1-1000'
    print('I will now print the script')
    opts.write_qsub_script('/dev/null', echo=True)
    separator = '*' * 70
    print(separator)
    print('I will now print the command line')
    opts.execute(mode='echo')
|
ce582ad0f562c79149032c6a4c7013e5978f623f
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/typeshed/stubs/commonmark/commonmark/inlines.pyi
|
e53f22a10cf8cc629d4981eec0b7d8a0feb83501
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 1,763
|
pyi
|
inlines.pyi
|
# Type stubs for commonmark.inlines -- inline-level Markdown parsing.
import html
from typing import Any
# Alias to the stdlib HTML-entity unescaper used by the implementation.
HTMLunescape = html.unescape
# Module-level pattern/constant objects; the stub types them all as Any.
ESCAPED_CHAR: Any
rePunctuation: Any
reLinkTitle: Any
reLinkDestinationBraces: Any
reEscapable: Any
reEntityHere: Any
reTicks: Any
reTicksHere: Any
reEllipses: Any
reDash: Any
reEmailAutolink: Any
reAutolink: Any
reSpnl: Any
reWhitespaceChar: Any
reWhitespace: Any
reUnicodeWhitespaceChar: Any
reFinalSpace: Any
reInitialSpace: Any
reSpaceAtEndOfLine: Any
reLinkLabel: Any
reMain: Any
# Stub signature only; see commonmark.inlines.text for the behaviour.
def text(s): ...
# Stub signature only; see commonmark.inlines.smart_dashes for the behaviour.
def smart_dashes(chars): ...
class InlineParser:
    """Stub of commonmark's inline parser: attribute and method signatures
    only (all bodies elided)."""
    # Parser state attributes.
    subject: str
    brackets: Any
    pos: int
    refmap: Any
    options: Any
    def __init__(self, options=...) -> None: ...
    def match(self, regexString): ...
    def peek(self): ...
    def spnl(self): ...
    def parseBackticks(self, block): ...
    def parseBackslash(self, block): ...
    def parseAutolink(self, block): ...
    def parseHtmlTag(self, block): ...
    def scanDelims(self, c): ...
    # Declared mid-class in the upstream stub -- presumably first assigned
    # at runtime inside handleDelim; TODO confirm against the implementation.
    delimiters: Any
    def handleDelim(self, cc, block): ...
    def removeDelimiter(self, delim) -> None: ...
    @staticmethod
    def removeDelimitersBetween(bottom, top) -> None: ...
    def processEmphasis(self, stack_bottom) -> None: ...
    def parseLinkTitle(self): ...
    def parseLinkDestination(self): ...
    def parseLinkLabel(self): ...
    def parseOpenBracket(self, block): ...
    def parseBang(self, block): ...
    def parseCloseBracket(self, block): ...
    def addBracket(self, node, index, image) -> None: ...
    def removeBracket(self) -> None: ...
    def parseEntity(self, block): ...
    def parseString(self, block): ...
    def parseNewline(self, block): ...
    def parseReference(self, s, refmap): ...
    def parseInline(self, block): ...
    def parseInlines(self, block) -> None: ...
    parse: Any
|
84f58d8a6a90905c5b4ed1ae20b28b2e7711debc
|
27d7b99866a0bb16883ff2efebbe5ca4b3b27855
|
/setup.py
|
40638939875bb85f08f5e4a886047d158d1b306f
|
[
"BSD-3-Clause"
] |
permissive
|
cherrypy/cherrypy
|
f85e054251d55583c34f381baf6570760d5af4c1
|
0050ed1e75f375d7790efcf69a954c25a9896c7b
|
refs/heads/main
| 2023-09-02T23:02:21.291624
| 2023-08-04T13:52:17
| 2023-08-04T13:52:17
| 57,436,777
| 1,934
| 497
|
BSD-3-Clause
| 2023-05-14T04:47:02
| 2016-04-30T10:09:26
|
Python
|
UTF-8
|
Python
| false
| false
| 4,307
|
py
|
setup.py
|
#! /usr/bin/env python
"""CherryPy package setuptools installer."""
import setuptools
name = 'CherryPy'
repo_slug = 'cherrypy/{}'.format(name.lower())
repo_url = 'https://github.com/{}'.format(repo_slug)

# All setup() keyword arguments collected in one dict so they can be
# inspected by tooling and passed to setuptools.setup() under the guard below.
params = dict(
    name=name,
    # Version is derived from the git tag by setuptools_scm (see setup_requires).
    use_scm_version=True,
    description='Object-Oriented HTTP framework',
    author='CherryPy Team',
    author_email='team@cherrypy.dev',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: Freely Distributable',
        'Operating System :: OS Independent',
        'Framework :: CherryPy',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Programming Language :: Python :: 3.11',
        'Programming Language :: Python :: Implementation',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: Jython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Internet :: WWW/HTTP :: HTTP Servers',
        'Topic :: Internet :: WWW/HTTP :: WSGI',
        'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
        'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',
        'Topic :: Software Development :: Libraries :: Application Frameworks',
    ],
    url='https://www.cherrypy.dev',
    project_urls={
        'CI: AppVeyor': 'https://ci.appveyor.com/project/{}'.format(repo_slug),
        'CI: Travis': 'https://travis-ci.org/{}'.format(repo_slug),
        'CI: Circle': 'https://circleci.com/gh/{}'.format(repo_slug),
        'CI: GitHub': 'https://github.com/{}/actions'.format(repo_slug),
        'Docs: RTD': 'https://docs.cherrypy.dev',
        'GitHub: issues': '{}/issues'.format(repo_url),
        'GitHub: repo': repo_url,
        'Tidelift: funding':
        'https://tidelift.com/subscription/pkg/pypi-cherrypy'
        '?utm_source=pypi-cherrypy&utm_medium=referral&utm_campaign=pypi',
    },
    packages=[
        'cherrypy', 'cherrypy.lib',
        'cherrypy.tutorial', 'cherrypy.test',
        'cherrypy.process',
        'cherrypy.scaffold',
    ],
    entry_points={'console_scripts': ['cherryd = cherrypy.__main__:run']},
    include_package_data=True,
    install_requires=[
        'cheroot>=8.2.1',
        'portend>=2.1.1',
        'more_itertools',
        'zc.lockfile',
        'jaraco.collections',
    ],
    extras_require={
        'docs': [
            'sphinx',
            'docutils',
            'alabaster',
            'sphinxcontrib-apidoc>=0.3.0',
            'rst.linker>=1.11',
            'jaraco.packaging>=3.2',
            'setuptools',
        ],
        'json': ['simplejson'],
        'routes_dispatcher': ['routes>=2.3.1'],
        'ssl': ['pyOpenSSL'],
        'testing': [
            'coverage',  # inspects tests coverage
            'codecov',  # sends tests coverage to codecov.io
            # cherrypy.lib.gctools
            'objgraph',
            'pytest>=5.3.5',
            'pytest-cov',
            'pytest-forked',
            'pytest-sugar',
            'path.py',
            'requests_toolbelt',
            'pytest-services>=2',
            'setuptools',
        ],
        # Enables memcached session support via `cherrypy[memcached_session]`:
        'memcached_session': ['python-memcached>=1.58'],
        'xcgi': ['flup'],
        # https://docs.cherrypy.dev/en/latest/advanced.html?highlight=windows#windows-console-events
        ':sys_platform == "win32" and implementation_name == "cpython"'
        # pywin32 disabled while a build is unavailable. Ref #1920.
        ' and python_version < "3.10"': [
            'pywin32 >= 227',
        ],
    },
    setup_requires=[
        'setuptools_scm',
    ],
    python_requires='>=3.6',
)

# Explicit guard instead of the old `__name__ == '__main__' and setup(...)`
# expression: same behaviour, standard idiom.
if __name__ == '__main__':
    setuptools.setup(**params)
|
77378766671526075bc09378f18950423c9c1a1d
|
ae98bcfb1cbf75ca329e8a71ec79f92032670075
|
/wavelink/payloads.py
|
662b670d3e579b5ce5f4a45f83cc9620a30bffbf
|
[
"MIT"
] |
permissive
|
PythonistaGuild/Wavelink
|
18f234d5c88760e147d778540abcf505ab0577ab
|
3d6e3b97bc54dd1d093102eb827b50dc9fd07446
|
refs/heads/main
| 2023-08-28T17:28:20.696752
| 2023-08-23T11:03:29
| 2023-08-23T11:03:29
| 164,062,465
| 373
| 306
|
MIT
| 2023-08-25T17:35:11
| 2019-01-04T05:32:14
|
Python
|
UTF-8
|
Python
| false
| false
| 3,366
|
py
|
payloads.py
|
"""
MIT License
Copyright (c) 2019-Present PythonistaGuild
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any
from discord.enums import try_enum
from .enums import TrackEventType, DiscordVoiceCloseType
if TYPE_CHECKING:
from .player import Player
from .tracks import Playable
from .types.events import EventOp
__all__ = ('TrackEventPayload', 'WebsocketClosedPayload')
class TrackEventPayload:
    """The Wavelink Track Event Payload.
    .. warning::
        This class should not be created manually, instead you will receive it from the
        various wavelink track events.
    Attributes
    ----------
    event: :class:`TrackEventType`
        An enum of the type of event.
    track: :class:`Playable`
        The track associated with this event.
    original: Optional[:class:`Playable`]
        The original requested track before conversion. Could be None.
    player: :class:`player.Player`
        The player associated with this event.
    reason: Optional[str]
        The reason this event was fired.
    """
    def __init__(self, *, data: EventOp, track: Playable, original: Playable | None, player: Player) -> None:
        self.event: TrackEventType = try_enum(TrackEventType, data['type'])
        self.track: Playable = track
        self.original: Playable | None = original
        self.player: Player = player
        # dict.get may return None when 'reason' is absent, so the annotation
        # is Optional -- this matches the class docstring above.
        self.reason: str | None = data.get('reason')
class WebsocketClosedPayload:
    """The Wavelink WebsocketClosed Event Payload.
    .. warning::
        This class should not be created manually, instead you will receive it from the
        wavelink `on_wavelink_websocket_closed` event.
    Attributes
    ----------
    code: :class:`DiscordVoiceCloseType`
        An Enum representing the close code from Discord.
    reason: Optional[str]
        The reason the Websocket was closed.
    by_discord: bool
        Whether the websocket was closed by Discord.
    player: :class:`player.Player`
        The player associated with this event.
    """
    def __init__(self, *, data: dict[str, Any], player: Player) -> None:
        self.code: DiscordVoiceCloseType = try_enum(DiscordVoiceCloseType, data['code'])
        # dict.get may return None when 'reason' is absent; annotation widened
        # to match the Optional[str] documented above.
        self.reason: str | None = data.get('reason')
        # NOTE(review): .get can also yield None here if 'byRemote' is
        # missing -- presumably the payload always carries it; TODO confirm.
        self.by_discord: bool = data.get('byRemote')
        self.player: Player = player
|
0fd3c8de91421c7d5eb7ba887a122a16bf6b342b
|
1e528494a929deada984822438b3ab569762e6c6
|
/sprytile_panel.py
|
8b0452e7f7a9e62f449970b647bdd5c7dd8bc365
|
[
"MIT"
] |
permissive
|
Sprytile/Sprytile
|
a0233a00a243f263691921d7e1f6af05c5eb5442
|
6b68d0069aef5bfed6ab40d1d5a94a3382b41619
|
refs/heads/master
| 2022-07-10T06:54:01.003723
| 2020-09-26T07:25:35
| 2020-09-26T07:25:35
| 72,276,917
| 860
| 91
|
MIT
| 2022-07-07T23:37:19
| 2016-10-29T09:47:09
|
Python
|
UTF-8
|
Python
| false
| false
| 11,716
|
py
|
sprytile_panel.py
|
import bpy
from . import sprytile_utils
from bpy.types import Panel, UIList
class VIEW3D_UL_SprytileMaterialGridList(bpy.types.UIList):
    # UIList for the Sprytile panel. Each display entry is either a material
    # row (non-empty mat_id) or a tile-grid row (non-empty grid_id).
    use_order_name : bpy.props.BoolProperty(default=False, name="Order by Name")
    use_order_invert : bpy.props.BoolProperty(default=False, name="Reverse Order")
    obj_mats_only : bpy.props.BoolProperty(default=False, name="Object Materials Only", description="Show only materials already added to the selected object")
    def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
        """Draw one list row: a material header or a grid sub-row."""
        if item.mat_id != "":
            # Material row: resolve the material; bail out with a placeholder
            # label whenever the stored id no longer matches live data.
            mat_data = sprytile_utils.get_mat_data(context, item.mat_id)
            if mat_data is None or item.mat_id not in bpy.data.materials:
                layout.label(text="Invalid Data")
                return
            material = bpy.data.materials[item.mat_id]
            if material is None:
                layout.label(text="Invalid Data")
                return
            display_icon = layout.icon(material)
            # Prefer the first grid's texture preview over the bare material icon.
            texture = sprytile_utils.get_grid_texture(context.object, mat_data.grids[0])
            if texture is not None:
                display_icon = layout.icon(texture)
            row = layout.row(align=True)
            if mat_data is not None:
                # Expand/collapse arrow toggling the material's grid rows.
                show_icon = "TRIA_DOWN" if mat_data.is_expanded else "TRIA_RIGHT"
                row.prop(mat_data, "is_expanded", text="", icon=show_icon, emboss=False)
            row.prop(item, "mat_name", text="", emboss=False, icon_value=display_icon)
        elif item.grid_id != "":
            # Grid row: editable name on the left, tile dimensions on the right.
            grid = sprytile_utils.get_grid(context, item.grid_id)
            if grid is not None:
                split = layout.split(factor=0.65, align=True)
                split.prop(grid, "name", text="")
                split.label(text="%dx%d" % (grid.grid[0], grid.grid[1]))
            else:
                layout.label(text="Invalid Data")
        else:
            layout.label(text="Invalid Data")
    def draw_filter(self, context, layout):
        """Draw the search field and ordering toggles under the list."""
        row = layout.row()
        subrow = row.row(align=True)
        subrow.prop(self, "filter_name", text="")
        icon = 'ZOOM_OUT' if self.use_filter_invert else 'ZOOM_IN'
        subrow.prop(self, "use_filter_invert", text="", icon=icon)
        row = layout.row()
        subrow = row.row(align=True)
        subrow.prop(self, "use_order_name", text="", icon='SORTALPHA')
        icon = 'SORT_DESC' if self.use_order_invert else 'SORT_ASC'
        subrow.prop(self, "use_order_invert", text="", icon=icon)
        subrow.prop(self, "obj_mats_only", text="", icon='MESH_CUBE')
    def filter_items(self, context, data, propname):
        """Return (filter flags, new order) implementing name search,
        object-material filtering and optional name/inverted ordering."""
        display = getattr(data, propname)
        helper_funcs = bpy.types.UI_UL_list
        flt_flags = []
        flt_neworder = []
        # Filtering by name
        if self.filter_name:
            flt_flags = helper_funcs.filter_items_by_name(self.filter_name, self.bitflag_filter_item, display, "search_name",
                                                          reverse=False)
        if not flt_flags:
            # No name filter active: every entry starts out visible.
            flt_flags = [self.bitflag_filter_item] * len(display)
        # Filtering by selected object
        if self.obj_mats_only and context.object and context.object.type == "MESH":
            obj_mats = []
            for slot in context.object.material_slots:
                if slot.material:
                    obj_mats.append(slot.material)
            def filter_func(item):
                # item is (index, current flag); True means "hide this entry".
                nonlocal display
                if item[1] == 0:
                    return True
                # Grid rows fall back to their parent material's id.
                mat_id = display[item[0]].mat_id or display[item[0]].parent_mat_id
                mat_idx = bpy.data.materials.find(mat_id)
                if mat_idx < 0:
                    return False
                return not bpy.data.materials[mat_id] in obj_mats
            flt_flags = [0 if filter_func(x) else self.bitflag_filter_item for x in list(enumerate(flt_flags))]
        sort_list = list(enumerate(display))
        if self.use_order_name:
            sort_list.sort(key=lambda item: item[1].search_name)
        if self.use_order_invert:
            # Reverse the material order while keeping each material's grid
            # rows grouped immediately after their (inverted) material slot.
            invert_list = list(enumerate(sort_list))
            invert_list_len = len(invert_list) - 1
            invert_list_cp = invert_list.copy()
            def sort_invert(item):
                nonlocal invert_list_cp
                if item[1][1].mat_id:
                    # Material entry: plain reversed position, before its grids.
                    return (invert_list_len - item[0], 0)
                else:
                    # Grid entry: walk upward to the owning material and sort
                    # just after that material's inverted position.
                    i = item[0] - 1
                    while i >= 0:
                        if invert_list_cp[i][1][1].mat_id:
                            return (invert_list_len - i, 1)
                        i -= 1
                    return (item[0], 1)
            invert_list.sort(key=sort_invert)
            sort_list = [x[1] for x in invert_list]
        flt_neworder = [x[0] for x in sort_list]
        return flt_flags, flt_neworder
class VIEW3D_MT_SprytileGridDropDown(bpy.types.Menu):
    """Drop-down menu next to the grid list with tileset-level operations."""
    bl_idname = 'VIEW3D_MT_SprytileGridDropDown'
    bl_label = "Grid drop down"

    def draw(self, context):
        menu = self.layout
        menu.operator("sprytile.tileset_new", icon="PRESET_NEW")
        menu.separator()
        menu.operator("sprytile.validate_grids", icon="GRID")
class VIEW3D_PT_SprytilePanel(bpy.types.Panel):
    # Main Sprytile tool panel in the 3D View sidebar.
    bl_idname = "VIEW3D_PT_SprytilePanel"
    bl_label = "Sprytile Painter"
    bl_space_type = "VIEW_3D"
    bl_region_type = "UI"
    bl_category = "Sprytile"
    # Only show panel when selected object is a mesh and in edit mode
    @classmethod
    def poll(self, context):
        if context.object and context.object.type == 'MESH':
            return context.object.mode == 'EDIT'
    def draw(self, context):
        """Draw the panel: UV/rotation controls, per-mode options,
        the material/grid list, and the selected grid's settings."""
        layout = self.layout
        scene = context.scene
        obj = context.object
        if hasattr(context.scene, "sprytile_data") is False:
            layout.label(text="No Sprytile Data")
            return
        sprytile_data = context.scene.sprytile_data
        # UV flip toggles and tile rotation controls.
        row = layout.row(align=True)
        row.prop(sprytile_data, "uv_flip_x", toggle=True)
        row.prop(sprytile_data, "uv_flip_y", toggle=True)
        row = layout.row(align=True)
        row.operator("sprytile.rotate_left", icon="TRIA_DOWN", text="")
        row.prop(sprytile_data, "mesh_rotate")
        row.operator("sprytile.rotate_right", icon="TRIA_UP", text="")
        if sprytile_data.paint_mode == 'PAINT':
            # Paint mode: snapping/stretching on the left, UV align grid on the right.
            box = layout.box()
            row = box.row(align=False)
            split = row.split(factor=0.65)
            left_col = split.column(align=True)
            left_col.prop(sprytile_data, "paint_uv_snap", text="Pixel Snap")
            left_col.prop(sprytile_data, "paint_stretch_x")
            left_col.prop(sprytile_data, "paint_stretch_y")
            sub_col = left_col.column(align=True)
            # Edge snapping is only meaningful while stretching on some axis.
            sub_col.enabled = sprytile_data.paint_stretch_x or sprytile_data.paint_stretch_y
            sub_col.prop(sprytile_data, "paint_edge_snap")
            sub_col.prop(sprytile_data, "edge_threshold")
            right_col = split.column(align=True)
            right_col.label(text="UV Align")
            right_col.row(align=True).prop(sprytile_data, "paint_align_top", toggle=True, text="")
            right_col.row(align=True).prop(sprytile_data, "paint_align_middle", toggle=True, text="")
            right_col.row(align=True).prop(sprytile_data, "paint_align_bottom", toggle=True, text="")
            right_col.row(align=True).prop(sprytile_data, "paint_hinting")
        #if sprytile_data.paint_mode == 'SET_NORMAL':
        #    layout.prop(sprytile_data, "paint_hinting")
        if sprytile_data.paint_mode == 'FILL':
            # Fill mode: fill-plane size and transform lock.
            box = layout.box()
            row = box.row(align=True)
            row.prop(sprytile_data, "fill_plane_size", text="Fill Size")
            row.separator()
            row.prop(sprytile_data, "fill_lock_transform", toggle=True, text="", icon="CON_ROTLIMIT")
        # View axis and options
        row = layout.row(align=True)
        row.prop(sprytile_data, "lock_normal", toggle=True, text="", icon="LOCKVIEW_{0}".format("ON" if sprytile_data.lock_normal else "OFF"))
        row.prop(sprytile_data, "normal_mode", expand=True)
        if sprytile_data.paint_mode == 'FILL':
            row.separator()
            row.prop(sprytile_data, "auto_merge", toggle=True, text="", icon="AUTOMERGE_{0}".format("ON" if sprytile_data.auto_merge else "OFF"))
        if sprytile_data.paint_mode == 'MAKE_FACE':
            # row = layout.row(align=True)
            row.separator()
            row.prop(sprytile_data, "auto_merge", toggle=True, text="", icon="AUTOMERGE_{0}".format("ON" if sprytile_data.auto_merge else "OFF"))
            row.prop(sprytile_data, "auto_join", toggle=True, text="", icon="MESH_GRID")
            row.prop(sprytile_data, "allow_backface", toggle=True, text="", icon="NORMALS_FACE")
        if sprytile_data.paint_mode == 'PAINT':
            row.separator()
            row.prop(sprytile_data, "allow_backface", toggle=True, text="", icon="NORMALS_FACE")
        layout.separator()
        # Material/grid list with add/remove/menu/reorder buttons on the side.
        row = layout.row()
        row.template_list("VIEW3D_UL_SprytileMaterialGridList", "",
                          scene.sprytile_list, "display",
                          scene.sprytile_list, "idx", rows=4)
        col = row.column(align=True)
        col.operator('sprytile.grid_add', icon='ADD', text='')
        col.operator('sprytile.grid_remove', icon='REMOVE', text='')
        col.menu('VIEW3D_MT_SprytileGridDropDown', icon='DOWNARROW_HLT', text='')
        col.separator()
        col.operator('sprytile.grid_move', icon='TRIA_UP', text='').direction = -1
        col.operator('sprytile.grid_move', icon='TRIA_DOWN', text='').direction = 1
        if len(scene.sprytile_mats) == 0:
            return
        selected_grid = sprytile_utils.get_grid(context, obj.sprytile_gridid)
        if selected_grid is None:
            return
        layout.prop(selected_grid, "grid", text="Grid Size")
        row = layout.row()
        row.prop(sprytile_data, "show_overlay", text="", icon='GRID')
        row.prop(sprytile_data, "outline_preview", text="", icon="BORDERMOVE")
        show_icon = "TRIA_DOWN" if sprytile_data.show_extra else "TRIA_RIGHT"
        row.prop(sprytile_data, "show_extra", icon=show_icon, emboss=False)
        if not sprytile_data.show_extra:
            return
        # Extra settings for the selected grid: padding, margins, rotation, offset.
        split = layout.split(factor=0.3, align=True)
        split.prop(selected_grid, "auto_pad", toggle=True)
        pad_row = split.row(align=True)
        pad_row.enabled = selected_grid.auto_pad
        pad_row.prop(selected_grid, "auto_pad_offset")
        layout.prop(selected_grid, "padding")
        row = layout.row(align=True)
        row.label(text="Margins")
        col = row.column(align=True)
        row_margins = col.row(align=True)
        row_margins.prop(selected_grid, "margin", text="Left", index=3)
        row_margins.prop(selected_grid, "margin", text="Right", index=1)
        row_margins = col.row(align=True)
        row_margins.prop(selected_grid, "margin", text="Top", index=0)
        row_margins.prop(selected_grid, "margin", text="Bottom", index=2)
        layout.prop(selected_grid, "rotate")
        layout.prop(selected_grid, "offset")
# Classes registered/unregistered together by register()/unregister() below.
classes = (
    VIEW3D_PT_SprytilePanel,
    VIEW3D_UL_SprytileMaterialGridList,
    VIEW3D_MT_SprytileGridDropDown,
)
def register():
    """Register every Sprytile panel class with Blender."""
    for panel_cls in classes:
        bpy.utils.register_class(panel_cls)
def unregister():
    """Remove every Sprytile panel class from Blender's registry."""
    for panel_cls in classes:
        bpy.utils.unregister_class(panel_cls)
# Allow running the module directly (e.g. from Blender's text editor).
if __name__ == '__main__':
    register()
|
5654a398141e6e59f302696b9dc13b570fd85702
|
fe90bf63c34511ec9a4d7cb5a90957fbbb03a504
|
/boundary_layer/plugins/oozie_plugin.py
|
c4bdacd3afa29a5d31537e90ffdfe4b2264f5c31
|
[
"Apache-2.0"
] |
permissive
|
etsy/boundary-layer
|
778b115f94efc5d50986a289daf3ad265b38926c
|
c29594957c1fb47e308fcc89f7edcefc0797fc89
|
refs/heads/master
| 2023-07-21T17:03:15.769537
| 2023-01-04T14:05:53
| 2023-01-04T14:05:53
| 142,857,095
| 263
| 67
|
Apache-2.0
| 2023-07-19T19:57:04
| 2018-07-30T09:51:52
|
Python
|
UTF-8
|
Python
| false
| false
| 3,842
|
py
|
oozie_plugin.py
|
# -*- coding: utf-8 -*-
# Copyright 2018 Etsy Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import six
from boundary_layer.plugins import util
from boundary_layer.logger import logger
OoziePluginContainer = namedtuple('OoziePluginContainer', ['name', 'priority', 'plugin'])
class OozieMetaPlugin(object):
    """Facade over every registered oozie parser plugin.

    Plugins are instantiated in descending priority order; each accessor
    merges (or selects from) the per-plugin results.
    """

    def __init__(self, plugins, args):
        self.args = args
        by_priority = sorted(plugins, key=lambda p: p.priority.value, reverse=True)
        self._plugin_containers = []
        for plugin in by_priority:
            self._plugin_containers.append(
                OoziePluginContainer(
                    name=plugin.name,
                    priority=plugin.priority,
                    plugin=plugin.oozie_plugin_cls(args)))

    def action_builders(self):
        """Concatenated action builders from every plugin."""
        return util.merge_lists(
            container.plugin.action_builders() for container in self._plugin_containers)

    def plugin_config(self):
        """Map plugin name -> plugin config, dropping empty configs."""
        merged = {}
        for container in self._plugin_containers:
            config = container.plugin.plugin_config()
            if config:
                merged[container.name] = config
        return merged

    def dag_args(self):
        return util.merge_dicts(
            container.plugin.dag_args() for container in self._plugin_containers)

    def default_task_args(self):
        return util.merge_dicts(
            container.plugin.default_task_args() for container in self._plugin_containers)

    def dag_imports(self):
        return util.merge_dicts(
            container.plugin.dag_imports() for container in self._plugin_containers)

    def cluster_config(self):
        """Return the cluster config of the highest-priority plugin that has one.

        Raises if no plugin provides a configuration; logs a note when more
        than one does.
        """
        candidates = [
            container for container in self._plugin_containers
            if container.plugin.cluster_config()]
        if not candidates:
            raise Exception('No cluster configurations found for oozie parser!')
        if len(candidates) > 1:
            logger.info(
                'Multiple cluster configurations found. Choosing configuration '
                'from plugin `%s`, with priority `%s`',
                candidates[0].name,
                candidates[0].priority)
        return candidates[0].plugin.cluster_config()

    def upstream_operators(self):
        return util.merge_lists(
            container.plugin.upstream_operators() for container in self._plugin_containers)

    def jsp_macros(self):
        return util.merge_dicts(
            container.plugin.jsp_macros() for container in self._plugin_containers)
class BaseOozieParserPlugin(object):
    """Default (no-op) implementation of the oozie parser plugin interface.

    Subclasses override only the hooks they need; every accessor here
    returns an empty value of the appropriate type.
    """

    def __init__(self, args):
        self.args = args

    @classmethod
    def register_arguments(cls, parser):
        # Overrideable hook for adding command-line arguments.
        pass

    def action_builders(self):
        """Optional list of OozieActionBuilder classes for custom oozie actions."""
        return []

    def plugin_config(self):
        return {}

    def dag_args(self):
        return {}

    def default_task_args(self):
        return {}

    def cluster_config(self):
        return None

    def dag_imports(self):
        return {}

    def upstream_operators(self):
        return []

    def jsp_macros(self):
        return {}
|
25e5f7e40606396d3b7ed5a774014f2619b19629
|
302ce5ab1045ee93845608c96580c63d54d730af
|
/src/spikeinterface/curation/tests/test_sortingview_curation.py
|
9177cb553662b68d6f0faff8d651d92c2a253ad8
|
[
"MIT"
] |
permissive
|
SpikeInterface/spikeinterface
|
f900b62720860b2881d2e6b5fa4441e0e560f625
|
ee2237b3f5ce2347b2ec9df90e97b0ee6c738dcf
|
refs/heads/main
| 2023-09-02T11:27:54.687021
| 2023-09-01T13:48:29
| 2023-09-01T13:48:29
| 196,581,117
| 295
| 133
|
MIT
| 2023-09-14T19:12:16
| 2019-07-12T13:07:46
|
Python
|
UTF-8
|
Python
| false
| false
| 6,172
|
py
|
test_sortingview_curation.py
|
import pytest
from pathlib import Path
import os
import spikeinterface as si
from spikeinterface.extractors import read_mearec
from spikeinterface import set_global_tmp_folder
from spikeinterface.postprocessing import (
compute_correlograms,
compute_unit_locations,
compute_template_similarity,
compute_spike_amplitudes,
)
from spikeinterface.curation import apply_sortingview_curation
# Resolve the cache folder: use pytest's shared test folder when the project
# conftest defines it, otherwise fall back to a local directory.
if hasattr(pytest, "global_test_folder"):
    cache_folder = pytest.global_test_folder / "curation"
else:
    cache_folder = Path("cache_folder") / "curation"
parent_folder = Path(__file__).parent
ON_GITHUB = bool(os.getenv("GITHUB_ACTIONS"))  # True when running under GitHub Actions
KACHERY_CLOUD_SET = bool(os.getenv("KACHERY_CLOUD_CLIENT_ID")) and bool(os.getenv("KACHERY_CLOUD_PRIVATE_KEY"))
set_global_tmp_folder(cache_folder)
# this needs to be run only once: it uploads the sorting summary whose
# curation link is then consumed by the tests below
def generate_sortingview_curation_dataset():
    """Extract waveforms plus extensions and upload a curation-enabled
    sorting summary to sortingview (network side effect)."""
    import spikeinterface.widgets as sw
    local_path = si.download_dataset(remote_path="mearec/mearec_test_10s.h5")
    recording, sorting = read_mearec(local_path)
    we = si.extract_waveforms(recording, sorting, folder=None, mode="memory")
    # Extensions required by the sorting-summary widget.
    _ = compute_spike_amplitudes(we)
    _ = compute_correlograms(we)
    _ = compute_template_similarity(we)
    _ = compute_unit_locations(we)
    # plot_sorting_summary with curation
    w = sw.plot_sorting_summary(we, curation=True, backend="sortingview")
    # curation_link:
    # https://figurl.org/f?v=gs://figurl/spikesortingview-10&d=sha1://bd53f6b707f8121cadc901562a89b67aec81cc81&label=SpikeInterface%20-%20Sorting%20Summary
@pytest.mark.skipif(ON_GITHUB and not KACHERY_CLOUD_SET, reason="Kachery cloud secrets not available")
def test_gh_curation():
    """Apply a curation file fetched via a gh:// URI and check merges/labels."""
    local_path = si.download_dataset(remote_path="mearec/mearec_test_10s.h5")
    _, sorting = read_mearec(local_path)
    # from GH
    # curated link:
    # https://figurl.org/f?v=gs://figurl/spikesortingview-10&d=sha1://bd53f6b707f8121cadc901562a89b67aec81cc81&label=SpikeInterface%20-%20Sorting%20Summary&s={%22sortingCuration%22:%22gh://alejoe91/spikeinterface/fix-codecov/spikeinterface/curation/tests/sv-sorting-curation.json%22}
    gh_uri = "gh://SpikeInterface/spikeinterface/main/src/spikeinterface/curation/tests/sv-sorting-curation.json"
    sorting_curated_gh = apply_sortingview_curation(sorting, uri_or_json=gh_uri, verbose=True)
    print(f"From GH: {sorting_curated_gh}")
    # Curation merges units 8 and 9 (hence "#8-#9") leaving 9 units,
    # and attaches accept/mua/artifact label properties.
    assert len(sorting_curated_gh.unit_ids) == 9
    assert "#8-#9" in sorting_curated_gh.unit_ids
    assert "accept" in sorting_curated_gh.get_property_keys()
    assert "mua" in sorting_curated_gh.get_property_keys()
    assert "artifact" in sorting_curated_gh.get_property_keys()
    # include/exclude label filtering
    sorting_curated_gh_accepted = apply_sortingview_curation(sorting, uri_or_json=gh_uri, include_labels=["accept"])
    sorting_curated_gh_mua = apply_sortingview_curation(sorting, uri_or_json=gh_uri, exclude_labels=["mua"])
    sorting_curated_gh_art_mua = apply_sortingview_curation(
        sorting, uri_or_json=gh_uri, exclude_labels=["artifact", "mua"]
    )
    assert len(sorting_curated_gh_accepted.unit_ids) == 3
    assert len(sorting_curated_gh_mua.unit_ids) == 6
    assert len(sorting_curated_gh_art_mua.unit_ids) == 5
@pytest.mark.skipif(ON_GITHUB and not KACHERY_CLOUD_SET, reason="Kachery cloud secrets not available")
def test_sha1_curation():
    """Apply the same curation fetched via a sha1:// URI and check merges/labels."""
    local_path = si.download_dataset(remote_path="mearec/mearec_test_10s.h5")
    _, sorting = read_mearec(local_path)
    # from SHA1
    # curated link:
    # https://figurl.org/f?v=gs://figurl/spikesortingview-10&d=sha1://bd53f6b707f8121cadc901562a89b67aec81cc81&label=SpikeInterface%20-%20Sorting%20Summary&s={%22sortingCuration%22:%22sha1://1182ba19671fcc7d3f8e0501b0f8c07fb9736c22%22}
    sha1_uri = "sha1://1182ba19671fcc7d3f8e0501b0f8c07fb9736c22"
    sorting_curated_sha1 = apply_sortingview_curation(sorting, uri_or_json=sha1_uri, verbose=True)
    print(f"From SHA: {sorting_curated_sha1}")
    # Same expectations as the gh:// variant: merged unit "#8-#9" plus labels.
    assert len(sorting_curated_sha1.unit_ids) == 9
    assert "#8-#9" in sorting_curated_sha1.unit_ids
    assert "accept" in sorting_curated_sha1.get_property_keys()
    assert "mua" in sorting_curated_sha1.get_property_keys()
    assert "artifact" in sorting_curated_sha1.get_property_keys()
    # include/exclude label filtering
    sorting_curated_sha1_accepted = apply_sortingview_curation(sorting, uri_or_json=sha1_uri, include_labels=["accept"])
    sorting_curated_sha1_mua = apply_sortingview_curation(sorting, uri_or_json=sha1_uri, exclude_labels=["mua"])
    sorting_curated_sha1_art_mua = apply_sortingview_curation(
        sorting, uri_or_json=sha1_uri, exclude_labels=["artifact", "mua"]
    )
    assert len(sorting_curated_sha1_accepted.unit_ids) == 3
    assert len(sorting_curated_sha1_mua.unit_ids) == 6
    assert len(sorting_curated_sha1_art_mua.unit_ids) == 5
def test_json_curation():
    """Apply the curation from the local JSON file and check merges/labels."""
    local_path = si.download_dataset(remote_path="mearec/mearec_test_10s.h5")
    _, sorting = read_mearec(local_path)
    # from curation.json
    json_file = parent_folder / "sv-sorting-curation.json"
    sorting_curated_json = apply_sortingview_curation(sorting, uri_or_json=json_file, verbose=True)
    print(f"From JSON: {sorting_curated_json}")
    # Same expectations as the URI variants: merged unit "#8-#9" plus labels.
    assert len(sorting_curated_json.unit_ids) == 9
    assert "#8-#9" in sorting_curated_json.unit_ids
    assert "accept" in sorting_curated_json.get_property_keys()
    assert "mua" in sorting_curated_json.get_property_keys()
    assert "artifact" in sorting_curated_json.get_property_keys()
    # include/exclude label filtering
    sorting_curated_json_accepted = apply_sortingview_curation(
        sorting, uri_or_json=json_file, include_labels=["accept"]
    )
    sorting_curated_json_mua = apply_sortingview_curation(sorting, uri_or_json=json_file, exclude_labels=["mua"])
    sorting_curated_json_mua1 = apply_sortingview_curation(
        sorting, uri_or_json=json_file, exclude_labels=["artifact", "mua"]
    )
    assert len(sorting_curated_json_accepted.unit_ids) == 3
    assert len(sorting_curated_json_mua.unit_ids) == 6
    assert len(sorting_curated_json_mua1.unit_ids) == 5
# Run the tests directly; the dataset generator is a one-off and stays commented.
if __name__ == "__main__":
    # generate_sortingview_curation_dataset()
    test_sha1_curation()
    test_gh_curation()
    test_json_curation()
|
5b35ca5113e03ebbab924ff18a3d9280c39ac6d5
|
0dbdf273cc1620bd8be12df06f793b86b5c2f66f
|
/src/wt_tools/dxp_unpack.py
|
1aefc6549d922acab2393e932fdb4e0d11fa9299
|
[] |
no_license
|
klensy/wt-tools
|
86fd7d14a506ae03cbd74426077690de6cdf9bbe
|
a56144b4b9ddc2aeac481f822bcd6e291502d0c3
|
refs/heads/dev
| 2023-05-24T23:27:56.293527
| 2021-06-28T16:30:36
| 2021-06-28T16:30:36
| 28,302,097
| 149
| 59
| null | 2023-05-23T01:22:54
| 2014-12-21T14:28:34
|
Python
|
UTF-8
|
Python
| false
| false
| 2,531
|
py
|
dxp_unpack.py
|
import struct, sys, os, errno
dxp2_magic = 'DxP2'  # 4-byte magic at the start of a DxP2 package
file_names_block_offset = 0x48  # absolute offset of the NUL-separated name table
# Header fields holding table offsets; the stored uint32 + 0x10 is the table start.
second_block_offset_from = 0x10  # + 0x10
dds_block_offset_from = 0x20  # + 0x10
block_3_offset_from = 0x30  # + 0x10
block_4_offset_from = 0xc  # + 0x10  (currently unused here)
def mkdir_p(path):
    """Create the parent directory of *path*, like ``mkdir -p``.

    The final path component is treated as a file name, so only the
    directory portion is created. Existing directories are not an error.

    :param path: file path whose parent directories should exist
    """
    # os.path.dirname is the clear spelling of ''.join(os.path.split(path)[:-1]);
    # exist_ok=True replaces the racy manual EEXIST/isdir check.
    parent = os.path.dirname(path)
    if parent:
        os.makedirs(parent, exist_ok=True)
def main():
    """Unpack a DxP2 texture package into <file>_u/ as .ddsx files."""
    if len(sys.argv) != 2:
        print('usage: dxp_unpack.py file')
        sys.exit(1)
    filename = sys.argv[1]
    dist_dir = filename + '_u/'
    with open(filename, 'rb') as f:
        data = f.read()
    if len(data) == 0:
        print("empty file")
        exit(1)
    # Validate the 4-byte magic before trusting any offsets.
    if struct.unpack_from('4s', data, 0)[0].decode('utf-8') != dxp2_magic:
        print("wrong dxp type")
        exit(1)
    total_files = struct.unpack_from('H', data, 0x8)[0]
    print("total files:", total_files)
    cur_p = file_names_block_offset
    # TODO: fix path, like in vromfs_unpacker with abs path
    # Read NUL-terminated file names, one per packed file.
    file_names = []
    for i in range(total_files):
        old_cur_p = cur_p
        while data[cur_p] != 0x0:
            cur_p += 1
        file_names.append(data[old_cur_p: cur_p].decode('utf-8'))
        cur_p += 1
    for i in file_names:
        print(i)
    # First table: one uint32 per file, read with an 8-byte stride.
    cur_p = struct.unpack_from('I', data, second_block_offset_from)[0] + 0x10
    offsets_block_1 = []
    for i in range(total_files):
        offsets_block_1.append(struct.unpack_from('I', data, cur_p)[0])
        cur_p += 0x8
    '''
    from end:
    0x4: unpacked size
    0x4: packed size
    '''
    # 0x20-byte ddsx header per file; written back in front of each payload.
    cur_p = struct.unpack_from('I', data, dds_block_offset_from)[0] + 0x10
    dds_block = []
    for i in range(total_files):
        dds_block.append(data[cur_p: cur_p + 0x20])
        cur_p += 0x20
    '''
    0xc - offset
    0x10 - size
    '''
    # (offset, size) pairs locating each file's payload; 0x18-byte records.
    cur_p = struct.unpack_from('I', data, block_3_offset_from)[0] + 0x10
    block_3 = []
    for i in range(total_files):
        offset = struct.unpack_from('I', data, cur_p + 0xc)[0]
        size = struct.unpack_from('I', data, cur_p + 0x10)[0]
        block_3.append((offset, size))
        cur_p += 0x18
    mkdir_p(dist_dir)
    # Names look like "name*extra": keep only the part before '*'.
    for i, (off, size) in enumerate(block_3):
        with open(dist_dir + file_names[i].split('*')[0] + '.ddsx', 'wb') as f:
            f.write(dds_block[i])
            f.write(data[off: off + size])
# Script entry point.
if __name__ == '__main__':
    main()
|
80ac158815db2295ef5584a5107687fd35e26cea
|
13800b7827598e76428a335559b7bf11867ec2f0
|
/python/ccxt/async_support/coinbasepro.py
|
69a04afd8aa6c869734b7028ce9a5334e2748757
|
[
"MIT"
] |
permissive
|
ccxt/ccxt
|
b40a0466f5c430a3c0c6026552ae697aa80ba6c6
|
e4065f6a490e6fc4dd7a72b375428b2faa570668
|
refs/heads/master
| 2023-09-04T03:41:29.787733
| 2023-09-03T19:25:57
| 2023-09-03T19:25:57
| 91,253,698
| 30,798
| 8,190
|
MIT
| 2023-09-14T21:59:09
| 2017-05-14T15:41:56
|
Python
|
UTF-8
|
Python
| false
| false
| 74,643
|
py
|
coinbasepro.py
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.abstract.coinbasepro import ImplicitAPI
import hashlib
from ccxt.base.types import OrderSide
from ccxt.base.types import OrderType
from typing import Optional
from typing import List
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NotSupported
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import AuthenticationError
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class coinbasepro(Exchange, ImplicitAPI):
    def describe(self):
        """Return the static exchange descriptor (capabilities, endpoints,
        fees, timeframes, exception mapping) merged over the base class."""
        return self.deep_extend(super(coinbasepro, self).describe(), {
            'id': 'coinbasepro',
            'name': 'Coinbase Pro',
            'countries': ['US'],
            'rateLimit': 100,
            'userAgent': self.userAgents['chrome'],
            'pro': True,
            # Unified-API feature flags supported by this exchange.
            'has': {
                'CORS': True,
                'spot': True,
                'margin': False,
                'swap': False,
                'future': False,
                'option': False,
                'cancelAllOrders': True,
                'cancelOrder': True,
                'createDepositAddress': True,
                'createOrder': True,
                'createStopLimitOrder': True,
                'createStopMarketOrder': True,
                'createStopOrder': True,
                'fetchAccounts': True,
                'fetchBalance': True,
                'fetchClosedOrders': True,
                'fetchCurrencies': True,
                'fetchDepositAddress': False,  # the exchange does not have self method, only createDepositAddress, see https://github.com/ccxt/ccxt/pull/7405
                'fetchDeposits': True,
                'fetchDepositsWithdrawals': True,
                'fetchLedger': True,
                'fetchMarginMode': False,
                'fetchMarkets': True,
                'fetchMyTrades': True,
                'fetchOHLCV': True,
                'fetchOpenOrders': True,
                'fetchOrder': True,
                'fetchOrderBook': True,
                'fetchOrders': True,
                'fetchOrderTrades': True,
                'fetchPositionMode': False,
                'fetchTicker': True,
                'fetchTickers': True,
                'fetchTime': True,
                'fetchTrades': True,
                'fetchTradingFee': False,
                'fetchTradingFees': True,
                'fetchTransactions': 'emulated',
                'fetchWithdrawals': True,
                'withdraw': True,
            },
            # OHLCV timeframe -> granularity in seconds.
            'timeframes': {
                '1m': 60,
                '5m': 300,
                '15m': 900,
                '1h': 3600,
                '6h': 21600,
                '1d': 86400,
            },
            'hostname': 'pro.coinbase.com',
            'urls': {
                'test': {
                    'public': 'https://api-public.sandbox.pro.coinbase.com',
                    'private': 'https://api-public.sandbox.pro.coinbase.com',
                },
                'logo': 'https://user-images.githubusercontent.com/1294454/41764625-63b7ffde-760a-11e8-996d-a6328fa9347a.jpg',
                'api': {
                    'public': 'https://api.{hostname}',
                    'private': 'https://api.{hostname}',
                },
                'www': 'https://pro.coinbase.com/',
                'doc': 'https://docs.pro.coinbase.com',
                'fees': [
                    'https://docs.pro.coinbase.com/#fees',
                    'https://support.pro.coinbase.com/customer/en/portal/articles/2945310-fees',
                ],
            },
            # Coinbase Pro requires an API key, secret AND passphrase.
            'requiredCredentials': {
                'apiKey': True,
                'secret': True,
                'password': True,
            },
            # REST endpoints used to generate the implicit API methods.
            'api': {
                'public': {
                    'get': [
                        'currencies',
                        'products',
                        'products/{id}',
                        'products/{id}/book',
                        'products/{id}/candles',
                        'products/{id}/stats',
                        'products/{id}/ticker',
                        'products/{id}/trades',
                        'time',
                        'products/spark-lines',  # experimental
                    ],
                },
                'private': {
                    'get': [
                        'accounts',
                        'accounts/{id}',
                        'accounts/{id}/holds',
                        'accounts/{id}/ledger',
                        'accounts/{id}/transfers',
                        'coinbase-accounts',
                        'fills',
                        'funding',
                        'fees',
                        'margin/profile_information',
                        'margin/buying_power',
                        'margin/withdrawal_power',
                        'margin/withdrawal_power_all',
                        'margin/exit_plan',
                        'margin/liquidation_history',
                        'margin/position_refresh_amounts',
                        'margin/status',
                        'oracle',
                        'orders',
                        'orders/{id}',
                        'orders/client:{client_oid}',
                        'otc/orders',
                        'payment-methods',
                        'position',
                        'profiles',
                        'profiles/{id}',
                        'reports/{report_id}',
                        'transfers',
                        'transfers/{transfer_id}',
                        'users/self/exchange-limits',
                        'users/self/hold-balances',
                        'users/self/trailing-volume',
                        'withdrawals/fee-estimate',
                        'conversions/{conversion_id}',
                    ],
                    'post': [
                        'conversions',
                        'deposits/coinbase-account',
                        'deposits/payment-method',
                        'coinbase-accounts/{id}/addresses',
                        'funding/repay',
                        'orders',
                        'position/close',
                        'profiles/margin-transfer',
                        'profiles/transfer',
                        'reports',
                        'withdrawals/coinbase',
                        'withdrawals/coinbase-account',
                        'withdrawals/crypto',
                        'withdrawals/payment-method',
                    ],
                    'delete': [
                        'orders',
                        'orders/client:{client_oid}',
                        'orders/{id}',
                    ],
                },
            },
            # Exchange symbol -> unified currency code overrides.
            'commonCurrencies': {
                'CGLD': 'CELO',
            },
            'precisionMode': TICK_SIZE,
            'fees': {
                'trading': {
                    'tierBased': True,  # complicated tier system per coin
                    'percentage': True,
                    'maker': self.parse_number('0.004'),  # highest fee of all tiers
                    'taker': self.parse_number('0.006'),  # highest fee of all tiers
                },
                'funding': {
                    'tierBased': False,
                    'percentage': False,
                    'withdraw': {
                        'BCH': 0,
                        'BTC': 0,
                        'LTC': 0,
                        'ETH': 0,
                        'EUR': 0.15,
                        'USD': 25,
                    },
                    'deposit': {
                        'BCH': 0,
                        'BTC': 0,
                        'LTC': 0,
                        'ETH': 0,
                        'EUR': 0.15,
                        'USD': 10,
                    },
                },
            },
            # Maps API error strings to ccxt exception classes (exact / substring).
            'exceptions': {
                'exact': {
                    'Insufficient funds': InsufficientFunds,
                    'NotFound': OrderNotFound,
                    'Invalid API Key': AuthenticationError,
                    'invalid signature': AuthenticationError,
                    'Invalid Passphrase': AuthenticationError,
                    'Invalid order id': InvalidOrder,
                    'Private rate limit exceeded': RateLimitExceeded,
                    'Trading pair not available': PermissionDenied,
                    'Product not found': InvalidOrder,
                },
                'broad': {
                    'Order already done': OrderNotFound,
                    'order not found': OrderNotFound,
                    'price too small': InvalidOrder,
                    'price too precise': InvalidOrder,
                    'under maintenance': OnMaintenance,
                    'size is too small': InvalidOrder,
                    'Cancel only mode': OnMaintenance,  # https://github.com/ccxt/ccxt/issues/7690
                },
            },
        })
    async def fetch_currencies(self, params={}):
        """
        fetches all available currencies on an exchange
        :param dict [params]: extra parameters specific to the coinbasepro api endpoint
        :returns dict: an associative dictionary of currencies
        """
        response = await self.publicGetCurrencies(params)
        #
        #     [
        #         {
        #             id: 'XTZ',
        #             name: 'Tezos',
        #             min_size: '0.000001',
        #             status: 'online',
        #             message: '',
        #             max_precision: '0.000001',
        #             convertible_to: [],
        #             details: {
        #                 type: 'crypto',
        #                 symbol: 'Τ',
        #                 network_confirmations: 60,
        #                 sort_order: 53,
        #                 crypto_address_link: 'https://tzstats.com/{{address}}',
        #                 crypto_transaction_link: 'https://tzstats.com/{{txId}}',
        #                 push_payment_methods: ['crypto'],
        #                 group_types: [],
        #                 display_name: '',
        #                 processing_time_seconds: 0,
        #                 min_withdrawal_amount: 1
        #             }
        #         }
        #     ]
        #
        # Build the unified currency dict keyed by unified currency code.
        result = {}
        for i in range(0, len(response)):
            currency = response[i]
            id = self.safe_string(currency, 'id')
            name = self.safe_string(currency, 'name')
            code = self.safe_currency_code(id)
            details = self.safe_value(currency, 'details', {})
            status = self.safe_string(currency, 'status')
            active = (status == 'online')
            result[code] = {
                'id': id,
                'code': code,
                'info': currency,
                'type': self.safe_string(details, 'type'),
                'name': name,
                'active': active,
                'deposit': None,
                'withdraw': None,
                'fee': None,
                'precision': self.safe_number(currency, 'max_precision'),
                'limits': {
                    'amount': {
                        'min': self.safe_number(details, 'min_size'),
                        'max': None,
                    },
                    'withdraw': {
                        'min': self.safe_number(details, 'min_withdrawal_amount'),
                        'max': None,
                    },
                },
                'networks': {},
            }
        return result
    async def fetch_markets(self, params={}):
        """
        retrieves data on all markets for coinbasepro
        :param dict [params]: extra parameters specific to the exchange api endpoint
        :returns dict[]: an array of objects representing market data
        """
        response = await self.publicGetProducts(params)
        #
        #     [
        #         {
        #             id: 'BTCAUCTION-USD',
        #             base_currency: 'BTC',
        #             quote_currency: 'USD',
        #             base_min_size: '0.000016',
        #             base_max_size: '1500',
        #             quote_increment: '0.01',
        #             base_increment: '0.00000001',
        #             display_name: 'BTCAUCTION/USD',
        #             min_market_funds: '1',
        #             max_market_funds: '20000000',
        #             margin_enabled: False,
        #             fx_stablecoin: False,
        #             max_slippage_percentage: '0.02000000',
        #             post_only: False,
        #             limit_only: False,
        #             cancel_only: True,
        #             trading_disabled: False,
        #             status: 'online',
        #             status_message: '',
        #             auction_mode: False
        #         },
        #         {
        #             id: 'BTC-USD',
        #             base_currency: 'BTC',
        #             quote_currency: 'USD',
        #             base_min_size: '0.000016',
        #             base_max_size: '1500',
        #             quote_increment: '0.01',
        #             base_increment: '0.00000001',
        #             display_name: 'BTC/USD',
        #             min_market_funds: '1',
        #             max_market_funds: '20000000',
        #             margin_enabled: False,
        #             fx_stablecoin: False,
        #             max_slippage_percentage: '0.02000000',
        #             post_only: False,
        #             limit_only: False,
        #             cancel_only: False,
        #             trading_disabled: False,
        #             status: 'online',
        #             status_message: '',
        #             auction_mode: False
        #         }
        #     ]
        #
        result = []
        for i in range(0, len(response)):
            market = response[i]
            id = self.safe_string(market, 'id')
            # Derive base/quote from the market id, not the dedicated fields:
            baseId, quoteId = id.split('-')
            # BTCAUCTION-USD vs BTC-USD conflict workaround, see the output sample above
            # baseId = self.safe_string(market, 'base_currency')
            # quoteId = self.safe_string(market, 'quote_currency')
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            status = self.safe_string(market, 'status')
            # Merge the static trading-fee defaults into each unified market.
            result.append(self.extend(self.fees['trading'], {
                'id': id,
                'symbol': base + '/' + quote,
                'base': base,
                'quote': quote,
                'settle': None,
                'baseId': baseId,
                'quoteId': quoteId,
                'settleId': None,
                'type': 'spot',
                'spot': True,
                'margin': self.safe_value(market, 'margin_enabled'),
                'swap': False,
                'future': False,
                'option': False,
                'active': (status == 'online'),
                'contract': False,
                'linear': None,
                'inverse': None,
                'contractSize': None,
                'expiry': None,
                'expiryDatetime': None,
                'strike': None,
                'optionType': None,
                'precision': {
                    'amount': self.safe_number(market, 'base_increment'),
                    'price': self.safe_number(market, 'quote_increment'),
                },
                'limits': {
                    'leverage': {
                        'min': None,
                        'max': None,
                    },
                    'amount': {
                        'min': None,
                        'max': None,
                    },
                    'price': {
                        'min': None,
                        'max': None,
                    },
                    'cost': {
                        'min': self.safe_number(market, 'min_market_funds'),
                        'max': None,
                    },
                },
                'info': market,
            }))
        return result
async def fetch_accounts(self, params={}):
"""
fetch all the accounts associated with a profile
:param dict [params]: extra parameters specific to the coinbasepro api endpoint
:returns dict: a dictionary of `account structures <https://github.com/ccxt/ccxt/wiki/Manual#account-structure>` indexed by the account type
"""
await self.load_markets()
response = await self.privateGetAccounts(params)
#
# [
# {
# id: '4aac9c60-cbda-4396-9da4-4aa71e95fba0',
# currency: 'BTC',
# balance: '0.0000000000000000',
# available: '0',
# hold: '0.0000000000000000',
# profile_id: 'b709263e-f42a-4c7d-949a-a95c83d065da'
# },
# {
# id: 'f75fa69a-1ad1-4a80-bd61-ee7faa6135a3',
# currency: 'USDC',
# balance: '0.0000000000000000',
# available: '0',
# hold: '0.0000000000000000',
# profile_id: 'b709263e-f42a-4c7d-949a-a95c83d065da'
# },
# ]
#
return self.parse_accounts(response, params)
def parse_account(self, account):
#
# {
# id: '4aac9c60-cbda-4396-9da4-4aa71e95fba0',
# currency: 'BTC',
# balance: '0.0000000000000000',
# available: '0',
# hold: '0.0000000000000000',
# profile_id: 'b709263e-f42a-4c7d-949a-a95c83d065da'
# }
#
currencyId = self.safe_string(account, 'currency')
return {
'id': self.safe_string(account, 'id'),
'type': None,
'code': self.safe_currency_code(currencyId),
'info': account,
}
def parse_balance(self, response):
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'hold')
account['total'] = self.safe_string(balance, 'balance')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict [params]: extra parameters specific to the coinbasepro api endpoint
:returns dict: a `balance structure <https://github.com/ccxt/ccxt/wiki/Manual#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetAccounts(params)
return self.parse_balance(response)
async def fetch_order_book(self, symbol: str, limit: Optional[int] = None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int [limit]: the maximum amount of order book entries to return
:param dict [params]: extra parameters specific to the coinbasepro api endpoint
:returns dict: A dictionary of `order book structures <https://github.com/ccxt/ccxt/wiki/Manual#order-book-structure>` indexed by market symbols
"""
await self.load_markets()
# level 1 - only the best bid and ask
# level 2 - top 50 bids and asks(aggregated)
# level 3 - full order book(non aggregated)
request = {
'id': self.market_id(symbol),
'level': 2, # 1 best bidask, 2 aggregated, 3 full
}
response = await self.publicGetProductsIdBook(self.extend(request, params))
#
# {
# "sequence":1924393896,
# "bids":[
# ["0.01825","24.34811287",2],
# ["0.01824","72.5463",3],
# ["0.01823","424.54298049",6],
# ],
# "asks":[
# ["0.01826","171.10414904",4],
# ["0.01827","22.60427028",1],
# ["0.01828","397.46018784",7],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'sequence')
return orderbook
    def parse_ticker(self, ticker, market=None):
        """
        Build a unified ticker structure from a raw exchange payload.

        The raw `ticker` is either a list(one spark-line candle, the shape
        used by fetchTickers) or a dict(the shapes returned by the ticker
        and stats endpoints used by fetchTicker) - see the samples below.
        """
        #
        # fetchTickers
        #
        #     [
        #         1639472400,  # timestamp
        #         4.26,  # low
        #         4.38,  # high
        #         4.35,  # open
        #         4.27  # close
        #     ]
        #
        # fetchTicker
        #
        #     publicGetProductsIdTicker
        #
        #     {
        #         "trade_id":843439,
        #         "price":"0.997999",
        #         "size":"80.29769",
        #         "time":"2020-01-28T02:13:33.012523Z",
        #         "bid":"0.997094",
        #         "ask":"0.998",
        #         "volume":"1903188.03750000"
        #     }
        #
        #     publicGetProductsIdStats
        #
        #     {
        #         "open": "34.19000000",
        #         "high": "95.70000000",
        #         "low": "7.06000000",
        #         "volume": "2.41000000"
        #     }
        #
        timestamp = None
        bid = None
        ask = None
        last = None
        high = None
        low = None
        open = None
        volume = None
        symbol = None if (market is None) else market['symbol']
        if isinstance(ticker, list):
            # list form: only the close price(index 4) is consumed,
            # and the local clock is used for the timestamp
            last = self.safe_string(ticker, 4)
            timestamp = self.milliseconds()
        else:
            # dict form: the stats endpoint lacks bid/ask/time, so the
            # corresponding fields simply stay None
            timestamp = self.parse8601(self.safe_value(ticker, 'time'))
            bid = self.safe_string(ticker, 'bid')
            ask = self.safe_string(ticker, 'ask')
            high = self.safe_string(ticker, 'high')
            low = self.safe_string(ticker, 'low')
            open = self.safe_string(ticker, 'open')
            last = self.safe_string_2(ticker, 'price', 'last')
            volume = self.safe_string(ticker, 'volume')
        return self.safe_ticker({
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': high,
            'low': low,
            'bid': bid,
            'bidVolume': None,
            'ask': ask,
            'askVolume': None,
            'vwap': None,
            'open': open,
            'close': last,
            'last': last,
            'previousClose': None,
            'change': None,
            'percentage': None,
            'average': None,
            'baseVolume': volume,
            'quoteVolume': None,
            'info': ticker,
        }, market)
async def fetch_tickers(self, symbols: Optional[List[str]] = None, params={}):
"""
fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours each market
:param str[]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict [params]: extra parameters specific to the coinbasepro api endpoint
:returns dict: a dictionary of `ticker structures <https://github.com/ccxt/ccxt/wiki/Manual#ticker-structure>`
"""
await self.load_markets()
symbols = self.market_symbols(symbols)
request = {}
response = await self.publicGetProductsSparkLines(self.extend(request, params))
#
# {
# YYY-USD: [
# [
# 1639472400, # timestamp
# 4.26, # low
# 4.38, # high
# 4.35, # open
# 4.27 # close
# ],
# [
# 1639468800,
# 4.31,
# 4.45,
# 4.35,
# 4.35
# ],
# ]
# }
#
result = {}
marketIds = list(response.keys())
delimiter = '-'
for i in range(0, len(marketIds)):
marketId = marketIds[i]
entry = self.safe_value(response, marketId, [])
first = self.safe_value(entry, 0, [])
market = self.safe_market(marketId, None, delimiter)
symbol = market['symbol']
result[symbol] = self.parse_ticker(first, market)
return self.filter_by_array(result, 'symbol', symbols)
async def fetch_ticker(self, symbol: str, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict [params]: extra parameters specific to the coinbasepro api endpoint
:returns dict: a `ticker structure <https://github.com/ccxt/ccxt/wiki/Manual#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'id': market['id'],
}
# publicGetProductsIdTicker or publicGetProductsIdStats
method = self.safe_string(self.options, 'fetchTickerMethod', 'publicGetProductsIdTicker')
response = await getattr(self, method)(self.extend(request, params))
#
# publicGetProductsIdTicker
#
# {
# "trade_id":843439,
# "price":"0.997999",
# "size":"80.29769",
# "time":"2020-01-28T02:13:33.012523Z",
# "bid":"0.997094",
# "ask":"0.998",
# "volume":"1903188.03750000"
# }
#
# publicGetProductsIdStats
#
# {
# "open": "34.19000000",
# "high": "95.70000000",
# "low": "7.06000000",
# "volume": "2.41000000"
# }
#
return self.parse_ticker(response, market)
    def parse_trade(self, trade, market=None):
        """
        Build a unified trade structure from a raw exchange trade/fill.
        Handles both public trades(fetchTrades) and private fills
        (fetchMyTrades), which report the `side` field differently.
        """
        #
        # {
        #     type: 'match',
        #     trade_id: 82047307,
        #     maker_order_id: '0f358725-2134-435e-be11-753912a326e0',
        #     taker_order_id: '252b7002-87a3-425c-ac73-f5b9e23f3caf',
        #     order_id: 'd50ec984-77a8-460a-b958-66f114b0de9b',
        #     side: 'sell',
        #     size: '0.00513192',
        #     price: '9314.78',
        #     product_id: 'BTC-USD',
        #     profile_id: '6244401d-c078-40d9-b305-7ad3551bc3b0',
        #     sequence: 12038915443,
        #     time: '2020-01-31T20:03:41.158814Z'
        #     created_at: '2014-11-07T22:19:28.578544Z',
        #     liquidity: 'T',
        #     fee: '0.00025',
        #     settled: True,
        #     usd_volume: '0.0924556000000000',
        #     user_id: '595eb864313c2b02ddf2937d'
        # }
        #
        timestamp = self.parse8601(self.safe_string_2(trade, 'time', 'created_at'))
        marketId = self.safe_string(trade, 'product_id')
        market = self.safe_market(marketId, market, '-')
        feeRate = None
        takerOrMaker = None
        cost = None
        feeCurrencyId = self.safe_string_lower(market, 'quoteId')
        if feeCurrencyId is not None:
            # e.g. 'usd_value' for a USD-quoted market
            # NOTE(review): the fill sample above exposes 'usd_volume', not
            # 'usd_value' - this lookup may always miss; verify against live fills
            costField = feeCurrencyId + '_value'
            cost = self.safe_string(trade, costField)
            liquidity = self.safe_string(trade, 'liquidity')
            if liquidity is not None:
                takerOrMaker = 'taker' if (liquidity == 'T') else 'maker'
                feeRate = self.safe_string(market, takerOrMaker)
        feeCost = self.safe_string_2(trade, 'fill_fees', 'fee')
        fee = {
            'cost': feeCost,
            'currency': market['quote'],
            'rate': feeRate,
        }
        id = self.safe_string(trade, 'trade_id')
        # public trades report the taker's counterparty side, so invert by default
        side = 'sell' if (trade['side'] == 'buy') else 'buy'
        orderId = self.safe_string(trade, 'order_id')
        # Coinbase Pro returns inverted side to fetchMyTrades vs fetchTrades,
        # so keep the reported side as-is for private fills(identified by the
        # presence of order ids)
        makerOrderId = self.safe_string(trade, 'maker_order_id')
        takerOrderId = self.safe_string(trade, 'taker_order_id')
        if (orderId is not None) or ((makerOrderId is not None) and (takerOrderId is not None)):
            side = 'buy' if (trade['side'] == 'buy') else 'sell'
        price = self.safe_string(trade, 'price')
        amount = self.safe_string(trade, 'size')
        return self.safe_trade({
            'id': id,
            'order': orderId,
            'info': trade,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': market['symbol'],
            'type': None,
            'takerOrMaker': takerOrMaker,
            'side': side,
            'price': price,
            'amount': amount,
            'fee': fee,
            'cost': cost,
        }, market)
async def fetch_my_trades(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetch all trades made by the user
:param str symbol: unified market symbol
:param int [since]: the earliest time in ms to fetch trades for
:param int [limit]: the maximum number of trades structures to retrieve
:param dict [params]: extra parameters specific to the coinbasepro api endpoint
:returns Trade[]: a list of `trade structures <https://github.com/ccxt/ccxt/wiki/Manual#trade-structure>`
"""
# 2018-08-23
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'product_id': market['id'],
}
if limit is not None:
request['limit'] = limit
response = await self.privateGetFills(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
async def fetch_trades(self, symbol: str, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int [since]: timestamp in ms of the earliest trade to fetch
:param int [limit]: the maximum amount of trades to fetch
:param dict [params]: extra parameters specific to the coinbasepro api endpoint
:returns Trade[]: a list of `trade structures <https://github.com/ccxt/ccxt/wiki/Manual#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'id': market['id'], # fixes issue #2
}
if limit is not None:
request['limit'] = limit # default 100
response = await self.publicGetProductsIdTrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict [params]: extra parameters specific to the coinbasepro api endpoint
:returns dict: a dictionary of `fee structures <https://github.com/ccxt/ccxt/wiki/Manual#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetFees(params)
#
# {
# "maker_fee_rate": "0.0050",
# "taker_fee_rate": "0.0050",
# "usd_volume": "43806.92"
# }
#
maker = self.safe_number(response, 'maker_fee_rate')
taker = self.safe_number(response, 'taker_fee_rate')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1591514160,
# 0.02507,
# 0.02507,
# 0.02507,
# 0.02507,
# 0.02816506
# ]
#
return [
self.safe_timestamp(ohlcv, 0),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol: str, timeframe='1m', since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int [since]: timestamp in ms of the earliest candle to fetch
:param int [limit]: the maximum amount of candles to fetch
:param dict [params]: extra parameters specific to the coinbasepro api endpoint
:returns int[][]: A list of candles ordered, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
parsedTimeframe = self.safe_integer(self.timeframes, timeframe)
request = {
'id': market['id'],
}
if parsedTimeframe is not None:
request['granularity'] = parsedTimeframe
else:
request['granularity'] = timeframe
if since is not None:
request['start'] = self.iso8601(since)
if limit is None:
# https://docs.pro.coinbase.com/#get-historic-rates
limit = 300 # max = 300
else:
limit = min(300, limit)
parsedTimeframeMilliseconds = parsedTimeframe * 1000
if since % parsedTimeframeMilliseconds == 0:
request['end'] = self.iso8601(self.sum((limit - 1) * parsedTimeframeMilliseconds, since))
else:
request['end'] = self.iso8601(self.sum(limit * parsedTimeframeMilliseconds, since))
response = await self.publicGetProductsIdCandles(self.extend(request, params))
#
# [
# [1591514160,0.02507,0.02507,0.02507,0.02507,0.02816506],
# [1591514100,0.02507,0.02507,0.02507,0.02507,1.63830323],
# [1591514040,0.02505,0.02507,0.02505,0.02507,0.19918178]
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict [params]: extra parameters specific to the coinbasepro api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {
# "iso":"2020-05-12T08:00:51.504Z",
# "epoch":1589270451.504
# }
#
return self.safe_timestamp(response, 'epoch')
def parse_order_status(self, status):
statuses = {
'pending': 'open',
'active': 'open',
'open': 'open',
'done': 'closed',
'canceled': 'canceled',
'canceling': 'open',
}
return self.safe_string(statuses, status, status)
    def parse_order(self, order, market=None):
        """
        Build a unified order structure from a raw exchange order.
        """
        #
        # createOrder
        #
        #     {
        #         "id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
        #         "price": "0.10000000",
        #         "size": "0.01000000",
        #         "product_id": "BTC-USD",
        #         "side": "buy",
        #         "stp": "dc",
        #         "type": "limit",
        #         "time_in_force": "GTC",
        #         "post_only": False,
        #         "created_at": "2016-12-08T20:02:28.53864Z",
        #         "fill_fees": "0.0000000000000000",
        #         "filled_size": "0.00000000",
        #         "executed_value": "0.0000000000000000",
        #         "status": "pending",
        #         "settled": False
        #     }
        #
        timestamp = self.parse8601(self.safe_string(order, 'created_at'))
        marketId = self.safe_string(order, 'product_id')
        market = self.safe_market(marketId, market, '-')
        status = self.parse_order_status(self.safe_string(order, 'status'))
        # a 'done' order with done_reason == 'canceled' was canceled, not filled
        doneReason = self.safe_string(order, 'done_reason')
        if (status == 'closed') and (doneReason == 'canceled'):
            status = 'canceled'
        price = self.safe_string(order, 'price')
        filled = self.safe_string(order, 'filled_size')
        # fall back to the filled size when 'size' is absent
        # (presumably market orders specified by 'funds' - verify)
        amount = self.safe_string(order, 'size', filled)
        cost = self.safe_string(order, 'executed_value')
        feeCost = self.safe_number(order, 'fill_fees')
        fee = None
        if feeCost is not None:
            # fees are charged in the quote currency
            fee = {
                'cost': feeCost,
                'currency': market['quote'],
                'rate': None,
            }
        id = self.safe_string(order, 'id')
        type = self.safe_string(order, 'type')
        side = self.safe_string(order, 'side')
        timeInForce = self.safe_string(order, 'time_in_force')
        postOnly = self.safe_value(order, 'post_only')
        stopPrice = self.safe_number(order, 'stop_price')
        clientOrderId = self.safe_string(order, 'client_oid')
        # 'remaining' and 'average' are left None and derived by safe_order
        return self.safe_order({
            'id': id,
            'clientOrderId': clientOrderId,
            'info': order,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': None,
            'status': status,
            'symbol': market['symbol'],
            'type': type,
            'timeInForce': timeInForce,
            'postOnly': postOnly,
            'side': side,
            'price': price,
            'stopPrice': stopPrice,
            'triggerPrice': stopPrice,
            'cost': cost,
            'amount': amount,
            'filled': filled,
            'remaining': None,
            'fee': fee,
            'average': None,
            'trades': None,
        }, market)
async def fetch_order(self, id: str, symbol: Optional[str] = None, params={}):
"""
fetches information on an order made by the user
:param str symbol: not used by coinbasepro fetchOrder
:param dict [params]: extra parameters specific to the coinbasepro api endpoint
:returns dict: An `order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
await self.load_markets()
request = {}
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client_oid')
method = None
if clientOrderId is None:
method = 'privateGetOrdersId'
request['id'] = id
else:
method = 'privateGetOrdersClientClientOid'
request['client_oid'] = clientOrderId
params = self.omit(params, ['clientOrderId', 'client_oid'])
response = await getattr(self, method)(self.extend(request, params))
return self.parse_order(response)
async def fetch_order_trades(self, id: str, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetch all the trades made from a single order
:param str id: order id
:param str symbol: unified market symbol
:param int [since]: the earliest time in ms to fetch trades for
:param int [limit]: the maximum number of trades to retrieve
:param dict [params]: extra parameters specific to the coinbasepro api endpoint
:returns dict[]: a list of `trade structures <https://github.com/ccxt/ccxt/wiki/Manual#trade-structure>`
"""
await self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
request = {
'order_id': id,
}
response = await self.privateGetFills(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
async def fetch_orders(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetches information on multiple orders made by the user
:param str symbol: unified market symbol of the market orders were made in
:param int [since]: the earliest time in ms to fetch orders for
:param int [limit]: the maximum number of orde structures to retrieve
:param dict [params]: extra parameters specific to the coinbasepro api endpoint
:returns Order[]: a list of `order structures <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
request = {
'status': 'all',
}
return await self.fetch_open_orders(symbol, since, limit, self.extend(request, params))
async def fetch_open_orders(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetch all unfilled currently open orders
:param str symbol: unified market symbol
:param int [since]: the earliest time in ms to fetch open orders for
:param int [limit]: the maximum number of open orders structures to retrieve
:param dict [params]: extra parameters specific to the coinbasepro api endpoint
:returns Order[]: a list of `order structures <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['product_id'] = market['id']
if limit is not None:
request['limit'] = limit # default 100
response = await self.privateGetOrders(self.extend(request, params))
return self.parse_orders(response, market, since, limit)
async def fetch_closed_orders(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetches information on multiple closed orders made by the user
:param str symbol: unified market symbol of the market orders were made in
:param int [since]: the earliest time in ms to fetch orders for
:param int [limit]: the maximum number of orde structures to retrieve
:param dict [params]: extra parameters specific to the coinbasepro api endpoint
:returns Order[]: a list of `order structures <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
request = {
'status': 'done',
}
return await self.fetch_open_orders(symbol, since, limit, self.extend(request, params))
    async def create_order(self, symbol: str, type: OrderType, side: OrderSide, amount, price=None, params={}):
        """
        create a trade order
        :param str symbol: unified symbol of the market to create an order in
        :param str type: 'market' or 'limit'
        :param str side: 'buy' or 'sell'
        :param float amount: how much of currency you want to trade in units of base currency
        :param float [price]: the price at which the order is to be fullfilled, in units of the quote currency, ignored in market orders
        :param dict [params]: extra parameters specific to the coinbasepro api endpoint
        :returns dict: an `order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
        """
        await self.load_markets()
        market = self.market(symbol)
        request = {
            # common params --------------------------------------------------
            # 'client_oid': clientOrderId,
            'type': type,
            'side': side,
            'product_id': market['id'],
            # 'size': self.amount_to_precision(symbol, amount),
            # 'stp': 'dc',  # self-trade prevention, dc = decrease and cancel, co = cancel oldest, cn = cancel newest, cb = cancel both
            # 'stop': 'loss',  # "loss" = stop loss below price, "entry" = take profit above price
            # 'stop_price': self.price_to_precision(symbol, price),
            # limit order params ---------------------------------------------
            # 'price': self.price_to_precision(symbol, price),
            # 'size': self.amount_to_precision(symbol, amount),
            # 'time_in_force': 'GTC',  # GTC, GTT, IOC, or FOK
            # 'cancel_after' [optional]* min, hour, day, requires time_in_force to be GTT
            # 'post_only': False,  # invalid when time_in_force is IOC or FOK
            # market order params --------------------------------------------
            # 'size': self.amount_to_precision(symbol, amount),
            # 'funds': self.cost_to_precision(symbol, amount),
        }
        # unified params are translated to exchange-specific fields below,
        # then removed from params so they are not sent twice
        clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client_oid')
        if clientOrderId is not None:
            request['client_oid'] = clientOrderId
        stopPrice = self.safe_number_2(params, 'stopPrice', 'stop_price')
        if stopPrice is not None:
            request['stop_price'] = self.price_to_precision(symbol, stopPrice)
        timeInForce = self.safe_string_2(params, 'timeInForce', 'time_in_force')
        if timeInForce is not None:
            request['time_in_force'] = timeInForce
        postOnly = self.safe_value_2(params, 'postOnly', 'post_only', False)
        if postOnly:
            request['post_only'] = True
        params = self.omit(params, ['timeInForce', 'time_in_force', 'stopPrice', 'stop_price', 'clientOrderId', 'client_oid', 'postOnly', 'post_only'])
        if type == 'limit':
            request['price'] = self.price_to_precision(symbol, price)
            request['size'] = self.amount_to_precision(symbol, amount)
        elif type == 'market':
            # market orders can be sized either in quote currency('funds')
            # or in base currency('size')
            cost = self.safe_number_2(params, 'cost', 'funds')
            if cost is None:
                if price is not None:
                    # a price hint on a market order is used to estimate
                    # the quote cost as amount * price
                    cost = amount * price
            else:
                params = self.omit(params, ['cost', 'funds'])
            if cost is not None:
                request['funds'] = self.cost_to_precision(symbol, cost)
            else:
                request['size'] = self.amount_to_precision(symbol, amount)
        response = await self.privatePostOrders(self.extend(request, params))
        #
        # {
        #     "id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
        #     "price": "0.10000000",
        #     "size": "0.01000000",
        #     "product_id": "BTC-USD",
        #     "side": "buy",
        #     "stp": "dc",
        #     "type": "limit",
        #     "time_in_force": "GTC",
        #     "post_only": False,
        #     "created_at": "2016-12-08T20:02:28.53864Z",
        #     "fill_fees": "0.0000000000000000",
        #     "filled_size": "0.00000000",
        #     "executed_value": "0.0000000000000000",
        #     "status": "pending",
        #     "settled": False
        # }
        #
        return self.parse_order(response, market)
async def cancel_order(self, id: str, symbol: Optional[str] = None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict [params]: extra parameters specific to the coinbasepro api endpoint
:returns dict: An `order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
await self.load_markets()
request = {
# 'product_id': market['id'], # the request will be more performant if you include it
}
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client_oid')
method = None
if clientOrderId is None:
method = 'privateDeleteOrdersId'
request['id'] = id
else:
method = 'privateDeleteOrdersClientClientOid'
request['client_oid'] = clientOrderId
params = self.omit(params, ['clientOrderId', 'client_oid'])
market = None
if symbol is not None:
market = self.market(symbol)
request['product_id'] = market['symbol'] # the request will be more performant if you include it
return await getattr(self, method)(self.extend(request, params))
async def cancel_all_orders(self, symbol: Optional[str] = None, params={}):
"""
cancel all open orders
:param str symbol: unified market symbol, only orders in the market of self symbol are cancelled when symbol is not None
:param dict [params]: extra parameters specific to the coinbasepro api endpoint
:returns dict[]: a list of `order structures <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['product_id'] = market['symbol'] # the request will be more performant if you include it
return await self.privateDeleteOrders(self.extend(request, params))
    async def fetch_payment_methods(self, params={}):
        """
        fetch the payment methods saved on the account
        :param dict [params]: extra parameters specific to the coinbasepro api endpoint
        :returns: the raw, unparsed response from the exchange
        """
        return await self.privateGetPaymentMethods(params)
    async def deposit(self, code: str, amount, address, params={}):
        """
        Creates a new deposit address, by coinbasepro
        :param str code: Unified CCXT currency code(e.g. `"USDT"`)
        :param float amount: The amount of currency to send in the deposit(e.g. `20`)
        :param str address: Not used by coinbasepro
        :param dict [params]: Parameters specific to the exchange API endpoint(e.g. `{"network": "TRX"}`)
        :returns: a `transaction structure <https://github.com/ccxt/ccxt/wiki/Manual#transaction-structure>`
        """
        await self.load_markets()
        currency = self.currency(code)
        request = {
            'currency': currency['id'],
            'amount': amount,
        }
        # the endpoint name is built from the funding source given in params
        method = 'privatePostDeposits'
        if 'payment_method_id' in params:
            # deposit from a payment_method, like a bank account
            method += 'PaymentMethod'
        elif 'coinbase_account_id' in params:
            # deposit into Coinbase Pro account from a Coinbase account
            method += 'CoinbaseAccount'
        else:
            # otherwise we did not receive a supported deposit location
            # relevant docs link for the Googlers
            # https://docs.pro.coinbase.com/#deposits
            raise NotSupported(self.id + ' deposit() requires one of `coinbase_account_id` or `payment_method_id` extra params')
        response = await getattr(self, method)(self.extend(request, params))
        if not response:
            raise ExchangeError(self.id + ' deposit() error: ' + self.json(response))
        return {
            'info': response,
            'id': response['id'],
        }
async def withdraw(self, code: str, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str tag:
:param dict [params]: extra parameters specific to the coinbasepro api endpoint
:returns dict: a `transaction structure <https://github.com/ccxt/ccxt/wiki/Manual#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
'amount': amount,
}
method = 'privatePostWithdrawals'
if 'payment_method_id' in params:
method += 'PaymentMethod'
elif 'coinbase_account_id' in params:
method += 'CoinbaseAccount'
else:
method += 'Crypto'
request['crypto_address'] = address
if tag is not None:
request['destination_tag'] = tag
response = await getattr(self, method)(self.extend(request, params))
if not response:
raise ExchangeError(self.id + ' withdraw() error: ' + self.json(response))
return self.parse_transaction(response, currency)
def parse_ledger_entry_type(self, type):
types = {
'transfer': 'transfer', # Funds moved between portfolios
'match': 'trade', # Funds moved result of a trade
'fee': 'fee', # Fee result of a trade
'rebate': 'rebate', # Fee rebate
'conversion': 'trade', # Funds converted between fiat currency and a stablecoin
}
return self.safe_string(types, type, type)
    def parse_ledger_entry(self, item, currency=None):
        """
        Build a unified ledger entry from a raw account ledger item.
        """
        # {
        #     id: '12087495079',
        #     amount: '-0.0100000000000000',
        #     balance: '0.0645419900000000',
        #     created_at: '2021-10-28T17:14:32.593168Z',
        #     type: 'transfer',
        #     details: {
        #         from: '2f74edf7-1440-4586-86dc-ae58c5693691',
        #         profile_transfer_id: '3ef093ad-2482-40d1-8ede-2f89cff5099e',
        #         to: 'dda99503-4980-4b60-9549-0b770ee51336'
        #     }
        # },
        # {
        #     id: '11740725774',
        #     amount: '-1.7565669701255000',
        #     balance: '0.0016490047745000',
        #     created_at: '2021-10-22T03:47:34.764122Z',
        #     type: 'fee',
        #     details: {
        #         order_id: 'ad06abf4-95ab-432a-a1d8-059ef572e296',
        #         product_id: 'ETH-DAI',
        #         trade_id: '1740617'
        #     }
        # }
        id = self.safe_string(item, 'id')
        amountString = self.safe_string(item, 'amount')
        direction = None
        # 'balance' is the post-entry balance; derive the pre-entry balance
        # by subtracting the signed amount(string math via Precise)
        afterString = self.safe_string(item, 'balance')
        beforeString = Precise.string_sub(afterString, amountString)
        if Precise.string_lt(amountString, '0'):
            direction = 'out'
            amountString = Precise.string_abs(amountString)
        else:
            direction = 'in'
        amount = self.parse_number(amountString)
        after = self.parse_number(afterString)
        before = self.parse_number(beforeString)
        timestamp = self.parse8601(self.safe_value(item, 'created_at'))
        type = self.parse_ledger_entry_type(self.safe_string(item, 'type'))
        # the raw item carries no currency field of its own - the code comes
        # from the `currency` argument(stamped by fetch_ledger)
        code = self.safe_currency_code(None, currency)
        details = self.safe_value(item, 'details', {})
        account = None
        referenceAccount = None
        referenceId = None
        if type == 'transfer':
            # portfolio transfers reference the source/destination profiles
            account = self.safe_string(details, 'from')
            referenceAccount = self.safe_string(details, 'to')
            referenceId = self.safe_string(details, 'profile_transfer_id')
        else:
            referenceId = self.safe_string(details, 'order_id')
        status = 'ok'
        return {
            'id': id,
            'currency': code,
            'account': account,
            'referenceAccount': referenceAccount,
            'referenceId': referenceId,
            'status': status,
            'amount': amount,
            'before': before,
            'after': after,
            'fee': None,
            'direction': direction,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'type': type,
            'info': item,
        }
async def fetch_ledger(self, code: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetch the history of changes, actions done by the user or operations that altered balance of the user
:param str code: unified currency code, default is None
:param int [since]: timestamp in ms of the earliest ledger entry, default is None
:param int [limit]: max number of ledger entrys to return, default is None
:param dict [params]: extra parameters specific to the coinbasepro api endpoint
:returns dict: a `ledger structure <https://github.com/ccxt/ccxt/wiki/Manual#ledger-structure>`
"""
# https://docs.cloud.coinbase.com/exchange/reference/exchangerestapi_getaccountledger
if code is None:
raise ArgumentsRequired(self.id + ' fetchLedger() requires a code param')
await self.load_markets()
await self.load_accounts()
currency = self.currency(code)
accountsByCurrencyCode = self.index_by(self.accounts, 'code')
account = self.safe_value(accountsByCurrencyCode, code)
if account is None:
raise ExchangeError(self.id + ' fetchLedger() could not find account id for ' + code)
request = {
'id': account['id'],
# 'start_date': self.iso8601(since),
# 'end_date': self.iso8601(self.milliseconds()),
# 'before': 'cursor', # sets start cursor to before date
# 'after': 'cursor', # sets end cursor to after date
# 'limit': limit, # default 100
# 'profile_id': 'string'
}
if since is not None:
request['start_date'] = self.iso8601(since)
if limit is not None:
request['limit'] = limit # default 100
response = await self.privateGetAccountsIdLedger(self.extend(request, params))
for i in range(0, len(response)):
response[i]['currency'] = code
return self.parse_ledger(response, currency, since, limit)
async def fetch_deposits_withdrawals(self, code: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetch history of deposits and withdrawals
see https://docs.cloud.coinbase.com/exchange/reference/exchangerestapi_gettransfers
see https://docs.cloud.coinbase.com/exchange/reference/exchangerestapi_getaccounttransfers
:param str [code]: unified currency code for the currency of the deposit/withdrawals, default is None
:param int [since]: timestamp in ms of the earliest deposit/withdrawal, default is None
:param int [limit]: max number of deposit/withdrawals to return, default is None
:param dict [params]: extra parameters specific to the coinbasepro api endpoint
:param str [params.id]: account id, when defined, the endpoint used is '/accounts/{account_id}/transfers/' instead of '/transfers/'
:returns dict: a list of `transaction structure <https://github.com/ccxt/ccxt/wiki/Manual#transaction-structure>`
"""
await self.load_markets()
await self.load_accounts()
currency = None
id = self.safe_string(params, 'id') # account id
if id is None:
if code is not None:
currency = self.currency(code)
accountsByCurrencyCode = self.index_by(self.accounts, 'code')
account = self.safe_value(accountsByCurrencyCode, code)
if account is None:
raise ExchangeError(self.id + ' fetchDepositsWithdrawals() could not find account id for ' + code)
id = account['id']
request = {}
if id is not None:
request['id'] = id
if limit is not None:
request['limit'] = limit
response = None
if id is None:
response = await self.privateGetTransfers(self.extend(request, params))
#
# [
# {
# "id": "bee6fd7c-afb2-4e47-8298-671d09997d16",
# "type": "deposit",
# "created_at": "2022-12-21 00:48:45.477503+00",
# "completed_at": null,
# "account_id": "sal3802-36bd-46be-a7b8-alsjf383sldak",
# "user_id": "6382048209f92as392039dlks2",
# "amount": "0.01000000",
# "details": {
# "network": "litecoin",
# "crypto_address": "MKemtnCFUYKsNWaf5EMYMpwSszcXWFDtTY",
# "coinbase_account_id": "fl2b6925-f6ba-403n-jj03-40fl435n430f",
# "coinbase_transaction_id": "63a25bb13cb5cf0001d2cf17", # withdrawals only
# "crypto_transaction_hash": "752f35570736341e2a253f7041a34cf1e196fc56128c900fd03d99da899d94c1",
# "tx_service_transaction_id": "1873249104",
# "coinbase_payment_method_id": ""
# },
# "canceled_at": null,
# "processed_at": null,
# "user_nonce": null,
# "idem": "5e3201b0-e390-5k3k-a913-c32932049242",
# "profile_id": "k3k302a8-c4dk-4f49-9d39-3203923wpk39",
# "currency": "LTC"
# }
# ]
#
for i in range(0, len(response)):
account_id = self.safe_string(response[i], 'account_id')
account = self.safe_value(self.accountsById, account_id)
codeInner = self.safe_string(account, 'code')
response[i]['currency'] = codeInner
else:
response = await self.privateGetAccountsIdTransfers(self.extend(request, params))
#
# [
# {
# "id": "bee6fd7c-afb2-4e47-8298-671d09997d16",
# "type": "deposit",
# "created_at": "2022-12-21 00:48:45.477503+00",
# "completed_at": null,
# "amount": "0.01000000",
# "details": {
# "network": "litecoin",
# "crypto_address": "MKemtnCFUYKsNWaf5EMYMpwSszcXWFDtTY",
# "coinbase_account_id": "fl2b6925-f6ba-403n-jj03-40fl435n430f",
# "coinbase_transaction_id": "63a25bb13cb5cf0001d2cf17", # withdrawals only
# "crypto_transaction_hash": "752f35570736341e2a253f7041a34cf1e196fc56128c900fd03d99da899d94c1",
# "tx_service_transaction_id": "1873249104",
# "coinbase_payment_method_id": ""
# },
# "canceled_at": null,
# "processed_at": null,
# "user_nonce": null,
# "idem": "5e3201b0-e390-5k3k-a913-c32932049242",
# "profile_id": "k3k302a8-c4dk-4f49-9d39-3203923wpk39",
# "currency": "LTC"
# }
# ]
#
for i in range(0, len(response)):
response[i]['currency'] = code
return self.parse_transactions(response, currency, since, limit)
async def fetch_deposits(self, code: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetch all deposits made to an account
:param str code: unified currency code
:param int [since]: the earliest time in ms to fetch deposits for
:param int [limit]: the maximum number of deposits structures to retrieve
:param dict [params]: extra parameters specific to the coinbasepro api endpoint
:returns dict[]: a list of `transaction structures <https://github.com/ccxt/ccxt/wiki/Manual#transaction-structure>`
"""
return await self.fetch_deposits_withdrawals(code, since, limit, self.extend({'type': 'deposit'}, params))
async def fetch_withdrawals(self, code: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetch all withdrawals made from an account
:param str code: unified currency code
:param int [since]: the earliest time in ms to fetch withdrawals for
:param int [limit]: the maximum number of withdrawals structures to retrieve
:param dict [params]: extra parameters specific to the coinbasepro api endpoint
:returns dict[]: a list of `transaction structures <https://github.com/ccxt/ccxt/wiki/Manual#transaction-structure>`
"""
return await self.fetch_deposits_withdrawals(code, since, limit, self.extend({'type': 'withdraw'}, params))
def parse_transaction_status(self, transaction):
canceled = self.safe_value(transaction, 'canceled_at')
if canceled:
return 'canceled'
processed = self.safe_value(transaction, 'processed_at')
completed = self.safe_value(transaction, 'completed_at')
if completed:
return 'ok'
elif processed and not completed:
return 'failed'
else:
return 'pending'
def parse_transaction(self, transaction, currency=None):
#
# privateGetTransfers
#
# [
# {
# "id": "bee6fd7c-afb2-4e47-8298-671d09997d16",
# "type": "deposit",
# "created_at": "2022-12-21 00:48:45.477503+00",
# "completed_at": null,
# "account_id": "sal3802-36bd-46be-a7b8-alsjf383sldak", # only from privateGetTransfers
# "user_id": "6382048209f92as392039dlks2", # only from privateGetTransfers
# "amount": "0.01000000",
# "details": {
# "network": "litecoin",
# "crypto_address": "MKemtnCFUYKsNWaf5EMYMpwSszcXWFDtTY",
# "coinbase_account_id": "fl2b6925-f6ba-403n-jj03-40fl435n430f",
# "coinbase_transaction_id": "63a25bb13cb5cf0001d2cf17", # withdrawals only
# "crypto_transaction_hash": "752f35570736341e2a253f7041a34cf1e196fc56128c900fd03d99da899d94c1",
# "tx_service_transaction_id": "1873249104",
# "coinbase_payment_method_id": ""
# },
# "canceled_at": null,
# "processed_at": null,
# "user_nonce": null,
# "idem": "5e3201b0-e390-5k3k-a913-c32932049242",
# "profile_id": "k3k302a8-c4dk-4f49-9d39-3203923wpk39",
# "currency": "LTC"
# }
# ]
#
details = self.safe_value(transaction, 'details', {})
timestamp = self.parse8601(self.safe_string(transaction, 'created_at'))
currencyId = self.safe_string(transaction, 'currency')
code = self.safe_currency_code(currencyId, currency)
amount = self.safe_number(transaction, 'amount')
type = self.safe_string(transaction, 'type')
address = self.safe_string(details, 'crypto_address')
address = self.safe_string(transaction, 'crypto_address', address)
fee = {
'currency': None,
'cost': None,
'rate': None,
}
if type == 'withdraw':
type = 'withdrawal'
address = self.safe_string(details, 'sent_to_address', address)
feeCost = self.safe_number(details, 'fee')
if feeCost is not None:
if amount is not None:
amount -= feeCost
fee['cost'] = feeCost
fee['currency'] = code
networkId = self.safe_string(details, 'network')
return {
'info': transaction,
'id': self.safe_string(transaction, 'id'),
'txid': self.safe_string(details, 'crypto_transaction_hash'),
'type': type,
'currency': code,
'network': self.network_id_to_code(networkId),
'amount': amount,
'status': self.parse_transaction_status(transaction),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': address,
'addressFrom': None,
'addressTo': self.safe_string(details, 'crypto_address'),
'tag': self.safe_string(details, 'destination_tag'),
'tagFrom': None,
'tagTo': None,
'updated': self.parse8601(self.safe_string(transaction, 'processed_at')),
'comment': None,
'fee': fee,
}
    async def create_deposit_address(self, code: str, params={}):
        """
        create a currency deposit address
        :param str code: unified currency code of the currency for the deposit address
        :param dict [params]: extra parameters specific to the coinbasepro api endpoint
        :returns dict: an `address structure <https://github.com/ccxt/ccxt/wiki/Manual#address-structure>`
        :raises InvalidAddress: when no coinbase account exists for the currency
        """
        await self.load_markets()
        currency = self.currency(code)
        # the list of coinbase accounts is fetched once and cached in self.options
        accounts = self.safe_value(self.options, 'coinbaseAccounts')
        if accounts is None:
            accounts = await self.privateGetCoinbaseAccounts()
            self.options['coinbaseAccounts'] = accounts  # cache it
            self.options['coinbaseAccountsByCurrencyId'] = self.index_by(accounts, 'currency')
        currencyId = currency['id']
        account = self.safe_value(self.options['coinbaseAccountsByCurrencyId'], currencyId)
        if account is None:
            # eslint-disable-next-line quotes
            raise InvalidAddress(self.id + " createDepositAddress() could not find currency code " + code + " with id = " + currencyId + " in self.options['coinbaseAccountsByCurrencyId']")
        request = {
            'id': account['id'],
        }
        response = await self.privatePostCoinbaseAccountsIdAddresses(self.extend(request, params))
        address = self.safe_string(response, 'address')
        # optional destination tag/memo, None when the response carries none
        tag = self.safe_string(response, 'destination_tag')
        return {
            'currency': code,
            'address': self.check_address(address),
            'tag': tag,
            'info': response,
        }
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the request URL, JSON body and CB-ACCESS-* authentication headers.

        Private requests are signed with HMAC-SHA256 over
        timestamp + method + request path + body, using the base64-decoded
        API secret, per the Coinbase Exchange authentication scheme.
        """
        request = '/' + self.implode_params(path, params)
        query = self.omit(params, self.extract_params(path))
        if method == 'GET':
            if query:
                request += '?' + self.urlencode(query)
        url = self.implode_hostname(self.urls['api'][api]) + request
        if api == 'private':
            self.check_required_credentials()
            # the nonce doubles as the CB-ACCESS-TIMESTAMP header value
            nonce = str(self.nonce())
            payload = ''
            if method != 'GET':
                if query:
                    body = self.json(query)
                payload = body
            # prehash string: timestamp + method + request path + body
            what = nonce + method + request + payload
            secret = None
            try:
                # the API secret is stored base64-encoded
                secret = self.base64_to_binary(self.secret)
            except Exception as e:
                raise AuthenticationError(self.id + ' sign() invalid base64 secret')
            signature = self.hmac(self.encode(what), secret, hashlib.sha256, 'base64')
            headers = {
                'CB-ACCESS-KEY': self.apiKey,
                'CB-ACCESS-SIGN': signature,
                'CB-ACCESS-TIMESTAMP': nonce,
                'CB-ACCESS-PASSPHRASE': self.password,
                'Content-Type': 'application/json',
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if (code == 400) or (code == 404):
if body[0] == '{':
message = self.safe_string(response, 'message')
feedback = self.id + ' ' + message
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
raise ExchangeError(feedback) # unknown message
raise ExchangeError(self.id + ' ' + body)
return None
async def request(self, path, api='public', method='GET', params={}, headers=None, body=None, config={}):
response = await self.fetch2(path, api, method, params, headers, body, config)
if not isinstance(response, str):
if 'message' in response:
raise ExchangeError(self.id + ' ' + self.json(response))
return response
|
3643013f38b1c22d29572794f6b95f9fab8082b7
|
75c033a6dfc466b54579625018c6e2e59f1e5bd4
|
/setup.py
|
2d72ffa55b29f0387c01476d1b8b2013ffd69704
|
[
"MIT"
] |
permissive
|
crowdbotp/OpenTraj
|
753a0c6d30a5f3134f056c4d53650ce04fa6bcd8
|
e7b12a0897e57a94b02a735248145c85d84dc01f
|
refs/heads/master
| 2023-04-17T02:42:27.501052
| 2022-03-28T19:03:26
| 2022-03-28T19:03:26
| 221,695,317
| 349
| 87
|
MIT
| 2022-12-21T22:58:46
| 2019-11-14T12:42:58
|
Python
|
UTF-8
|
Python
| false
| false
| 718
|
py
|
setup.py
|
from setuptools import setup

# Packaging metadata for the opentraj trajectory-analysis toolkit.
setup(
    name='opentraj',
    version='1.0',
    author='Javad Amirian',
    author_email='amiryan.j@gmail.com',
    packages=['opentraj'],
    scripts=['bin/script1', 'bin/script2'],
    url='https://github.com/crowdbotp/OpenTraj',
    license='MIT',
    description='Tools for analyzing trajectory datasets',
    # read the README with an explicit encoding so builds don't depend on locale
    long_description=open('README.md', encoding='utf-8').read(),
    long_description_content_type='text/markdown',
    install_requires=[
        "numpy",
        "scipy",
        # the 'sklearn' PyPI name is deprecated/brownout-ed; the real package
        # is published as scikit-learn(same import name: sklearn)
        "scikit-learn",
        "pandas",
        "tqdm",
        "pykalman",
        "PyYAML",
    ],
    extras_require={
        'test': [
            "pylint",
            "pytest",
        ],
        'plot': [
            "matplotlib",
            "seaborn",
        ]
    }
)
|
a9db2e25a6ee9fde56013d57047b4d56db274ddf
|
3bc139860403ebd05e278c95fca26e24d5189271
|
/chia/data_layer/s3_plugin_service.py
|
d17c973f716e4a1b8826c95254c70dca955096e2
|
[
"Apache-2.0"
] |
permissive
|
Chia-Network/chia-blockchain
|
a09183b7240b159419b45f8373a41a1062f77ef3
|
d966f3f9e63aed52dbd73544164202a9f11ce3d2
|
refs/heads/main
| 2023-08-31T09:37:13.741283
| 2023-08-30T18:27:22
| 2023-08-30T18:27:22
| 197,153,676
| 12,936
| 2,474
|
Apache-2.0
| 2023-09-14T19:08:51
| 2019-07-16T08:32:40
|
Python
|
UTF-8
|
Python
| false
| false
| 16,377
|
py
|
s3_plugin_service.py
|
from __future__ import annotations
import asyncio
import concurrent.futures
import functools
import json
import logging
import os
import shutil
import sys
import tempfile
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Set
from urllib.parse import urlparse
import boto3 as boto3
import yaml
from aiohttp import web
from botocore.exceptions import ClientError
from chia.data_layer.download_data import is_filename_valid
from chia.types.blockchain_format.sized_bytes import bytes32
log = logging.getLogger(__name__)
plugin_name = "Chia S3 Datalayer plugin"
plugin_version = "0.1.0"
@dataclass(frozen=True)
class StoreConfig:
    """Per-store plugin configuration: the store id, the S3 bucket used for
    uploads(if any), and the set of URLs downloads may be served from."""

    id: bytes32
    bucket: Optional[str]
    urls: Set[str]

    @classmethod
    def unmarshal(cls, d: Dict[str, Any]) -> StoreConfig:
        """Build a StoreConfig from its config-file mapping.

        An empty "upload_bucket" string is normalized to None so the store is
        correctly reported as not uploadable.
        """
        upload_bucket = d.get("upload_bucket", None)
        # Fix: the previous check `upload_bucket and len(upload_bucket) == 0`
        # could never be true(a non-empty value is required by the first
        # operand), so empty strings leaked through unnormalized.
        if upload_bucket is not None and len(upload_bucket) == 0:
            upload_bucket = None
        return StoreConfig(bytes32.from_hexstr(d["store_id"]), upload_bucket, d.get("download_urls", set()))

    def marshal(self) -> Dict[str, Any]:
        """Serialize back to the config-file representation."""
        return {"store_id": self.id.hex(), "upload_bucket": self.bucket, "download_urls": self.urls}
class S3Plugin:
    """aiohttp handlers implementing the Chia DataLayer plugin protocol over S3.

    Covers store management(add/remove), upload/download negotiation and
    execution, missing-file backfill, and health/info endpoints.  The store
    list is re-read from s3_plugin_config.yml before every mutating operation.
    """

    # attributes assigned in __init__ (port/region/keys are declared for typing;
    # the credentials are consumed by the boto3 resource below)
    boto_resource: boto3.resource
    port: int
    region: str
    aws_access_key_id: str
    aws_secret_access_key: str
    server_files_path: Path
    stores: List[StoreConfig]
    instance_name: str
    def __init__(
        self,
        region: str,
        aws_access_key_id: str,
        aws_secret_access_key: str,
        server_files_path: Path,
        stores: List[StoreConfig],
        instance_name: str,
    ):
        # one boto3 S3 resource is created up front and reused for all calls
        self.boto_resource = boto3.resource(
            "s3",
            region_name=region,
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
        )
        self.stores = stores
        self.instance_name = instance_name
        self.server_files_path = server_files_path
    async def add_store_id(self, request: web.Request) -> web.Response:
        """Add a store id to the config file. Returns False for store ids that are already in the config."""
        self.update_instance_from_config()
        try:
            data = await request.json()
            store_id = bytes32.from_hexstr(data["store_id"])
        except Exception as e:
            log.error(f"failed parsing request {request} {type(e).__name__} {e}")
            return web.json_response({"success": False})
        bucket = data.get("bucket", None)
        urls = data.get("urls", [])
        # at least one of bucket(upload) or urls(download) must be configured
        if not bucket and not urls:
            return web.json_response({"success": False, "reason": "bucket or urls must be provided"})
        for stores in self.stores:  # NOTE(review): loop variable shadows the list's name; reads as a single store
            if store_id == stores.id:
                return web.json_response({"success": False, "reason": f"store {store_id.hex()} already exists"})
        new_store = StoreConfig(store_id, bucket, urls)
        self.stores.append(new_store)
        self.update_config()
        return web.json_response({"success": True, "id": store_id.hex()})
    async def remove_store_id(self, request: web.Request) -> web.Response:
        """Remove a store id from the config file. Returns True for store ids that are not in the config."""
        self.update_instance_from_config()
        try:
            data = await request.json()
            store_id = bytes32.from_hexstr(data["store_id"])
        except Exception as e:
            log.error(f"failed parsing request {request} {e}")
            return web.json_response({"success": False})
        dirty = False
        for i, store in enumerate(self.stores):
            if store.id == store_id:
                del self.stores[i]
                dirty = True
                break
        # only rewrite the config file when something actually changed
        if dirty:
            self.update_config()
        return web.json_response({"success": True, "store_id": store_id.hex()})
    async def handle_upload(self, request: web.Request) -> web.Response:
        """Reply whether this plugin handles uploads for the requested store(and with which bucket)."""
        self.update_instance_from_config()
        try:
            data = await request.json()
        except Exception as e:
            log.error(f"failed parsing request {request} {type(e).__name__} {e}")
            return web.json_response({"handle_upload": False})
        store_id = bytes32.from_hexstr(data["store_id"])
        for store in self.stores:
            if store.id == store_id and store.bucket:
                return web.json_response({"handle_upload": True, "bucket": store.bucket})
        return web.json_response({"handle_upload": False})
    async def upload(self, request: web.Request) -> web.Response:
        """Upload the named full-tree and diff files for a configured store to its bucket."""
        try:
            data = await request.json()
            store_id = bytes32.from_hexstr(data["store_id"])
            bucket_str = self.get_bucket(store_id)
            my_bucket = self.boto_resource.Bucket(bucket_str)
            full_tree_name: str = data["full_tree_filename"]
            diff_name: str = data["diff_filename"]
            # filenames must follow the DataLayer naming convention
            if not is_filename_valid(full_tree_name) or not is_filename_valid(diff_name):
                return web.json_response({"uploaded": False})
            # Pull the store_id from the filename to make sure we only upload for configured stores
            full_tree_id = bytes32.fromhex(full_tree_name[:64])
            diff_tree_id = bytes32.fromhex(diff_name[:64])
            if not (full_tree_id == diff_tree_id == store_id):
                return web.json_response({"uploaded": False})
            full_tree_path = self.server_files_path.joinpath(full_tree_name)
            diff_path = self.server_files_path.joinpath(diff_name)
            try:
                # run the blocking boto3 uploads in worker threads to avoid
                # stalling the event loop
                with concurrent.futures.ThreadPoolExecutor() as pool:
                    await asyncio.get_running_loop().run_in_executor(
                        pool,
                        functools.partial(my_bucket.upload_file, full_tree_path, full_tree_path.name),
                    )
                    await asyncio.get_running_loop().run_in_executor(
                        pool, functools.partial(my_bucket.upload_file, diff_path, diff_path.name)
                    )
            except ClientError as e:
                log.error(f"failed uploading file to aws {type(e).__name__} {e}")
                return web.json_response({"uploaded": False})
        except Exception as e:
            log.error(f"failed handling request {request} {type(e).__name__} {e}")
            return web.json_response({"uploaded": False})
        return web.json_response({"uploaded": True})
    async def healthz(self, request: web.Request) -> web.Response:
        """Liveness endpoint; always reports success."""
        return web.json_response({"success": True})
    async def plugin_info(self, request: web.Request) -> web.Response:
        """Report the plugin name, version and instance name."""
        return web.json_response(
            {
                "name": plugin_name,
                "version": plugin_version,
                "instance": self.instance_name,
            }
        )
    async def handle_download(self, request: web.Request) -> web.Response:
        """Reply whether this plugin can serve the requested store_id/url pair."""
        self.update_instance_from_config()
        try:
            data = await request.json()
        except Exception as e:
            log.error(f"failed parsing request {request} {type(e).__name__} {e}")
            return web.json_response({"handle_download": False})
        store_id = bytes32.from_hexstr(data["store_id"])
        parse_result = urlparse(data["url"])
        # only s3:// URLs explicitly listed for the store are served
        for store in self.stores:
            if store.id == store_id and parse_result.scheme == "s3" and data["url"] in store.urls:
                return web.json_response({"handle_download": True, "urls": list(store.urls)})
        return web.json_response({"handle_download": False})
    async def download(self, request: web.Request) -> web.Response:
        """Download a single DataLayer file from a configured s3:// URL into server_files_path."""
        try:
            data = await request.json()
            url = data["url"]
            filename = data["filename"]
            # filename must follow the DataLayer naming convention
            if not is_filename_valid(filename):
                return web.json_response({"downloaded": False})
            # Pull the store_id from the filename to make sure we only download for configured stores
            filename_tree_id = bytes32.fromhex(filename[:64])
            parse_result = urlparse(url)
            should_download = False
            for store in self.stores:
                if store.id == filename_tree_id and parse_result.scheme == "s3" and url in store.urls:
                    should_download = True
                    break
            if not should_download:
                return web.json_response({"downloaded": False})
            # the bucket name is the host portion of the s3:// URL
            bucket_str = parse_result.netloc
            my_bucket = self.boto_resource.Bucket(bucket_str)
            target_filename = self.server_files_path.joinpath(filename)
            # Create folder for parent directory
            target_filename.parent.mkdir(parents=True, exist_ok=True)
            log.info(f"downloading {url} to {target_filename}...")
            # blocking boto3 download runs in a worker thread
            with concurrent.futures.ThreadPoolExecutor() as pool:
                await asyncio.get_running_loop().run_in_executor(
                    pool, functools.partial(my_bucket.download_file, filename, str(target_filename))
                )
        except Exception as e:
            log.error(f"failed parsing request {request} {type(e).__name__} {e}")
            return web.json_response({"downloaded": False})
        return web.json_response({"downloaded": True})
    async def add_missing_files(self, request: web.Request) -> web.Response:
        """Upload any of the named files that are not already present in the store's bucket."""
        try:
            data = await request.json()
            store_id = bytes32.from_hexstr(data["store_id"])
            bucket_str = self.get_bucket(store_id)
            files = json.loads(data["files"])
            my_bucket = self.boto_resource.Bucket(bucket_str)
            # list the bucket once up front so each candidate file is a cheap
            # membership test instead of a per-file S3 round trip
            existing_file_list = []
            for my_bucket_object in my_bucket.objects.all():
                existing_file_list.append(my_bucket_object.key)
            try:
                for file_name in files:
                    # filenames must follow the DataLayer naming convention
                    if not is_filename_valid(file_name):
                        log.error(f"failed uploading file {file_name}, invalid file name")
                        continue
                    # Pull the store_id from the filename to make sure we only upload for configured stores
                    if not (bytes32.fromhex(file_name[:64]) == store_id):
                        log.error(f"failed uploading file {file_name}, store id mismatch")
                        continue
                    file_path = self.server_files_path.joinpath(file_name)
                    if not os.path.isfile(file_path):
                        log.error(f"failed uploading file to aws, file {file_path} does not exist")
                        continue
                    if file_name in existing_file_list:
                        log.debug(f"skip {file_name} already in bucket")
                        continue
                    with concurrent.futures.ThreadPoolExecutor() as pool:
                        await asyncio.get_running_loop().run_in_executor(
                            pool,
                            functools.partial(my_bucket.upload_file, file_path, file_name),
                        )
            except ClientError as e:
                log.error(f"failed uploading file to aws {e}")
                return web.json_response({"uploaded": False})
        except Exception as e:
            log.error(f"failed handling request {request} {e}")
            return web.json_response({"uploaded": False})
        return web.json_response({"uploaded": True})
    def get_bucket(self, store_id: bytes32) -> str:
        """Return the upload bucket configured for store_id; raise when none is configured."""
        for store in self.stores:
            if store.id == store_id and store.bucket:
                return store.bucket
        raise Exception(f"bucket not found for store id {store_id.hex()}")
    def update_instance_from_config(self) -> None:
        """Reload this instance's store list from s3_plugin_config.yml."""
        config = load_config(self.instance_name)
        self.stores = read_store_ids_from_config(config)
    def update_config(self) -> None:
        """Persist the current store list into this instance's section of the config file."""
        with open("s3_plugin_config.yml", "r") as file:
            full_config = yaml.safe_load(file)
        full_config[self.instance_name]["stores"] = [store.marshal() for store in self.stores]
        self.save_config("s3_plugin_config.yml", full_config)
    def save_config(self, filename: str, config_data: Any) -> None:
        """Atomically write config_data to filename(write a temp file, then replace)."""
        path: Path = Path(filename)
        with tempfile.TemporaryDirectory(dir=path.parent) as tmp_dir:
            tmp_path: Path = Path(tmp_dir) / Path(filename)
            with open(tmp_path, "w") as f:
                yaml.safe_dump(config_data, f)
            try:
                os.replace(str(tmp_path), path)
            except PermissionError:
                # os.replace can fail across some filesystems/permissions;
                # fall back to shutil.move
                shutil.move(str(tmp_path), str(path))
def read_store_ids_from_config(config: Dict[str, Any]) -> List[StoreConfig]:
    """Parse the "stores" section of an instance config, skipping invalid entries."""
    parsed: List[StoreConfig] = []
    for entry in config.get("stores", []):
        try:
            parsed.append(StoreConfig.unmarshal(entry))
        except Exception as e:
            # bad entries are logged and skipped rather than aborting startup
            bad_store_id = f"{entry['store_id']!r}" if "store_id" in entry else "<missing>"
            log.info(f"Ignoring invalid store id: {bad_store_id}: {type(e).__name__} {e}")
    return parsed
def make_app(config: Dict[str, Any], instance_name: str) -> web.Application:
    """Build the aiohttp application for one plugin instance.

    Validates the required config keys(exiting with a message when missing),
    configures file logging, reads the store list, and wires the S3Plugin
    handlers onto their routes.
    """
    try:
        region = config["aws_credentials"]["region"]
        aws_access_key_id = config["aws_credentials"]["access_key_id"]
        aws_secret_access_key = config["aws_credentials"]["secret_access_key"]
        server_files_location = config["server_files_location"]
        server_files_path = Path(server_files_location).resolve()
    except KeyError as e:
        sys.exit(
            "config file must have server_files_location, aws_credentials with region, access_key_id. "
            f", and secret_access_key. Missing config key: {e.args[0]!r}"
        )
    # file logging: level and filename are both configurable with defaults
    log_level = config.get("log_level", "INFO")
    log.setLevel(log_level)
    fh = logging.FileHandler(config.get("log_filename", "s3_plugin.log"))
    fh.setLevel(log_level)
    # create formatter and add it to the handlers
    file_log_formatter = logging.Formatter(
        fmt="%(asctime)s.%(msecs)03d %(name)s %(levelname)s %(message)s", datefmt="%Y-%m-%dT%H:%M:%S"
    )
    fh.setFormatter(file_log_formatter)
    # add the handlers to logger
    log.addHandler(fh)
    stores = read_store_ids_from_config(config)
    s3_client = S3Plugin(
        region=region,
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
        server_files_path=server_files_path,
        stores=stores,
        instance_name=instance_name,
    )
    app = web.Application()
    app.add_routes([web.post("/handle_upload", s3_client.handle_upload)])
    app.add_routes([web.post("/upload", s3_client.upload)])
    app.add_routes([web.post("/handle_download", s3_client.handle_download)])
    app.add_routes([web.post("/download", s3_client.download)])
    app.add_routes([web.post("/add_store_id", s3_client.add_store_id)])
    app.add_routes([web.post("/remove_store_id", s3_client.remove_store_id)])
    app.add_routes([web.post("/add_missing_files", s3_client.add_missing_files)])
    app.add_routes([web.post("/plugin_info", s3_client.plugin_info)])
    app.add_routes([web.post("/healthz", s3_client.healthz)])
    log.info(f"Starting s3 plugin {instance_name} on port {config['port']}")
    return app
def load_config(instance: str) -> Any:
    """Return the configuration section for *instance* from s3_plugin_config.yml."""
    with open("s3_plugin_config.yml", "r") as config_file:
        return yaml.safe_load(config_file)[instance]
def run_server() -> None:
    """Load the per-instance config named on the command line and serve the plugin app."""
    instance_name = sys.argv[1]
    try:
        config = load_config(instance_name)
    except KeyError:
        sys.exit(f"Config for instance {instance_name} not found.")
    if not config:
        sys.exit(f"Config for instance {instance_name} is empty.")
    if "port" not in config:
        sys.exit("Missing port in config file.")
    port = config["port"]
    # serve on localhost only: the plugin is meant for the local DataLayer service
    web.run_app(make_app(config, instance_name), port=port, host="localhost")
    log.info(f"Stopped s3 plugin {instance_name}")


if __name__ == "__main__":
    run_server()
|
8acb76cd5f18323fdefb7fb31644b449d310bd20
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/schemaregistry/azure-schemaregistry/tests/async_tests/test_schema_registry_async.py
|
2e414c08aba37c123b034567eff863e6339fce05
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 18,665
|
py
|
test_schema_registry_async.py
|
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import functools
import pytest
import json
from azure.schemaregistry.aio import SchemaRegistryClient
from azure.identity.aio import ClientSecretCredential
from azure.core.exceptions import ClientAuthenticationError, ServiceRequestError, HttpResponseError
from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader
from devtools_testutils.aio import recorded_by_proxy_async
# Preparer partial that injects the schemaregistry_* settings into each test as
# kwargs; the fake values presumably stand in for real resource names during
# recorded playback(see EnvironmentVariableLoader) — confirm against devtools docs.
SchemaRegistryEnvironmentVariableLoader = functools.partial(
    EnvironmentVariableLoader,
    "schemaregistry",
    schemaregistry_avro_fully_qualified_namespace="fake_resource_avro.servicebus.windows.net",
    schemaregistry_json_fully_qualified_namespace="fake_resource_json.servicebus.windows.net",
    schemaregistry_custom_fully_qualified_namespace="fake_resource_custom.servicebus.windows.net",
    schemaregistry_group="fakegroup"
)
# One sample schema definition per supported serialization format.
AVRO_SCHEMA_STR = """{"namespace":"example.avro","type":"record","name":"User","fields":[{"name":"name","type":"string"},{"name":"favorite_number","type":["int","null"]},{"name":"favorite_color","type":["string","null"]}]}"""
JSON_SCHEMA = {
    "$id": "https://example.com/person.schema.json",
    "$schema": "https://json-schema.org/draft/2020-12/schema",
    "title": "User",
    "type": "object",
    "properties": {
        "name": {
            "type": "string",
            "description": "The person's name."
        },
        "favoriteNumber": {
            "type": "integer",
            "description": "The person's favorite positive number.",
            "minimum": 0
        },
        "favoriteColor": {
            "description": "The person's favorite color",
            "type": "string",
        }
    }
}
# compact separators(no whitespace) so the string round-trips byte-for-byte
JSON_SCHEMA_STR = json.dumps(JSON_SCHEMA, separators=(",", ":"))
CUSTOM_SCHEMA_STR = "My favorite color is yellow."
AVRO_FORMAT = "Avro"
JSON_FORMAT = "Json"
CUSTOM_FORMAT = "Custom"
# (format, schema) pairs used to parametrize every test in this module
avro_args = (AVRO_FORMAT, AVRO_SCHEMA_STR)
json_args = (JSON_FORMAT, JSON_SCHEMA_STR)
custom_args = (CUSTOM_FORMAT, CUSTOM_SCHEMA_STR)
format_params = [avro_args, json_args, custom_args]
format_ids = [AVRO_FORMAT, JSON_FORMAT, CUSTOM_FORMAT]
class ArgPasser:
    """Decorator that forwards the parametrized(format, schema_str) pair plus
    any loader-injected kwargs straight through to the decorated coroutine."""

    def __call__(self, decorated):
        async def _preparer(test_class, format, schema_str, **kwargs):
            # pass everything along unchanged, discarding the return value
            await decorated(test_class, format, schema_str, **kwargs)
        return _preparer
class TestSchemaRegistryAsync(AzureRecordedTestCase):
def create_client(self, **kwargs):
fully_qualified_namespace = kwargs.pop("fully_qualified_namespace")
credential = self.get_credential(SchemaRegistryClient, is_async=True)
return self.create_client_from_credential(SchemaRegistryClient, credential, fully_qualified_namespace=fully_qualified_namespace, is_async=True)
@SchemaRegistryEnvironmentVariableLoader()
@pytest.mark.parametrize("format, schema_str", format_params, ids=format_ids)
@ArgPasser()
@recorded_by_proxy_async
async def test_schema_basic_async(self, format, schema_str, **kwargs):
schemaregistry_fully_qualified_namespace = kwargs.pop(f"schemaregistry_{format.lower()}_fully_qualified_namespace")
schemaregistry_group = kwargs.pop("schemaregistry_group")
client = self.create_client(fully_qualified_namespace=schemaregistry_fully_qualified_namespace)
async with client:
name = self.get_resource_name(f"test-schema-basic-async-{format.lower()}")
schema_properties = await client.register_schema(schemaregistry_group, name, schema_str, format.upper(), logging_enable=True)
assert schema_properties.id is not None
assert schema_properties.format == format
returned_schema = await client.get_schema(schema_id=schema_properties.id, logging_enable=True)
assert returned_schema.properties.id == schema_properties.id
assert returned_schema.properties.format == format
assert returned_schema.properties.group_name == schemaregistry_group
assert returned_schema.properties.name == name
assert returned_schema.definition.replace("\/", "/") == schema_str
returned_version_schema = await client.get_schema(group_name=schemaregistry_group, name=name, version=schema_properties.version, logging_enable=True)
assert returned_version_schema.properties.id == schema_properties.id
assert returned_version_schema.properties.format == format
assert returned_version_schema.properties.group_name == schemaregistry_group
assert returned_version_schema.properties.name == name
assert returned_version_schema.properties.version == schema_properties.version
assert returned_version_schema.definition.replace("\/", "/") == schema_str
with pytest.raises(TypeError) as exc:
await client.get_schema(group_name=schemaregistry_group, version=schema_properties.version, logging_enable=True)
assert "Missing" in str(exc)
returned_schema_properties = await client.get_schema_properties(schemaregistry_group, name, schema_str, format, logging_enable=True)
assert returned_schema_properties.id == schema_properties.id
assert returned_schema.properties.group_name == schemaregistry_group
assert returned_schema.properties.name == name
assert returned_schema_properties.format == format
await client._generated_client._config.credential.close()
@SchemaRegistryEnvironmentVariableLoader()
@pytest.mark.parametrize("format, schema_str", format_params, ids=format_ids)
@ArgPasser()
@recorded_by_proxy_async
async def test_schema_update_async(self, format, schema_str, **kwargs):
    r"""Registering a modified definition under the same name creates a new
    schema version; both the new and the old version stay retrievable.

    Fix: the service may return the definition with escaped forward slashes,
    and the original comparison normalized them with the string literal
    "\/", which is an invalid escape sequence (SyntaxWarning on Python
    3.12+). Use the explicit "\\/" — the runtime value is identical.
    """
    schemaregistry_fully_qualified_namespace = kwargs.pop(f"schemaregistry_{format.lower()}_fully_qualified_namespace")
    schemaregistry_group = kwargs.pop("schemaregistry_group")
    client = self.create_client(fully_qualified_namespace=schemaregistry_fully_qualified_namespace)
    async with client:
        name = self.get_resource_name(f"test-schema-update-async-{format.lower()}")
        schema_properties = await client.register_schema(schemaregistry_group, name, schema_str, format)
        assert schema_properties.id is not None
        assert schema_properties.format == format
        # Rename fields so the definition differs (covers JSON and Avro casing).
        schema_str_new = schema_str.replace("color", "food").replace("Color", "Food")
        new_schema_properties = await client.register_schema(schemaregistry_group, name, schema_str_new, format)
        assert new_schema_properties.id is not None
        assert new_schema_properties.format == format
        assert new_schema_properties.group_name == schemaregistry_group
        assert new_schema_properties.name == name
        # The updated version must have a fresh id and the new definition.
        new_schema = await client.get_schema(schema_id=new_schema_properties.id)
        assert new_schema.properties.id != schema_properties.id
        assert new_schema.properties.id == new_schema_properties.id
        assert new_schema.definition.replace("\\/", "/") == schema_str_new
        assert new_schema.properties.format == format
        assert new_schema.properties.group_name == schemaregistry_group
        assert new_schema.properties.name == name
        # The original version is still retrievable by (group, name, version).
        old_schema = await client.get_schema(group_name=schemaregistry_group, name=name, version=schema_properties.version, logging_enable=True)
        assert old_schema.properties.id != new_schema_properties.id
        assert old_schema.properties.id == schema_properties.id
        assert old_schema.definition.replace("\\/", "/") == schema_str
        assert old_schema.properties.format == format
        assert old_schema.properties.group_name == schemaregistry_group
        assert old_schema.properties.name == name
        assert old_schema.properties.version == schema_properties.version
    await client._generated_client._config.credential.close()
@SchemaRegistryEnvironmentVariableLoader()
@pytest.mark.parametrize("format, schema_str", format_params, ids=format_ids)
@ArgPasser()
@recorded_by_proxy_async
async def test_schema_same_twice_async(self, format, schema_str, **kwargs):
    """Registering the exact same definition twice is idempotent: the
    service returns the same schema id both times."""
    fully_qualified_namespace = kwargs.pop(f"schemaregistry_{format.lower()}_fully_qualified_namespace")
    group_name = kwargs.pop("schemaregistry_group")
    sr_client = self.create_client(fully_qualified_namespace=fully_qualified_namespace)
    schema_name = self.get_resource_name(f"test-schema-twice-async-{format.lower()}")
    async with sr_client:
        first_registration = await sr_client.register_schema(group_name, schema_name, schema_str, format)
        repeat_registration = await sr_client.register_schema(group_name, schema_name, schema_str, format)
        assert first_registration.id == repeat_registration.id
    await sr_client._generated_client._config.credential.close()
@SchemaRegistryEnvironmentVariableLoader()
@pytest.mark.parametrize("format, schema_str", format_params, ids=format_ids)
@ArgPasser()
@recorded_by_proxy_async
async def test_schema_negative_wrong_credential_async(self, format, schema_str, **kwargs):
    """A client built with bogus AAD credentials must fail authentication
    (ClientAuthenticationError) when registering a schema."""
    schemaregistry_fully_qualified_namespace = kwargs.pop(f"schemaregistry_{format.lower()}_fully_qualified_namespace")
    schemaregistry_group = kwargs.pop("schemaregistry_group")
    # Deliberately invalid service principal — tenant/client/secret are fake.
    credential = ClientSecretCredential(tenant_id="fake", client_id="fake", client_secret="fake")
    client = SchemaRegistryClient(fully_qualified_namespace=schemaregistry_fully_qualified_namespace, credential=credential)
    # Both the client and the credential own network resources; close both.
    async with client, credential:
        name = self.get_resource_name(f"test-schema-negative-async-{format.lower()}")
        with pytest.raises(ClientAuthenticationError):
            await client.register_schema(schemaregistry_group, name, schema_str, format)
@pytest.mark.live_test_only
@SchemaRegistryEnvironmentVariableLoader()
@pytest.mark.parametrize("format, schema_str", format_params, ids=format_ids)
@ArgPasser()
@recorded_by_proxy_async
async def test_schema_negative_wrong_endpoint_async(self, format, schema_str, **kwargs):
    """Registering against a namespace that does not exist must fail.

    Live-only: the failure mode depends on real DNS / service routing.
    """
    schemaregistry_group = kwargs.pop("schemaregistry_group")
    client = self.create_client(fully_qualified_namespace="fake.servicebus.windows.net")
    async with client:
        name = self.get_resource_name(f"test-schema-nonexist-async-{format.lower()}")
        # accepting both errors for now due to: https://github.com/Azure/azure-sdk-tools/issues/2907
        with pytest.raises((ServiceRequestError, HttpResponseError)) as exc_info:
            await client.register_schema(schemaregistry_group, name, schema_str, format)
        # When the request did reach a server, check the message mentions an
        # unresolvable / unknown endpoint.
        if exc_info.type is HttpResponseError:
            response_content = json.loads(exc_info.value.response.content)
            assert any([(m in response_content["Message"]) for m in ["Name does not resolve", "Unable to find a record"]])
    await client._generated_client._config.credential.close()
@SchemaRegistryEnvironmentVariableLoader()
@pytest.mark.parametrize("format, schema_str", format_params, ids=format_ids)
@ArgPasser()
@recorded_by_proxy_async
async def test_schema_negative_no_schema_async(self, format, schema_str, **kwargs):
    """Looking up ids that were never registered (too short, and a
    well-formed 32-char id) raises HttpResponseError."""
    fully_qualified_namespace = kwargs.pop(f"schemaregistry_{format.lower()}_fully_qualified_namespace")
    sr_client = self.create_client(fully_qualified_namespace=fully_qualified_namespace)
    async with sr_client:
        for missing_id in ("a", "a" * 32):
            with pytest.raises(HttpResponseError):
                await sr_client.get_schema(missing_id)
    await sr_client._generated_client._config.credential.close()
@SchemaRegistryEnvironmentVariableLoader()
@pytest.mark.parametrize("format, schema_str", format_params, ids=format_ids)
@ArgPasser()
@recorded_by_proxy_async
async def test_schema_negative_no_schema_version_async(self, format, schema_str, **kwargs):
    """Requesting a version number past the latest registered version
    raises HttpResponseError."""
    schemaregistry_fully_qualified_namespace = kwargs.pop(f"schemaregistry_{format.lower()}_fully_qualified_namespace")
    schemaregistry_group = kwargs.pop("schemaregistry_group")
    client = self.create_client(fully_qualified_namespace=schemaregistry_fully_qualified_namespace)
    name = self.get_resource_name(f"test-schema-negative-version-{format.lower()}")
    async with client:
        schema_properties = await client.register_schema(schemaregistry_group, name, schema_str, format)
        # One past the newest registered version — must not exist.
        version = schema_properties.version + 1
        with pytest.raises(HttpResponseError):
            await client.get_schema(group_name=schemaregistry_group, name=name, version=version)
    await client._generated_client._config.credential.close()
@SchemaRegistryEnvironmentVariableLoader()
@pytest.mark.parametrize("format, schema_str", format_params, ids=format_ids)
@ArgPasser()
@recorded_by_proxy_async
async def test_register_schema_errors(self, format, schema_str, **kwargs):
    """Exercises the error paths of register_schema for each invalid
    argument: None group/name (client-side ValueError), None definition
    (service 400), None format (AttributeError from client-side handling),
    and an unknown format string (service 415)."""
    schemaregistry_fully_qualified_namespace = kwargs.pop(f"schemaregistry_{format.lower()}_fully_qualified_namespace")
    schemaregistry_group = kwargs.pop("schemaregistry_group")
    client = self.create_client(fully_qualified_namespace=schemaregistry_fully_qualified_namespace)
    name = 'test-schema'
    async with client:
        # None group / None name are rejected client-side before any request.
        with pytest.raises(ValueError) as e:
            await client.register_schema(None, name, schema_str, format)
        with pytest.raises(ValueError) as e:
            await client.register_schema(schemaregistry_group, None, schema_str, format)
        # None definition reaches the service and comes back as 400.
        with pytest.raises(HttpResponseError) as e:
            await client.register_schema(schemaregistry_group, name, None, format)
        assert e.value.error.code == 'InvalidRequest'
        assert e.value.status_code == 400
        assert e.value.reason == 'Bad Request'
        with pytest.raises(AttributeError) as e:
            await client.register_schema(schemaregistry_group, name, schema_str, None)
        # Unknown format string → 415 Unsupported Media Type.
        with pytest.raises(HttpResponseError) as e:
            await client.register_schema(schemaregistry_group, name, schema_str, 'invalid-format')
        assert e.value.error.code == 'InvalidSchemaType'
        assert e.value.status_code == 415
        assert e.value.reason == 'Unsupported Media Type'
@SchemaRegistryEnvironmentVariableLoader()
@pytest.mark.parametrize("format, schema_str", format_params, ids=format_ids)
@ArgPasser()
@recorded_by_proxy_async
async def test_get_schema_properties_errors(self, format, schema_str, **kwargs):
    """Exercises the error paths of get_schema_properties: None group/name
    (ValueError), None definition (400), None format (AttributeError),
    unknown format (415), and a never-registered name (404)."""
    schemaregistry_fully_qualified_namespace = kwargs.pop(f"schemaregistry_{format.lower()}_fully_qualified_namespace")
    schemaregistry_group = kwargs.pop("schemaregistry_group")
    client = self.create_client(fully_qualified_namespace=schemaregistry_fully_qualified_namespace)
    name = 'test-schema'
    async with client:
        # None group / None name are rejected client-side.
        with pytest.raises(ValueError) as e:
            await client.get_schema_properties(None, name, schema_str, format)
        with pytest.raises(ValueError) as e:
            await client.get_schema_properties(schemaregistry_group, None, schema_str, format)
        # None definition → service rejects the request with 400.
        with pytest.raises(HttpResponseError) as e:
            await client.get_schema_properties(schemaregistry_group, name, None, format)
        assert e.value.error.code == 'InvalidRequest'
        assert e.value.status_code == 400
        assert e.value.reason == 'Bad Request'
        with pytest.raises(AttributeError) as e:
            await client.get_schema_properties(schemaregistry_group, name, schema_str, None)
        # Unknown format string → 415 Unsupported Media Type.
        with pytest.raises(HttpResponseError) as e:
            await client.get_schema_properties(schemaregistry_group, name, schema_str, 'invalid-format')
        assert e.value.error.code == 'InvalidSchemaType'
        assert e.value.status_code == 415
        assert e.value.reason == 'Unsupported Media Type'
        # Valid arguments but a name that was never registered → 404.
        with pytest.raises(HttpResponseError) as e:
            await client.get_schema_properties(schemaregistry_group, 'never-registered', schema_str, format)
        assert e.value.error.code == 'ItemNotFound'
        assert e.value.status_code == 404
        assert e.value.reason == 'Not Found'
@SchemaRegistryEnvironmentVariableLoader()
@pytest.mark.parametrize("format, schema_str", format_params, ids=format_ids)
@ArgPasser()
@recorded_by_proxy_async
async def test_get_schema_errors(self, format, schema_str, **kwargs):
    """Exercises the error paths of get_schema by id: a None id
    (client-side ValueError) and a malformed id (service 400)."""
    schemaregistry_fully_qualified_namespace = kwargs.pop(f"schemaregistry_{format.lower()}_fully_qualified_namespace")
    client = self.create_client(fully_qualified_namespace=schemaregistry_fully_qualified_namespace)
    async with client:
        with pytest.raises(ValueError) as e:
            await client.get_schema(None)
        # Malformed id (wrong length/charset) → service rejects with 400.
        with pytest.raises(HttpResponseError) as e:
            await client.get_schema('fakeschemaid')
        assert e.value.error.code == 'InvalidRequest'
        assert e.value.status_code == 400
        assert e.value.reason == 'Bad Request'
|
df373c2df70f88422c8b2c795720ec8d74324f44
|
568fa58296378fa129ab3349adf010daa44ed45b
|
/tests/common/test_op/ascend/bessel_i0e.py
|
b912e2fe1fda0193b970f63fe0541d42963f8d94
|
[
"Apache-2.0",
"BSD-3-Clause",
"NCSA",
"X11-distribute-modifications-variant",
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LLVM-exception",
"BSD-2-Clause"
] |
permissive
|
mindspore-ai/akg
|
37f471badc66de6a831f1f45ad84344f34d23ef2
|
99f33858d6972741748cbfc9ab0bf9600428fef7
|
refs/heads/master
| 2023-07-25T23:03:17.672665
| 2023-07-11T07:33:57
| 2023-07-11T07:33:57
| 274,077,856
| 319
| 36
|
Apache-2.0
| 2021-12-30T13:43:08
| 2020-06-22T08:09:05
|
Python
|
UTF-8
|
Python
| false
| false
| 3,983
|
py
|
bessel_i0e.py
|
# Copyright 2020-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function: bessel_i0e"""
from akg import topi
import akg.tvm
import akg.utils as utils
from akg.ops.math import abs, cast, mul, neg, rsqrt, exp, divide, minimum
# Polynomial coefficients of the small-argument approximation of I0
# (used for |x| <= 3.75 in _bessel_i0e_compute).
ITR_BEFORE = (1.0, 3.5156229, 3.0899424, 1.2067492, 0.2659732, 0.0360768, 0.0045813)
# Polynomial coefficients of the large-argument approximation
# (used for |x| >= 3.75 in _bessel_i0e_compute).
ITR_AFTER = (0.39894228, 0.01328592, 0.00225319, -0.00157565, 0.00916281,
             -0.02057706, 0.02635537, -0.01647633, 0.00392377)
LEN_BEFORE = 7  # == len(ITR_BEFORE)
LEN_AFTER = 9   # == len(ITR_AFTER)
CONST_LIMIT = 15.0 / 4  # 3.75 — switch point between the two approximations
def _bessel_i0e_compute(input_data):
    """Compute the exponentially scaled modified Bessel function I0e(x).

    Evaluates two polynomial approximations — one for |x| <= 3.75, one for
    |x| >= 3.75 — and combines them with an element-wise minimum.

    Args:
        input_data: input tensor of dtype float16 or float32.

    Returns:
        Tensor of I0e values with the same shape and dtype as ``input_data``.
    """
    shape_input = input_data.shape
    dtype_input = input_data.dtype
    # Compute in float32 for accuracy; cast back to float16 at the end.
    if dtype_input == "float16":
        input_data = cast(input_data, "float32", target=utils.CCE)
    abs_data = abs(input_data, target=utils.CCE)
    # compute bessel_i0e for data in (-3.75, 3.75)
    # t = |x| / 3.75
    # I0e = e^-|x|(1 + 3.5156229t^2 + 3.0899424t^4 + 1.2067492t^6 + 0.2659732t^8
    #       + 0.0360768t^10 + 0.0045813t^12)), |x| <= 3.75
    broad_const_limit = akg.lang.ascend.broadcast(akg.tvm.const(CONST_LIMIT, "float32"), shape_input)
    # Clamp |x| to the limit so the small-argument branch is well-defined everywhere.
    before_abs_data = minimum(abs_data, broad_const_limit)
    data = topi.multiply(before_abs_data, 1.0 / CONST_LIMIT)
    square_data = mul(data, data, target=utils.CCE)
    # Horner-style evaluation of the polynomial in t^2, highest degree first.
    before_res = topi.multiply(square_data, ITR_BEFORE[LEN_BEFORE - 1])
    before_res = topi.add(before_res, ITR_BEFORE[LEN_BEFORE - 2])
    for iter_number in ITR_BEFORE[LEN_BEFORE-3::-1]:
        before_res = mul(before_res, square_data, target=utils.CCE)
        before_res = topi.add(before_res, iter_number)
    # Scale by e^-|x| (the "e" in I0e).
    exp_data = exp(neg(before_abs_data, target=utils.CCE), target=utils.CCE)
    before_res = mul(before_res, exp_data, target=utils.CCE)
    # compute bessel_i0e for data in other domain
    # t = |x| / 3.75
    # I0e(x) = (1 / sqrt(|x|))*(0.39894228 + 0.01328592t^-1 + 0.00225319t^-2 + -0.00157565t^-3
    #          + 0.00916281t^-4 + -0.02057706t^-5 + 0.02635537t^-6 + -0.01647633t^-7
    #          + 0.00392377t^-8), |x| >= 3.75
    data = divide(broad_const_limit, abs_data, target=utils.CCE)
    # Horner-style evaluation of the polynomial in 1/t, highest degree first.
    after_res = topi.multiply(data, ITR_AFTER[LEN_AFTER - 1])
    after_res = topi.add(after_res, ITR_AFTER[LEN_AFTER - 2])
    for iter_number in ITR_AFTER[LEN_AFTER-3::-1]:
        after_res = mul(after_res, data, target=utils.CCE)
        after_res = topi.add(after_res, iter_number)
    rsqrt_data = rsqrt(abs_data, target=utils.CCE)
    after_res = mul(after_res, rsqrt_data, target=utils.CCE)
    # Combine the two regimes by taking the element-wise minimum.
    # NOTE(review): this relies on the out-of-range branch always evaluating
    # to the larger value — confirm against the approximation's behavior.
    after_res = minimum(before_res, after_res, target=utils.CCE)
    # Cast back to the caller's dtype.
    if dtype_input == "float16":
        after_res = cast(after_res, "float16", target=utils.CCE)
    return after_res
@utils.check_input_type(akg.tvm.tensor.Tensor, (str, type(None)))
# NOTE(review): the decorator declares a second (str | None) argument but the
# signature takes only ``x`` — presumably a dropped ``target`` parameter;
# confirm against utils.check_input_type's handling of missing arguments.
def bessel_i0e(x):
    """
    The modified Bessel i0e function.

    ..math:: `I0e(x) = (e^{-|x|}) * (1 + ( (x/2) / (1!) )^2 + ((x/2)^2 / (2!))^2 + ... +
            ((x/2)^n / (n!)) ^2)`

    Args:
        x (tvm.tensor.Tensor): Tensor of type float16, float32.

    Returns:
        tvm.tensor.Tensor. The modified Bessel i0e function of x element-wise.
            Has the same type as x.
    """
    # check shape
    utils.check_shape(x)
    # check input tensor data_type
    utils.ops_dtype_check(x.dtype, utils.DtypeForDavinci.ALL_FLOAT)
    # Delegate to the polynomial-approximation implementation.
    res = _bessel_i0e_compute(x)
    return res
|
eb7787c7fb646b2b9225c21f9b1fe94c7b86cd99
|
812045c3ec6587827aeb18bde666237dfffc21ae
|
/tf_quant_finance/models/hjm/calibration_test.py
|
17a4fc4bbc5eeb748da9ad59ed46d9f564eb1d69
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
google/tf-quant-finance
|
2062082c85e8679b71e69bbeb579fe338c1b0288
|
0d3a2193c0f2d320b65e602cf01d7a617da484df
|
refs/heads/master
| 2023-08-31T01:58:15.415811
| 2023-08-15T07:37:46
| 2023-08-15T07:38:22
| 198,669,252
| 4,165
| 557
|
Apache-2.0
| 2023-08-04T19:25:55
| 2019-07-24T16:09:50
|
Python
|
UTF-8
|
Python
| false
| false
| 12,170
|
py
|
calibration_test.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for calibration.py."""
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
import tf_quant_finance as tff
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
@test_util.run_all_in_graph_and_eager_modes
class HJMCalibrationTest(parameterized.TestCase, tf.test.TestCase):
    """Tests for HJM model calibration to a panel of swaption prices."""

    def setUp(self):
        # Target swaption prices; one entry per instrument in the 14-swaption
        # panel defined by `expiries` / leg schedules below.
        self.prices = np.array([
            0.42919881, 0.98046542, 0.59045074, 1.34909391, 0.79491583, 1.81768802,
            0.93210461, 2.13625342, 1.05114573, 2.40921088, 1.12941064, 2.58857507,
            1.37029637, 3.15081683
        ])
        self.expiries = np.array(
            [0.5, 0.5, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, 5.0, 5.0, 10., 10.])
        # Semiannual floating-leg accrual start times; rows padded to 10 columns
        # by repeating the last date (clipped away via max_maturities below).
        self.float_leg_start_times = np.array([
            [0.5, 1.0, 1.5, 2.0, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5],  # 6M x 2Y
            [0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0],  # 6M x 5Y
            [1.0, 1.5, 2.0, 2.5, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0],  # 1Y x 2Y
            [1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5],  # 1Y x 5Y
            [2.0, 2.5, 3.0, 3.5, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0],  # 2Y x 2Y
            [2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5],  # 2Y x 5Y
            [3.0, 3.5, 4.0, 4.5, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0],  # 3Y x 2Y
            [3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5],  # 3Y x 5Y
            [4.0, 4.5, 5.0, 5.5, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0],  # 4Y x 2Y
            [4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5],  # 4Y x 5Y
            [5.0, 5.5, 6.0, 6.5, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0],  # 5Y x 2Y
            [5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5],  # 5Y x 5Y
            [10.0, 10.5, 11.0, 11.5, 12.0, 12.0, 12.0, 12.0, 12.0,
             12.0],  # 10Y x 2Y
            [10.0, 10.5, 11.0, 11.5, 12.0, 12.5, 13.0, 13.5, 14.0, 14.5]  # 10Y x 5Y
        ])
        self.float_leg_end_times = self.float_leg_start_times + 0.5
        # Per-instrument final maturity; end times beyond it are clipped so the
        # padded columns contribute zero daycount fraction.
        max_maturities = np.array(
            [2.5, 5.5, 3.0, 6.0, 4., 7., 5., 8., 6., 9., 7., 10., 12., 15.])
        for i in range(self.float_leg_end_times.shape[0]):
            self.float_leg_end_times[i] = np.clip(
                self.float_leg_end_times[i], 0.0, max_maturities[i])
        self.fixed_leg_payment_times = self.float_leg_end_times
        self.float_leg_daycount_fractions = (
            self.float_leg_end_times - self.float_leg_start_times)
        self.fixed_leg_daycount_fractions = self.float_leg_daycount_fractions
        # Flat 1% fixed coupon for every instrument.
        self.fixed_leg_coupon = 0.01 * np.ones_like(self.fixed_leg_payment_times)
        super(HJMCalibrationTest, self).setUp()

    @parameterized.named_parameters(
        {
            'testcase_name': 'two_factor_price',
            'optimizer_fn': None,
            'vol_based_calib': False,
            'num_hjm_factors': 2,
            'time_step': 0.25,
            'num_time_steps': None,
            'num_instruments': 14,
            'max_iter': 10,
        }, {
            'testcase_name': 'two_factor_vol',
            'optimizer_fn': None,
            'vol_based_calib': True,
            'num_hjm_factors': 2,
            'time_step': 0.25,
            'num_time_steps': None,
            'num_instruments': 14,
            'max_iter': 10,
        }, {
            'testcase_name': 'two_factor_bfgs',
            'optimizer_fn': tfp.optimizer.bfgs_minimize,
            'vol_based_calib': False,
            'num_hjm_factors': 2,
            'time_step': 0.25,
            'num_time_steps': None,
            'num_instruments': 14,
            'max_iter': 10,
        }, {
            'testcase_name': 'three_factor_price',
            'optimizer_fn': None,
            'vol_based_calib': False,
            'num_hjm_factors': 3,
            'time_step': 0.25,
            'num_time_steps': None,
            'num_instruments': 14,
            'max_iter': 10,
        }, {
            'testcase_name': 'three_factor_vol',
            'optimizer_fn': None,
            'vol_based_calib': True,
            'num_hjm_factors': 3,
            'time_step': 0.25,
            'num_time_steps': None,
            'num_instruments': 14,
            'max_iter': 10,
        })
    def test_calibration(self, optimizer_fn, vol_based_calib, num_hjm_factors,
                         time_step, num_time_steps, num_instruments, max_iter):
        """Tests calibration with constant parameters."""
        dtype = tf.float64
        # Initial guesses for mean reversion and volatility, per factor.
        mr0 = [0.01, 0.05]
        if num_hjm_factors == 3:
            mr0 = [0.01, 0.05, 0.1]
        vol0 = [0.005, 0.007]
        if num_hjm_factors == 3:
            vol0 = [0.002, 0.003, 0.008]
        # Flat 1% zero curve.
        zero_rate_fn = lambda x: 0.01 * tf.ones_like(x, dtype=dtype)
        times = np.unique(np.reshape(self.expiries[:num_instruments], [-1]))
        curve_times = None
        # Antithetic stateless sampling keeps the Monte Carlo paths
        # reproducible across the calibration and repricing passes.
        random_type = tff.math.random.RandomType.STATELESS_ANTITHETIC
        seed = [0, 0]
        num_samples = 500
        valuation_method = tff.models.ValuationMethod.MONTE_CARLO

        def _fn():
            # Calibrate (mean reversion, volatility, correlation) to the
            # target swaption prices.
            (calib_mr, calib_vol, calib_corr), _, _ = (
                tff.models.hjm.calibration_from_swaptions(
                    prices=self.prices[:num_instruments],
                    expiries=self.expiries[:num_instruments],
                    floating_leg_start_times=self
                    .float_leg_start_times[:num_instruments, :],
                    floating_leg_end_times=self
                    .float_leg_end_times[:num_instruments, :],
                    fixed_leg_payment_times=self
                    .fixed_leg_payment_times[:num_instruments, :],
                    floating_leg_daycount_fractions=self
                    .float_leg_daycount_fractions[:num_instruments, :],
                    fixed_leg_daycount_fractions=self
                    .fixed_leg_daycount_fractions[:num_instruments, :],
                    fixed_leg_coupon=self
                    .fixed_leg_coupon[:num_instruments, :],
                    reference_rate_fn=zero_rate_fn,
                    notional=100.,
                    num_hjm_factors=num_hjm_factors,
                    mean_reversion=mr0,
                    volatility=vol0,
                    optimizer_fn=optimizer_fn,
                    volatility_based_calibration=vol_based_calib,
                    swaption_valuation_method=valuation_method,
                    num_samples=num_samples,
                    random_type=random_type,
                    seed=seed,
                    time_step=time_step,
                    num_time_steps=num_time_steps,
                    times=times,
                    curve_times=curve_times,
                    time_step_finite_difference=time_step,
                    num_grid_points_finite_difference=41,
                    maximum_iterations=max_iter,
                    dtype=dtype))
            return calib_mr, calib_vol, calib_corr

        calib_mr, calib_vol, calib_corr = self.evaluate(_fn())
        # Reprice the swaptions with the calibrated parameters; the round-trip
        # prices should be close to the calibration targets.
        prices = tff.models.hjm.swaption_price(
            expiries=self.expiries[:num_instruments],
            fixed_leg_payment_times=self
            .fixed_leg_payment_times[:num_instruments, :],
            fixed_leg_daycount_fractions=self
            .fixed_leg_daycount_fractions[:num_instruments, :],
            fixed_leg_coupon=self.fixed_leg_coupon[:num_instruments, :],
            reference_rate_fn=zero_rate_fn,
            num_hjm_factors=num_hjm_factors,
            notional=100.,
            mean_reversion=calib_mr,
            volatility=calib_vol,
            corr_matrix=calib_corr,
            num_samples=num_samples,
            random_type=random_type,
            seed=seed,
            time_step=time_step,
            num_time_steps=num_time_steps,
            times=times,
            curve_times=curve_times,
            time_step_finite_difference=time_step,
            num_grid_points_finite_difference=101,
            valuation_method=valuation_method,
            dtype=dtype)
        prices = self.evaluate(prices)
        self.assertAllClose(
            prices, self.prices[:num_instruments], rtol=0.1, atol=0.1)

    @parameterized.named_parameters(
        {
            'testcase_name': 'vol_based',
            'vol_based_calib': True,
        }, {
            'testcase_name': 'price_based',
            'vol_based_calib': False,
        })
    def test_calibration_batch(self, vol_based_calib):
        """Tests calibration for a batch of models."""
        dtype = tf.float64
        # Batch of two models: one initial-guess row per batch element.
        mr0 = [[0.01, 0.05], [0.1, 0.2]]
        vol0 = [[0.005, 0.007], [0.01, 0.015]]

        def zero_rate_fn(t):
            # Flat 1% curve replicated across the batch dimension of size 2.
            rates = 0.01 * tf.ones_like(tf.expand_dims(t, axis=0), dtype=dtype)
            return tf.concat([rates, rates], axis=0)

        times = np.unique(np.reshape(self.expiries, [-1]))
        curve_times = None
        random_type = tff.math.random.RandomType.STATELESS_ANTITHETIC
        seed = [0, 0]
        num_samples = 500
        valuation_method = tff.models.ValuationMethod.MONTE_CARLO
        # Duplicate the single-model market data along a new leading batch axis.
        prices_2d = np.repeat(np.expand_dims(self.prices, axis=0), 2, axis=0)
        expiries_2d = np.repeat(np.expand_dims(self.expiries, axis=0), 2, axis=0)
        float_leg_start_times_2d = np.repeat(
            np.expand_dims(self.float_leg_start_times, axis=0), 2, axis=0)
        float_leg_end_times_2d = np.repeat(
            np.expand_dims(self.float_leg_end_times, axis=0), 2, axis=0)
        fixed_leg_payment_times_2d = np.repeat(
            np.expand_dims(self.fixed_leg_payment_times, axis=0), 2, axis=0)
        float_leg_daycount_fractions_2d = np.repeat(
            np.expand_dims(self.float_leg_daycount_fractions, axis=0), 2, axis=0)
        fixed_leg_daycount_fractions_2d = np.repeat(
            np.expand_dims(self.fixed_leg_daycount_fractions, axis=0), 2, axis=0)
        fixed_leg_coupon_2d = np.repeat(
            np.expand_dims(self.fixed_leg_coupon, axis=0), 2, axis=0)

        def _fn():
            calibration_result, _, _ = (
                tff.models.hjm.calibration_from_swaptions(
                    prices=prices_2d,
                    expiries=expiries_2d,
                    floating_leg_start_times=float_leg_start_times_2d,
                    floating_leg_end_times=float_leg_end_times_2d,
                    fixed_leg_payment_times=fixed_leg_payment_times_2d,
                    floating_leg_daycount_fractions=float_leg_daycount_fractions_2d,
                    fixed_leg_daycount_fractions=fixed_leg_daycount_fractions_2d,
                    fixed_leg_coupon=fixed_leg_coupon_2d,
                    reference_rate_fn=zero_rate_fn,
                    notional=100.,
                    num_hjm_factors=2,
                    mean_reversion=mr0,
                    volatility=vol0,
                    volatility_based_calibration=vol_based_calib,
                    calibrate_correlation=False,
                    swaption_valuation_method=valuation_method,
                    num_samples=num_samples,
                    random_type=random_type,
                    seed=seed,
                    time_step=0.25,
                    num_time_steps=None,
                    times=times,
                    curve_times=curve_times,
                    maximum_iterations=10,
                    dtype=dtype))
            (
                calib_mr, calib_vol
            ) = calibration_result.mean_reversion, calibration_result.volatility
            return calib_mr, calib_vol

        calib_mr, calib_vol = self.evaluate(_fn())
        # Calibrated parameters keep the [batch, factor] = [2, 2] shape.
        with self.subTest('MR-Shape'):
            self.assertAllEqual(calib_mr.shape, [2, 2])
        with self.subTest('Vol-Shape'):
            self.assertAllEqual(calib_vol.shape, [2, 2])
        # Round-trip: reprice with calibrated parameters.
        prices = tff.models.hjm.swaption_price(
            expiries=expiries_2d,
            fixed_leg_payment_times=fixed_leg_payment_times_2d,
            fixed_leg_daycount_fractions=fixed_leg_daycount_fractions_2d,
            fixed_leg_coupon=fixed_leg_coupon_2d,
            reference_rate_fn=zero_rate_fn,
            num_hjm_factors=2,
            notional=100.,
            mean_reversion=calib_mr,
            volatility=calib_vol,
            corr_matrix=None,
            num_samples=num_samples,
            random_type=random_type,
            seed=seed,
            time_step=0.25,
            times=times,
            curve_times=curve_times,
            valuation_method=valuation_method,
            dtype=dtype)
        prices = self.evaluate(prices)
        with self.subTest('CalibratedPrices'):
            self.assertAllClose(prices, prices_2d, rtol=0.1, atol=0.1)
# Run the TensorFlow test runner when executed as a script.
if __name__ == '__main__':
    tf.test.main()
|
2bc2321475f909d8c23c04130c4e0254f1babb8d
|
8380b5eb12e24692e97480bfa8939a199d067bce
|
/Fuzzbunch/Resources/Ops/PyScripts/wrappers/hide.py
|
60b270194c53ada51d35f94c7caab0e035db5b0a
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
RamadhanAmizudin/malware
|
788ee745b5bb23b980005c2af08f6cb8763981c2
|
62d0035db6bc9aa279b7c60250d439825ae65e41
|
refs/heads/master
| 2023-02-05T13:37:18.909646
| 2023-01-26T08:43:18
| 2023-01-26T08:43:18
| 53,407,812
| 873
| 291
| null | 2023-01-26T08:43:19
| 2016-03-08T11:44:21
|
C++
|
UTF-8
|
Python
| false
| false
| 416
|
py
|
hide.py
|
# Guard script: refuses to run a process-hiding technique on platforms where
# kernel patch protection (PatchGuard) would detect it.
import datetime
import sys
import dsz
import ops
import ops.system.systemversion
# Suppress on-screen echo of framework output.
dsz.control.echo.Off()
# maxage=timedelta.max: accept a cached OS version of any age instead of
# re-querying the target.
version = ops.system.systemversion.get_os_version(maxage=datetime.timedelta.max)
# Major version >= 6 corresponds to Windows Vista / Server 2008 and later;
# on 64-bit builds PatchGuard would kill the hidden process, so bail out.
if ((version.versioninfo.major >= 6) and (version.versioninfo.arch == 'x64')):
    ops.error('PatchGuard will detect and kill the process hidden via this technique. Command disabled on this platform.')
    sys.exit((-1))
|
8333a8f201438f542640f1ee359b1612923bbe3c
|
5c363c50c54175a982330ec888401b3e394373ab
|
/syne_tune/optimizer/schedulers/transfer_learning/bounding_box.py
|
cb4326aa99c9818796b03146a38256fc1cdb3c9b
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
awslabs/syne-tune
|
b14fb008f63def6a172bea6cc451f4e1906647f5
|
c35686e1b5947d45384fd1d41a44e013da53ef43
|
refs/heads/main
| 2023-08-14T14:21:48.995716
| 2023-08-03T12:57:13
| 2023-08-03T12:57:13
| 417,499,108
| 313
| 47
|
Apache-2.0
| 2023-09-14T14:06:54
| 2021-10-15T12:54:45
|
Python
|
UTF-8
|
Python
| false
| false
| 7,036
|
py
|
bounding_box.py
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
from typing import Dict, Callable, Optional, Any
import pandas as pd
from syne_tune.optimizer.scheduler import TrialScheduler, TrialSuggestion
from syne_tune.backend.trial_status import Trial
from syne_tune.optimizer.schedulers.transfer_learning import (
TransferLearningMixin,
TransferLearningTaskEvaluations,
)
from syne_tune.config_space import (
Categorical,
restrict_domain,
choice,
config_space_size,
)
logger = logging.getLogger(__name__)
class BoundingBox(TransferLearningMixin, TrialScheduler):
"""
Simple baseline that computes a bounding-box of the best candidate found in
previous tasks to restrict the search space to only good candidates. The
bounding-box is obtained by restricting to the min-max of the best numerical
hyperparameters and restricting to the set of the best candidates on categorical
parameters. Reference:
| Learning search spaces for Bayesian optimization: Another view of hyperparameter transfer learning.
| Valerio Perrone, Huibin Shen, Matthias Seeger, Cédric Archambeau, Rodolphe Jenatton.
| NeurIPS 2019.
``scheduler_fun`` is used to create the scheduler to be used here, feeding
it with the modified config space. Any additional scheduler arguments
(such as ``points_to_evaluate``) should be encoded inside this function.
Example:
.. code-block::
from syne_tune.optimizer.baselines import RandomSearch
def scheduler_fun(new_config_space: Dict[str, Any], mode: str, metric: str):
return RandomSearch(new_config_space, metric, mode)
bb_scheduler = BoundingBox(scheduler_fun, ...)
Here, ``bb_scheduler`` represents random search, where the hyperparameter
ranges are restricted to contain the best evalutions of previous tasks,
as provided by ``transfer_learning_evaluations``.
:param scheduler_fun: Maps tuple of configuration space (dict), mode (str),
metric (str) to a scheduler. This is required since the final
configuration space is known only after computing a bounding-box.
:param config_space: Initial configuration space to consider, will be updated
to the bounding of the best evaluations of previous tasks
:param metric: Objective name to optimize, must be present in transfer
learning evaluations.
:param mode: Mode to be considered, default to "min".
:param transfer_learning_evaluations: Dictionary from task name to
offline evaluations.
:param num_hyperparameters_per_task: Number of the best configurations to
use per task when computing the bounding box, defaults to 1.
"""
def __init__(
self,
scheduler_fun: Callable[[dict, str, str], TrialScheduler],
config_space: Dict[str, Any],
metric: str,
transfer_learning_evaluations: Dict[str, TransferLearningTaskEvaluations],
mode: Optional[str] = None,
num_hyperparameters_per_task: int = 1,
):
super().__init__(
config_space=config_space,
transfer_learning_evaluations=transfer_learning_evaluations,
metric_names=[metric],
)
if mode is None:
mode = "min"
else:
assert mode in ["min", "max"], "mode must be either 'min' or 'max'."
config_space = self._compute_box(
config_space=config_space,
transfer_learning_evaluations=transfer_learning_evaluations,
mode=mode,
num_hyperparameters_per_task=num_hyperparameters_per_task,
metric=metric,
)
print(f"hyperparameter ranges of best previous configurations {config_space}")
print(f"({config_space_size(config_space)} options)")
self.scheduler = scheduler_fun(config_space, mode, metric)
def _compute_box(
self,
config_space: Dict[str, Any],
transfer_learning_evaluations: Dict[str, TransferLearningTaskEvaluations],
mode: str,
num_hyperparameters_per_task: int,
metric: str,
) -> Dict[str, Any]:
top_k_per_task = self.top_k_hyperparameter_configurations_per_task(
transfer_learning_evaluations=transfer_learning_evaluations,
num_hyperparameters_per_task=num_hyperparameters_per_task,
mode=mode,
metric=metric,
)
hp_df = pd.DataFrame(
[hp for _, top_k_hp in top_k_per_task.items() for hp in top_k_hp]
)
# compute bounding-box on all hyperparameters that are numerical or categorical
new_config_space = {}
for i, (name, domain) in enumerate(config_space.items()):
if hasattr(domain, "sample"):
if isinstance(domain, Categorical):
hp_values = list(sorted(hp_df.loc[:, name].unique()))
new_config_space[name] = choice(hp_values)
elif hasattr(domain, "lower") and hasattr(domain, "upper"):
# domain is numerical, set new lower and upper ranges with bounding-box values
new_config_space[name] = restrict_domain(
numerical_domain=domain,
lower=hp_df.loc[:, name].min(),
upper=hp_df.loc[:, name].max(),
)
else:
# no known way to compute bounding over non numerical domains such as functional
new_config_space[name] = domain
else:
new_config_space[name] = domain
logger.info(
f"new configuration space obtained after computing bounding-box: {new_config_space}"
)
return new_config_space
    def suggest(self, trial_id: int) -> Optional[TrialSuggestion]:
        """Delegate the next-trial suggestion to the wrapped scheduler."""
        return self.scheduler.suggest(trial_id)
    def on_trial_add(self, trial: Trial):
        """Forward the trial-added event to the wrapped scheduler."""
        self.scheduler.on_trial_add(trial)
    def on_trial_complete(self, trial: Trial, result: Dict[str, Any]):
        """Forward the trial-completed event (with its final result) to the wrapped scheduler."""
        self.scheduler.on_trial_complete(trial, result)
    def on_trial_remove(self, trial: Trial):
        """Forward the trial-removed event to the wrapped scheduler."""
        self.scheduler.on_trial_remove(trial)
    def on_trial_error(self, trial: Trial):
        """Forward the trial-error event to the wrapped scheduler."""
        self.scheduler.on_trial_error(trial)
    def on_trial_result(self, trial: Trial, result: Dict[str, Any]) -> str:
        """Forward an intermediate result to the wrapped scheduler and return its decision."""
        return self.scheduler.on_trial_result(trial, result)
    def metric_mode(self) -> str:
        """Return the optimization mode ("min"/"max") reported by the wrapped scheduler."""
        return self.scheduler.metric_mode()
|
3a3d0502591256b13164e64e7d9764422db2ea6b
|
c6c8dc7224e554fdb3fb0e1b34c69ef09ebc98ab
|
/pybooru/exceptions.py
|
d6ad4abdd2595077eb61ff4aca53a3cddf0b6e2f
|
[
"MIT"
] |
permissive
|
LuqueDaniel/pybooru
|
49203ffb027ec848d3a7f6696d4deb5b62462b6a
|
db2663c290f2d1c959b2ab363c4a5be2de956f6a
|
refs/heads/master
| 2023-02-04T21:48:35.706162
| 2023-02-01T20:54:21
| 2023-02-01T20:54:21
| 4,486,051
| 118
| 36
|
MIT
| 2023-02-01T20:54:23
| 2012-05-29T18:47:32
|
Python
|
UTF-8
|
Python
| false
| false
| 1,211
|
py
|
exceptions.py
|
# -*- coding: utf-8 -*-
"""pybooru.exceptions
This module contains Pybooru exceptions.
Classes:
* PybooruError -- Main Pybooru exception class.
* PybooruHTTPError -- Manages HTTP status errors.
* PybooruAPIError -- Manages all API errors.
"""
# __furute__ imports
from __future__ import absolute_import
# pybooru imports
from .resources import HTTP_STATUS_CODE
class PybooruError(Exception):
    """Base exception for every error raised by Pybooru."""
class PybooruHTTPError(PybooruError):
    """Exception raised for HTTP-level errors returned by a booru API."""

    def __init__(self, msg, http_code, url):
        """Initialize PybooruHTTPError.

        Keyword arguments:
            msg (str): The error message.
            http_code (int): The HTTP status code.
            url (str): The URL.
        """
        super(PybooruHTTPError, self).__init__(msg, http_code, url)
        # Fall back to a generic description for status codes missing from
        # HTTP_STATUS_CODE: a KeyError here would mask the real HTTP error.
        name, description = HTTP_STATUS_CODE.get(
            http_code, ("Unknown", "Unrecognized HTTP status code"))
        self._msg = "{0}: {1} - {2}, {3} - URL: {4}".format(
            msg, http_code, name, description, url)

    def __str__(self):
        """Return the pre-formatted error message."""
        return self._msg
class PybooruAPIError(PybooruError):
    """Exception raised for errors reported by the booru API itself."""
|
fb1534573bafe4ee41b90e450b0853663cd966b3
|
4e52ba30ac377ba404bc4cffbd94add2e5edf0e6
|
/stream/clients/python/bookkeeper/kv/futures.py
|
ae626a7e98c832ec2a7cf25911e5495719d35283
|
[
"Apache-2.0"
] |
permissive
|
apache/bookkeeper
|
0bf2bdb66c1e8e18654ba72775fdf0914c01517a
|
1666d820a3d98ee6702e39c7cb0ebe51b1fdfd32
|
refs/heads/master
| 2023-09-01T17:43:09.657566
| 2023-09-01T05:20:10
| 2023-09-01T05:20:10
| 1,575,956
| 1,825
| 1,123
|
Apache-2.0
| 2023-09-13T04:18:18
| 2011-04-06T07:00:07
|
Java
|
UTF-8
|
Python
| false
| false
| 6,354
|
py
|
futures.py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import threading
import uuid
import bookkeeper.common.future
from bookkeeper.kv import exceptions
class Future(bookkeeper.common.future.Future):
    """Encapsulation of the asynchronous execution of an action.

    This object is returned from asychronous bookkeeper calls, and is the
    interface to determine the status of those calls.

    This object should not be created directly, but is returned by other
    methods in this library.

    Args:
        completed (Optional[Any]): An event, with the same interface as
            :class:`threading.Event`. This is provided so that callers
            with different concurrency models (e.g. ``threading`` or
            ``multiprocessing``) can supply an event that is compatible
            with that model. The ``wait()`` and ``set()`` methods will be
            used. If this argument is not provided, then a new
            :class:`threading.Event` will be created and used.
    """

    # This could be a sentinel object or None, but the sentinel object's ID
    # can change if the process is forked, and None has the possibility of
    # actually being a result.
    _SENTINEL = uuid.uuid4()

    def __init__(self, completed=None):
        self._result = self._SENTINEL
        self._exception = self._SENTINEL
        self._callbacks = []
        if completed is None:
            completed = threading.Event()
        self._completed = completed

    def cancel(self):
        """Actions in bookkeeper generally may not be canceled.

        This method always returns False.
        """
        return False

    def cancelled(self):
        """Actions in bookkeeper generally may not be canceled.

        This method always returns False.
        """
        return False

    def running(self):
        """Return True while the action has not yet completed.

        Returns:
            bool: ``True`` if this method has not yet completed, or
                ``False`` if it has completed.
        """
        return not self.done()

    def done(self):
        """Return True if the future is done, False otherwise.

        This still returns True in failure cases; checking :meth:`result` or
        :meth:`exception` is the canonical way to assess success or failure.
        """
        # Compare with the sentinel by identity, not equality: `!=` would
        # invoke the stored result's __eq__/__ne__, which may be expensive,
        # raise, or return a non-bool (e.g. numpy arrays).
        return (self._exception is not self._SENTINEL or
                self._result is not self._SENTINEL)

    def result(self, timeout=None):
        """Return the result of the call, or raise the call's exception.

        This blocks until the action has completed.

        Args:
            timeout (Union[int, float]): The number of seconds before this call
                times out and raises TimeoutError.

        Returns:
            Any: The value passed to :meth:`set_result`.

        Raises:
            exceptions.TimeoutError: If the request times out.
            Exception: For undefined exceptions in the underlying
                call execution.
        """
        # Attempt to get the exception if there is one.
        # If there is not one, then we know everything worked, and we can
        # return an appropriate value.
        err = self.exception(timeout=timeout)
        if err is None:
            return self._result
        raise err

    def exception(self, timeout=None):
        """Return the exception raised by the call, if any.

        This blocks until the action has completed, then returns the
        exception. If the call succeeded, return None.

        Args:
            timeout (Union[int, float]): The number of seconds before this call
                times out and raises TimeoutError.

        Raises:
            exceptions.TimeoutError: If the request times out.

        Returns:
            Exception: The exception raised by the call, if any.
        """
        # Wait until the future is done.
        if not self._completed.wait(timeout=timeout):
            raise exceptions.TimeoutError('Timed out waiting for result.')

        # If the call completed successfully, this should return None.
        # (Identity check against the sentinel -- see done().)
        if self._result is not self._SENTINEL:
            return None

        # Okay, this call had an error; this should return it.
        return self._exception

    def add_done_callback(self, fn):
        """Attach the provided callable to the future.

        The provided function is called, with this future as its only argument,
        when the future finishes running.
        """
        if self.done():
            return fn(self)
        self._callbacks.append(fn)

    def set_result(self, result):
        """Set the result of the future to the provided result.

        Args:
            result (Any): The result
        """
        # Sanity check: A future can only complete once.
        if self.done():
            raise RuntimeError('set_result can only be called once.')

        # Set the result and trigger the future.
        self._result = result
        self._trigger()

    def set_exception(self, exception):
        """Set the result of the future to the given exception.

        Args:
            exception (:exc:`Exception`): The exception raised.
        """
        # Sanity check: A future can only complete once.
        if self.done():
            raise RuntimeError('set_exception can only be called once.')

        # Set the exception and trigger the future.
        self._exception = exception
        self._trigger()

    def _trigger(self):
        """Signal completion and invoke all registered callbacks.

        Called internally once the action completes, via set_result or
        set_exception.
        """
        self._completed.set()
        for callback in self._callbacks:
            callback(self)
|
9d57abd042406b4c6a5d9cbae2e798d66199b3a4
|
572afc77a246acb9483b47fc9e1839f47005d736
|
/python/federatedml/protobuf/generated/pipeline_pb2.py
|
9eff56ee560b695d4ac9b7389235e7074f968d5d
|
[
"Apache-2.0"
] |
permissive
|
FederatedAI/FATE
|
7c787c308cca9ff46f287d24569c68de0a1cac07
|
8767db5ec0cb93784f64b290bc39b7b545c530fb
|
refs/heads/master
| 2023-08-17T10:13:00.302529
| 2023-06-14T07:01:38
| 2023-06-14T07:01:38
| 167,349,656
| 4,942
| 1,571
|
Apache-2.0
| 2023-09-14T07:02:29
| 2019-01-24T10:32:43
|
Python
|
UTF-8
|
Python
| false
| true
| 1,867
|
py
|
pipeline_pb2.py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pipeline.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0epipeline.proto\x12&com.webank.ai.fate.core.mlmodel.buffer\"\xbf\x02\n\x08Pipeline\x12\x15\n\rinference_dsl\x18\x01 \x01(\x0c\x12\x11\n\ttrain_dsl\x18\x02 \x01(\x0c\x12\x1a\n\x12train_runtime_conf\x18\x03 \x01(\x0c\x12\x14\n\x0c\x66\x61te_version\x18\x04 \x01(\t\x12\x10\n\x08model_id\x18\x05 \x01(\t\x12\x15\n\rmodel_version\x18\x06 \x01(\t\x12\x0e\n\x06parent\x18\x07 \x01(\x08\x12\x14\n\x0cloaded_times\x18\x08 \x01(\x05\x12\r\n\x05roles\x18\t \x01(\x0c\x12\x11\n\twork_mode\x18\n \x01(\x05\x12\x16\n\x0einitiator_role\x18\x0b \x01(\t\x12\x1a\n\x12initiator_party_id\x18\x0c \x01(\x05\x12\x1d\n\x15runtime_conf_on_party\x18\r \x01(\x0c\x12\x13\n\x0bparent_info\x18\x0e \x01(\x0c\x42\x0f\x42\rPipelineProtob\x06proto3')
# NOTE: machine-generated protobuf bindings for pipeline.proto ("DO NOT EDIT").
# Regenerate with protoc if the .proto definition changes.
_PIPELINE = DESCRIPTOR.message_types_by_name['Pipeline']
Pipeline = _reflection.GeneratedProtocolMessageType('Pipeline', (_message.Message,), {
  'DESCRIPTOR' : _PIPELINE,
  '__module__' : 'pipeline_pb2'
  # @@protoc_insertion_point(class_scope:com.webank.ai.fate.core.mlmodel.buffer.Pipeline)
  })
_sym_db.RegisterMessage(Pipeline)

if _descriptor._USE_C_DESCRIPTORS == False:
  # Pure-Python descriptor fallback: record the serialized offsets/options.
  DESCRIPTOR._options = None
  DESCRIPTOR._serialized_options = b'B\rPipelineProto'
  _PIPELINE._serialized_start=59
  _PIPELINE._serialized_end=378
# @@protoc_insertion_point(module_scope)
|
d1a68b973c794765166b379950c7d41976b37865
|
5db0fab37c2b8a618d85d3b60fab9f806c416474
|
/src/python/pants/backend/javascript/resolve_test.py
|
a623106b3edfcd1e0ba7dd058f42fd7097c79251
|
[
"Apache-2.0"
] |
permissive
|
pantsbuild/pants
|
4988d1ac5474ec95f94ce2218aeb759401e4b011
|
98cbda8545f0d58c586ed2daa76fefd729d5e0d5
|
refs/heads/main
| 2023-09-05T03:44:17.646899
| 2023-09-01T19:52:09
| 2023-09-01T19:52:09
| 7,209,075
| 2,708
| 593
|
Apache-2.0
| 2023-09-14T19:33:33
| 2012-12-17T17:39:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,932
|
py
|
resolve_test.py
|
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import json
import pytest
from pants.backend.javascript import resolve
from pants.backend.javascript.package_json import NodeThirdPartyPackageTarget, PackageJsonTarget
from pants.backend.javascript.resolve import ChosenNodeResolve, RequestNodeResolve
from pants.build_graph.address import Address
from pants.core.target_types import TargetGeneratorSourcesHelperTarget
from pants.engine.rules import QueryRule
from pants.testutil.rule_runner import RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
    """A RuleRunner wired with the resolve rules and the target types under test."""
    rules = [
        *resolve.rules(),
        QueryRule(ChosenNodeResolve, (RequestNodeResolve,)),
    ]
    target_types = [
        PackageJsonTarget,
        NodeThirdPartyPackageTarget,
        TargetGeneratorSourcesHelperTarget,
    ]
    return RuleRunner(rules=rules, target_types=target_types)
def test_gets_expected_resolve_for_standalone_packages(
    rule_runner: RuleRunner,
) -> None:
    """Each standalone package.json gets its own resolve name and lockfile path."""
    rule_runner.write_files(
        {
            "src/js/a/BUILD": "package_json()",
            "src/js/a/package.json": json.dumps({"name": "ham", "version": "0.0.1"}),
            "src/js/b/BUILD": "package_json()",
            "src/js/b/package.json": json.dumps({"name": "spam", "version": "0.0.1"}),
        }
    )
    expectations = [
        ("src/js/a", "ham", "js.a", "src/js/a/package-lock.json"),
        ("src/js/b", "spam", "js.b", "src/js/b/package-lock.json"),
    ]
    for spec_path, package_name, resolve_name, lockfile in expectations:
        tgt = rule_runner.get_target(Address(spec_path, generated_name=package_name))
        result = rule_runner.request(ChosenNodeResolve, [RequestNodeResolve(tgt.address)])
        assert result.resolve_name == resolve_name
        assert result.file_path == lockfile
def test_gets_expected_resolve_for_workspace_packages(
    rule_runner: RuleRunner,
) -> None:
    """A workspace member resolves to its workspace root's resolve and lockfile."""
    rule_runner.write_files(
        {
            "src/js/BUILD": "package_json()",
            "src/js/package.json": json.dumps(
                {"name": "ham", "version": "0.0.1", "workspaces": ["./child"]}
            ),
            "src/js/child/BUILD": "package_json()",
            "src/js/child/package.json": json.dumps({"name": "spam", "version": "0.0.1"}),
        }
    )
    root_tgt = rule_runner.get_target(Address("src/js", generated_name="ham"))
    root_result = rule_runner.request(
        ChosenNodeResolve, [RequestNodeResolve(root_tgt.address)]
    )
    member_tgt = rule_runner.get_target(Address("src/js/child", generated_name="spam"))
    member_result = rule_runner.request(
        ChosenNodeResolve, [RequestNodeResolve(member_tgt.address)]
    )
    assert member_result == root_result
    assert member_result.resolve_name == "js"
    assert member_result.file_path == "src/js/package-lock.json"
|
ae9ced7ebdb2c948d4e0b42575e394b108721106
|
3405736c71d6224374437dba141815d03e95b89f
|
/tools/sign/convertkey.py
|
34f9f8003905ef3f2cb4b3a1fb5534341e7da503
|
[
"MIT"
] |
permissive
|
nwjs/nw.js
|
dd1338335ec7306b94203f46629fa83081e1c364
|
c2184a6edae6d6cca99d199ad5fedd37e4af3fad
|
refs/heads/nw78
| 2023-08-31T13:00:22.395612
| 2023-07-30T22:47:46
| 2023-07-30T22:47:46
| 3,100,121
| 28,744
| 3,512
|
MIT
| 2023-08-17T22:23:55
| 2012-01-04T06:21:10
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 415
|
py
|
convertkey.py
|
# Emit the DER-encoded public half of private_key.pem as a C byte-array
# literal ("kNWSignaturesPublicKey") on stdout.
from Crypto.PublicKey import RSA
import sys

# Context manager closes the key file (the previous bare open() leaked it).
with open("private_key.pem") as key_file:
    key = RSA.importKey(key_file.read())
der = key.publickey().exportKey("DER")

total = len(der)
sys.stdout.write('''
const uint8 kNWSignaturesPublicKey[] = {
  ''')
for i, c in enumerate(der, start=1):
    # Iterating bytes yields ints on Python 3 but 1-char strings on Python 2;
    # support both (the original ord(c) was Python 2 only).
    byte = c if isinstance(c, int) else ord(c)
    sys.stdout.write("{0:#04x}".format(byte))
    if i < total:
        sys.stdout.write(", ")
    if i % 10 == 0:
        # Wrap the array literal every 10 bytes.
        sys.stdout.write("\n  ")
# Parenthesized print works as a statement on Python 2 and a function on
# Python 3 (the original bare print statement was Python 2 only).
print('''
};
''')
|
6bbfd856aa47356eade10cec2478ba142891e0db
|
82a7c9ae5392d847df4b01ae864c81bd14caab74
|
/notifications_extension/views.py
|
44b4d0f769a9d0840a1b6031977511a4f45f22b7
|
[
"MIT"
] |
permissive
|
DjangoChinaOrg/Django-China-API
|
d4ab9be99d94d74f9f5f93620ea6d062d4fa18a6
|
79a5d85fe88ba7784d08d370b8e7519f7274f208
|
refs/heads/dev
| 2021-07-23T17:37:59.471725
| 2018-11-08T09:55:03
| 2018-11-08T09:55:03
| 122,849,421
| 190
| 52
|
MIT
| 2018-10-13T12:18:09
| 2018-02-25T15:37:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,806
|
py
|
views.py
|
from rest_framework import permissions, viewsets, mixins
from notifications.models import Notification
from rest_framework import filters
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.response import Response
from rest_framework import status
from rest_framework.decorators import action
from .serializers import NotificationSerializer
from .filters import NotificationFilter
class NotificationViewSet(mixins.RetrieveModelMixin,
                          mixins.UpdateModelMixin,
                          mixins.DestroyModelMixin,
                          mixins.ListModelMixin,
                          viewsets.GenericViewSet):
    """List, mark-as-read and soft-delete the current user's notifications."""

    serializer_class = NotificationSerializer
    permission_classes = [permissions.IsAuthenticated, ]
    filter_backends = (DjangoFilterBackend, filters.OrderingFilter)
    ordering_fields = ('timestamp',)
    filter_class = NotificationFilter  # 过滤器

    def get_queryset(self):
        """Only the requesting user's active (non-deleted) notifications are visible."""
        return Notification.objects.filter(recipient=self.request.user).active()

    def perform_destroy(self, instance):
        """Soft-delete: flag the notification instead of removing the row.

        Uses the instance already resolved (and recipient-scoped) by
        ``get_object()`` instead of re-fetching by pk through the unscoped
        manager as before.
        """
        instance.deleted = True
        instance.save()

    def update(self, request, *args, **kwargs):
        """Mark a single notification as read (200), 403 if it belongs to
        another user, 404 if it does not exist."""
        pk = self.kwargs['pk']
        try:
            instance = Notification.objects.get(id=pk)
        except Notification.DoesNotExist:
            # An unknown pk used to escape as an unhandled exception (500).
            return Response(status=status.HTTP_404_NOT_FOUND)
        if instance.recipient != request.user:
            return Response(status=status.HTTP_403_FORBIDDEN)
        instance.unread = False
        instance.save()
        return Response(status=status.HTTP_200_OK)

    @action(methods=['post'], detail=False)
    def mark_all_as_read(self, request):
        """Mark every one of the requesting user's notifications as read."""
        Notification.objects.filter(recipient=request.user).mark_all_as_read(recipient=request.user)
        return Response(status=status.HTTP_200_OK)
|
807a837d4cf69d4aa7173c4051e4c3c14d413ad2
|
a63d907ad63ba6705420a6fb2788196d1bd3763c
|
/src/datamgr/metadata/metadata/backend/mysql/replica_base.py
|
3ab330a878a927334f7842a1f2dc037e3d41de13
|
[
"MIT"
] |
permissive
|
Tencent/bk-base
|
a38461072811667dc2880a13a5232004fe771a4b
|
6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2
|
refs/heads/master
| 2022-07-30T04:24:53.370661
| 2022-04-02T10:30:55
| 2022-04-02T10:30:55
| 381,257,882
| 101
| 51
|
NOASSERTION
| 2022-04-02T10:30:56
| 2021-06-29T06:10:01
|
Python
|
UTF-8
|
Python
| false
| false
| 3,555
|
py
|
replica_base.py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# ReplicaBase used to generate replica (mapping) tables; MixIn information is merged in automatically.
from collections import Sequence
from sqlalchemy.ext.declarative import DeclarativeMeta, declarative_base
from metadata.db_models.meta_service.replica_conf import replica_mixins
class ReplicaMixIn(object):
    """Base class for MixIns that supply supplementary columns to replica tables."""
    pass
# Collect every ReplicaMixIn subclass exported by the configured mixin modules,
# keyed by the model name it extends (the part of the class name before "MixIn").
replica_mixin_classes_info = {}
for module in replica_mixins:
    for attr in dir(module):
        item = getattr(module, attr)
        if isinstance(item, type) and issubclass(item, ReplicaMixIn) and item is not ReplicaMixIn:
            replica_mixin_classes_info[attr.split('MixIn')[0]] = item
class ReplicaMeta(DeclarativeMeta):
    """Metaclass that auto-generates "Replica"-prefixed mapped table models."""

    def __new__(mcs, name, bases, namespace):
        # Automatically attach the matching MixIn class (if any).
        if name == 'Base':
            return super(ReplicaMeta, mcs).__new__(mcs, name, bases, namespace)
        else:
            namespace[str('__abstract__')] = True
            table_args = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8', 'mysql_collate': 'utf8_general_ci'}
            if namespace.get('__table_args__'):
                if isinstance(namespace['__table_args__'], Sequence):
                    table_args_lst = list(namespace['__table_args__'])
                    if isinstance(table_args_lst[-1], dict):
                        table_args_lst[-1].update(table_args)
                    else:
                        table_args_lst.append(table_args)
                else:
                    namespace['__table_args__'].update(table_args)
            # NOTE(review): table_args_lst built above is never written back, and
            # this assignment replaces any pre-existing __table_args__ with the
            # bare MySQL options dict — looks unintentional; confirm intent.
            namespace['__table_args__'] = table_args
            cls = super(ReplicaMeta, mcs).__new__(mcs, name, tuple(bases), namespace)
            mix_bases = [cls]
            if name in replica_mixin_classes_info:
                # Put the MixIn first so its columns take precedence in the MRO.
                mix_bases.insert(0, replica_mixin_classes_info[name])
            mixed_cls = super(ReplicaMeta, mcs).__new__(mcs, str('Replica') + name, tuple(mix_bases), {})
            return mixed_cls
# Declarative base whose metaclass emits "Replica"-prefixed mapped models.
ReplicaBase = declarative_base(metaclass=ReplicaMeta)
metadata = ReplicaBase.metadata
# All replica tables live in the bkdata_meta database.
ReplicaBase.db_name = ReplicaBase._db_name = 'bkdata_meta'
|
9e6fc7a26c5c901d65ab8c83e03639f4b5aa61bd
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/Skimage_numpy/source/skimage/io/_plugins/pil_plugin.py
|
59811dd2e0c89263217db06b52cfea1f9e14d539
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 7,726
|
py
|
pil_plugin.py
|
__all__ = ['imread', 'imsave']
import numpy as np
from six import string_types
from PIL import Image
from ...util import img_as_ubyte, img_as_uint
def imread(fname, dtype=None, img_num=None, **kwargs):
    """Load an image from file.

    Parameters
    ----------
    fname : str or file
        File name or file-like-object.
    dtype : numpy dtype object or string specifier
        Specifies data type of array elements.
    img_num : int, optional
        Specifies which image to read in a file with multiple images
        (zero-indexed).
    kwargs : keyword pairs, optional
        Addition keyword arguments to pass through.

    Notes
    -----
    Files are read using the Python Imaging Libary.
    See PIL docs [1]_ for a list of supported formats.

    References
    ----------
    .. [1] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
    """
    if isinstance(fname, string_types):
        # Path given: open it ourselves and close it when done.
        with open(fname, 'rb') as f:
            return pil_to_ndarray(Image.open(f), dtype=dtype, img_num=img_num)
    # File-like object given: the caller owns (and closes) it.
    return pil_to_ndarray(Image.open(fname), dtype=dtype, img_num=img_num)
def pil_to_ndarray(im, dtype=None, img_num=None):
    """Import a PIL Image object to an ndarray, in memory.

    Parameters
    ----------
    Refer to ``imread``.
    """
    try:
        # this will raise an IOError if the file is not readable
        im.getdata()[0]
    except IOError as e:
        site = "http://pillow.readthedocs.org/en/latest/installation.html#external-libraries"
        pillow_error_message = str(e)
        error_message = ('Could not load "%s" \n'
                         'Reason: "%s"\n'
                         'Please see documentation at: %s'
                         % (im.filename, pillow_error_message, site))
        raise ValueError(error_message)
    frames = []
    grayscale = None
    i = 0
    while True:
        try:
            im.seek(i)
        except EOFError:
            break

        frame = im

        if img_num is not None and img_num != i:
            # Not the requested frame: force its data to load, then skip it.
            im.getdata()[0]
            i += 1
            continue

        if im.format == 'PNG' and im.mode == 'I' and dtype is None:
            dtype = 'uint16'

        if im.mode == 'P':
            if grayscale is None:
                grayscale = _palette_is_grayscale(im)

            if grayscale:
                frame = im.convert('L')
            else:
                if im.format == 'PNG' and 'transparency' in im.info:
                    frame = im.convert('RGBA')
                else:
                    frame = im.convert('RGB')

        elif im.mode == '1':
            frame = im.convert('L')

        elif 'A' in im.mode:
            frame = im.convert('RGBA')

        elif im.mode == 'CMYK':
            frame = im.convert('RGB')

        if im.mode.startswith('I;16'):
            shape = im.size
            dtype = '>u2' if im.mode.endswith('B') else '<u2'
            if 'S' in im.mode:
                dtype = dtype.replace('u', 'i')
            # np.fromstring was deprecated and removed in NumPy 2.0;
            # np.frombuffer is the supported replacement. Copy so the frame
            # stays writable, matching fromstring's behavior.
            frame = np.frombuffer(frame.tobytes(), dtype).copy()
            frame.shape = shape[::-1]
        else:
            frame = np.array(frame, dtype=dtype)

        frames.append(frame)
        i += 1

        if img_num is not None:
            break

    if hasattr(im, 'fp') and im.fp:
        im.fp.close()

    if img_num is None and len(frames) > 1:
        return np.array(frames)
    elif frames:
        return frames[0]
    elif img_num:
        raise IndexError('Could not find image #%s' % img_num)
def _palette_is_grayscale(pil_image):
"""Return True if PIL image in palette mode is grayscale.
Parameters
----------
pil_image : PIL image
PIL Image that is in Palette mode.
Returns
-------
is_grayscale : bool
True if all colors in image palette are gray.
"""
assert pil_image.mode == 'P'
# get palette as an array with R, G, B columns
palette = np.asarray(pil_image.getpalette()).reshape((256, 3))
# Not all palette colors are used; unused colors have junk values.
start, stop = pil_image.getextrema()
valid_palette = palette[start:stop]
# Image is grayscale if channel differences (R - G and G - B)
# are all zero.
return np.allclose(np.diff(valid_palette), 0)
def ndarray_to_pil(arr, format_str=None):
    """Export an ndarray to a PIL object.

    Parameters
    ----------
    Refer to ``imsave``.
    """
    if arr.ndim == 3:
        # Color image: the channel count selects RGB (3) vs RGBA (4).
        arr = img_as_ubyte(arr)
        mode = {3: 'RGB', 4: 'RGBA'}[arr.shape[2]]
    elif format_str in ['png', 'PNG']:
        # Single-channel PNG: prefer 16-bit ('I;16') unless the data already
        # fits in uint8, in which case fall back to 8-bit 'L'.
        mode = 'I;16'
        mode_base = 'I'

        if arr.dtype.kind == 'f':
            arr = img_as_uint(arr)

        elif arr.max() < 256 and arr.min() >= 0:
            arr = arr.astype(np.uint8)
            mode = mode_base = 'L'

        else:
            arr = img_as_uint(arr)
    else:
        # Any other single-channel target: 8-bit grayscale.
        arr = img_as_ubyte(arr)
        mode = 'L'
        mode_base = 'L'

    try:
        array_buffer = arr.tobytes()
    except AttributeError:
        array_buffer = arr.tostring()  # Numpy < 1.9

    if arr.ndim == 2:
        # 2-D data: create the image then fill it from the raw buffer.
        im = Image.new(mode_base, arr.T.shape)
        try:
            im.frombytes(array_buffer, 'raw', mode)
        except AttributeError:
            im.fromstring(array_buffer, 'raw', mode)  # PIL 1.1.7
    else:
        # PIL size is (width, height), i.e. the transpose of numpy's shape.
        image_shape = (arr.shape[1], arr.shape[0])
        try:
            im = Image.frombytes(mode, image_shape, array_buffer)
        except AttributeError:
            im = Image.fromstring(mode, image_shape, array_buffer)  # PIL 1.1.7
    return im
def imsave(fname, arr, format_str=None, **kwargs):
    """Save an image to disk.

    Parameters
    ----------
    fname : str or file-like object
        Name of destination file.
    arr : ndarray of uint8 or float
        Array (image) to save. Arrays of data-type uint8 should have
        values in [0, 255], whereas floating-point arrays must be
        in [0, 1].
    format_str: str
        Format to save as, this is defaulted to PNG if using a file-like
        object; this will be derived from the extension if fname is a string
    kwargs: dict
        Keyword arguments to the Pillow save function (or tifffile save
        function, for Tiff files). These are format dependent. For example,
        Pillow's JPEG save function supports an integer ``quality`` argument
        with values in [1, 95], while TIFFFile supports a ``compress``
        integer argument with values in [0, 9].

    Notes
    -----
    Use the Python Imaging Libary.
    See PIL docs [1]_ for a list of other supported formats.
    All images besides single channel PNGs are converted using `img_as_uint8`.
    Single Channel PNGs have the following behavior:
    - Integer values in [0, 255] and Boolean types -> img_as_uint8
    - Floating point and other integers -> img_as_uint16

    References
    ----------
    .. [1] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
    """
    is_path = isinstance(fname, string_types)
    # File-like objects default to PNG; a ".png" path forces PNG.
    if not is_path and format_str is None:
        format_str = "PNG"
    if is_path and fname.lower().endswith(".png"):
        format_str = "PNG"

    arr = np.asanyarray(arr)

    if arr.dtype.kind == 'b':
        arr = arr.astype(np.uint8)

    if arr.ndim not in (2, 3):
        raise ValueError("Invalid shape for image array: %s" % arr.shape)
    if arr.ndim == 3 and arr.shape[2] not in (3, 4):
        raise ValueError("Invalid number of channels in image array.")

    img = ndarray_to_pil(arr, format_str=format_str)
    img.save(fname, format=format_str, **kwargs)
|
5444629de85d57f828201222c2eb016f75d7d6b2
|
88ae8695987ada722184307301e221e1ba3cc2fa
|
/third_party/webrtc/tools_webrtc/ios/generate_modulemap.py
|
1b61b8e3d120dd56031d2c92098b491b09983283
|
[
"BSD-3-Clause",
"LicenseRef-scancode-google-patent-license-webrtc",
"LicenseRef-scancode-google-patent-license-webm",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
iridium-browser/iridium-browser
|
71d9c5ff76e014e6900b825f67389ab0ccd01329
|
5ee297f53dc7f8e70183031cff62f37b0f19d25f
|
refs/heads/master
| 2023-08-03T16:44:16.844552
| 2023-07-20T15:17:00
| 2023-07-23T16:09:30
| 220,016,632
| 341
| 40
|
BSD-3-Clause
| 2021-08-13T13:54:45
| 2019-11-06T14:32:31
| null |
UTF-8
|
Python
| false
| false
| 1,135
|
py
|
generate_modulemap.py
|
#!/usr/bin/env vpython3
# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import argparse
import sys
def GenerateModulemap():
    """Write a Clang framework modulemap for the binary named via -n/--name to
    the path given via -o/--out. Returns 0 on success."""
    parser = argparse.ArgumentParser(description='Generate modulemap')
    parser.add_argument("-o", "--out", type=str, help="Output file.")
    parser.add_argument("-n", "--name", type=str, help="Name of binary.")
    args = parser.parse_args()

    name = args.name
    module_template = ('framework module %s {\n'
                       '  umbrella header "%s.h"\n'
                       '\n'
                       '  export *\n'
                       '  module * { export * }\n'
                       '}\n' % (name, name))
    with open(args.out, "w") as outfile:
        outfile.write(module_template)
    return 0
if __name__ == '__main__':
    # Exit with GenerateModulemap's status code when run as a script.
    sys.exit(GenerateModulemap())
|
5fa280eb219f0e2d5da7e36bfd2205c79a186586
|
77fee94c58cd5b6305eef2f13d74b488db428c59
|
/litex/soc/cores/cpu/blackparrot/core.py
|
0f03ccdd7cb844ec8f2a41f02fc8ba226dd3c660
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
enjoy-digital/litex
|
de5919d649c1b884c47a5e0364c2a9a584ebd614
|
405296b7fd99764af21fffd94afa5075c22affa8
|
refs/heads/master
| 2023-08-31T23:52:33.895792
| 2023-08-31T17:34:55
| 2023-08-31T17:36:21
| 45,734,719
| 2,351
| 524
|
NOASSERTION
| 2023-09-14T21:26:26
| 2015-11-07T12:02:12
|
C
|
UTF-8
|
Python
| false
| false
| 6,788
|
py
|
core.py
|
# BlackParrot Chip core support for the LiteX SoC.
#
# Authors: Sadullah Canakci & Cansu Demirkiran <{scanakci,cansu}@bu.edu>
# Copyright (c) 2019, Boston University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
from shutil import copyfile
from migen import *
from litex.gen import *
from litex import get_data_mod
from litex.soc.interconnect import axi
from litex.soc.interconnect import wishbone
from litex.soc.cores.cpu import CPU, CPU_GCC_TRIPLE_RISCV64
# Variants -----------------------------------------------------------------------------------------
# Supported build variants of the BlackParrot core.
CPU_VARIANTS = ["standard", "sim"]

# GCC Flags ----------------------------------------------------------------------------------------

# Both variants target RV64IMAFD with the LP64D ABI.
GCC_FLAGS = {
    "standard": "-march=rv64imafd -mabi=lp64d ",
    "sim": "-march=rv64imafd -mabi=lp64d ",
}
# BlackParrot --------------------------------------------------------------------------------------
class BlackParrot(CPU):
    # LiteX CPU description: 64-bit little-endian RISC-V softcore.
    category = "softcore"
    family = "riscv"
    name = "blackparrot"
    human_name = "BlackParrotRV64[imafd]"
    variants = CPU_VARIANTS
    data_width = 64
    endianness = "little"
    gcc_triple = CPU_GCC_TRIPLE_RISCV64
    linker_output_format = "elf64-littleriscv"
    nop = "nop"
    io_regions = {0x5000_0000: 0x1000_0000} # Origin, Length.
# Memory Mapping.
@property
def mem_map(self):
# Keep the lower 128MBs for SoC IOs auto-allocation.
return {
"csr" : 0x5800_0000,
"rom" : 0x7000_0000,
"sram" : 0x7100_0000,
"main_ram" : 0x8000_0000,
}
# GCC Flags.
@property
def gcc_flags(self):
flags = "-mno-save-restore "
flags += GCC_FLAGS[self.variant]
flags += "-D__blackparrot__ "
flags += "-mcmodel=medany"
return flags
def __init__(self, platform, variant="standard"):
self.platform = platform
self.variant = variant
self.reset = Signal()
self.idbus = idbus = wishbone.Interface(data_width=64, adr_width=37)
self.periph_buses = [idbus]
self.memory_buses = []
self.cpu_params = dict(
# Clk / Rst.
i_clk_i = ClockSignal("sys"),
i_reset_i = ResetSignal("sys") | self.reset,
# Wishbone (I/D).
i_wbm_dat_i = idbus.dat_r,
o_wbm_dat_o = idbus.dat_w,
i_wbm_ack_i = idbus.ack,
i_wbm_err_i = idbus.err,
i_wbm_rty_i = Open(),
o_wbm_adr_o = idbus.adr,
o_wbm_stb_o = idbus.stb,
o_wbm_cyc_o = idbus.cyc,
o_wbm_sel_o = idbus.sel,
o_wbm_we_o = idbus.we,
o_wbm_cti_o = idbus.cti,
o_wbm_bte_o = idbus.bte,
)
# Copy config loader to /tmp
vdir = get_data_mod("cpu", "blackparrot").data_location
blackparrot = os.path.join(vdir, "black-parrot")
bp_litex = os.path.join(vdir, "bp_litex")
copyfile(os.path.join(bp_litex, "cce_ucode.mem"), "/tmp/cce_ucode.mem")
# Set environmental variables
os.environ["BP"] = blackparrot
os.environ["BP_LITEX_DIR"] = bp_litex
os.environ["BP_COMMON_DIR"] = os.path.join(blackparrot, "bp_common")
os.environ["BP_FE_DIR"] = os.path.join(blackparrot, "bp_fe")
os.environ["BP_BE_DIR"] = os.path.join(blackparrot, "bp_be")
os.environ["BP_ME_DIR"] = os.path.join(blackparrot, "bp_me")
os.environ["BP_TOP_DIR"] = os.path.join(blackparrot, "bp_top")
external = os.path.join(blackparrot, "external")
os.environ["BP_EXTERNAL_DIR"] = external
os.environ["BASEJUMP_STL_DIR"] = os.path.join(external, "basejump_stl")
os.environ["HARDFLOAT_DIR"] = os.path.join(external, "HardFloat")
os.environ["LITEX_FPGA_DIR"] = os.path.join(bp_litex, "fpga")
os.environ["LITEX_SIMU_DIR"] = os.path.join(bp_litex, "simulation")
self.add_sources(platform, variant)
def set_reset_address(self, reset_address):
self.reset_address = reset_address
assert reset_address == 0x7000_0000, "cpu_reset_addr hardcoded to 7x00000000!"
@staticmethod
def add_sources(platform, variant="standard"):
vdir = get_data_mod("cpu", "blackparrot").data_location
bp_litex = os.path.join(vdir, "bp_litex")
filename = os.path.join(bp_litex, {
"standard": "flist.fpga",
"sim" : "flist.verilator"
}[variant])
with open(filename) as openfileobject:
for line in openfileobject:
temp = line
if (temp[0] == '/' and temp[1] == '/'):
continue
elif ("+incdir+" in temp) :
s1 = line.find('$')
vdir = os.path.expandvars(line[s1:]).strip()
platform.add_verilog_include_path(vdir)
elif (temp[0]=='$') :
vdir = os.path.expandvars(line).strip()
platform.add_source(vdir, "systemverilog")
elif (temp[0] == '/'):
assert("No support for absolute path for now")
def do_finalize(self):
assert hasattr(self, "reset_address")
self.specials += Instance("ExampleBlackParrotSystem", **self.cpu_params)
|
9cca6448a24bad5ccf51ae66c1c871aa7ecbd70b
|
538348577d4089f21a110e90a935fc11e171230b
|
/examples/custom_problem.py
|
c9f5ac8ff83ac20f006768ffe711b88c48700ae6
|
[
"MIT"
] |
permissive
|
NiaOrg/NiaPy
|
94fd873c5427f5fc23ce9402ad8633279fc9c6e4
|
70562b0c840bb0a7c45941d35e22244cea12f697
|
refs/heads/master
| 2023-07-07T06:09:42.210076
| 2023-06-27T09:22:17
| 2023-06-27T09:22:17
| 120,444,947
| 254
| 150
|
MIT
| 2023-04-24T11:54:24
| 2018-02-06T11:25:36
|
Python
|
UTF-8
|
Python
| false
| false
| 827
|
py
|
custom_problem.py
|
# encoding=utf8
# This is temporary fix to import module from parent folder
# It will be removed when package is published on PyPI
import sys
sys.path.append('../')
import numpy as np
from niapy.task import Task
from niapy.problems import Problem
from niapy.algorithms.basic import ParticleSwarmAlgorithm
class MyProblem(Problem):
    """Sphere benchmark problem: f(x) = sum_i x_i**2, minimized at the origin."""

    def __init__(self, dimension, lower=-10, upper=10, *args, **kwargs):
        super().__init__(dimension, lower, upper, *args, **kwargs)

    def _evaluate(self, x):
        # Equivalent to np.sum(x ** 2).
        return (x ** 2).sum()
# Run Particle Swarm Optimization on the custom sphere problem
# (10 dimensions, 1000 iterations, 40 particles).
task = Task(problem=MyProblem(dimension=10), max_iters=1000)
algo = ParticleSwarmAlgorithm(population_size=40, c1=2.0, c2=2.0, w=0.7, min_velocity=-4, max_velocity=4)
best = algo.run(task=task)
# best unpacks as (solution, fitness) -- presumably; verify against niapy's Algorithm.run.
print('%s -> %s ' % (best[0], best[1]))
|
032fedda5f8a22a80e07af24ed37daf4d579cdcb
|
010279e2ba272d09e9d2c4e903722e5faba2cf7a
|
/contrib/python/joblib/joblib/externals/loky/cloudpickle_wrapper.py
|
099debcb711c6695f0570861293b198047bd6093
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
catboost/catboost
|
854c1a1f439a96f1ae6b48e16644be20aa04dba2
|
f5042e35b945aded77b23470ead62d7eacefde92
|
refs/heads/master
| 2023-09-01T12:14:14.174108
| 2023-09-01T10:01:01
| 2023-09-01T10:22:12
| 97,556,265
| 8,012
| 1,425
|
Apache-2.0
| 2023-09-11T03:32:32
| 2017-07-18T05:29:04
|
Python
|
UTF-8
|
Python
| false
| false
| 3,608
|
py
|
cloudpickle_wrapper.py
|
import inspect
from functools import partial
from joblib.externals.cloudpickle import dumps, loads
WRAP_CACHE = {}
class CloudpickledObjectWrapper:
    """Wrap an object so it is (de)serialized via cloudpickle, not pickle."""
    def __init__(self, obj, keep_wrapper=False):
        # keep_wrapper: if True, unpickling yields a wrapper again instead of
        # the bare object.
        self._obj = obj
        self._keep_wrapper = keep_wrapper
    def __reduce__(self):
        # Serialize the payload with cloudpickle; the standard pickler then
        # only has to transport the resulting bytes.
        _pickled_object = dumps(self._obj)
        if not self._keep_wrapper:
            # Unpickle straight to the bare object (wrapper disappears).
            return loads, (_pickled_object,)
        return _reconstruct_wrapper, (_pickled_object, self._keep_wrapper)
    def __getattr__(self, attr):
        # Ensure that the wrapped object can be used seamlessly as the
        # previous object.
        # NOTE(review): if _obj/_keep_wrapper were looked up before being set,
        # this tail call would recurse; presumably unreachable in normal use.
        if attr not in ["_obj", "_keep_wrapper"]:
            return getattr(self._obj, attr)
        return getattr(self, attr)
# Make sure the wrapped object conserves the callable property
class CallableObjectWrapper(CloudpickledObjectWrapper):
    # Forward calls to the wrapped callable so the wrapper itself is callable.
    def __call__(self, *args, **kwargs):
        return self._obj(*args, **kwargs)
def _wrap_non_picklable_objects(obj, keep_wrapper):
    # Pick the wrapper class that preserves callability when needed.
    wrapper_cls = CallableObjectWrapper if callable(obj) else CloudpickledObjectWrapper
    return wrapper_cls(obj, keep_wrapper=keep_wrapper)
def _reconstruct_wrapper(_pickled_object, keep_wrapper):
    # Unpickling counterpart of __reduce__: restore the payload and re-wrap it.
    obj = loads(_pickled_object)
    return _wrap_non_picklable_objects(obj, keep_wrapper)
def _wrap_objects_when_needed(obj):
    # Function to introspect an object and decide if it should be wrapped or
    # not.
    # Objects defined in __main__ are not importable by a worker process.
    need_wrap = "__main__" in getattr(obj, "__module__", "")
    if isinstance(obj, partial):
        # Recurse into partials: wrap the target callable and every bound
        # positional/keyword argument independently.
        return partial(
            _wrap_objects_when_needed(obj.func),
            *[_wrap_objects_when_needed(a) for a in obj.args],
            **{
                k: _wrap_objects_when_needed(v)
                for k, v in obj.keywords.items()
            }
        )
    if callable(obj):
        # Need wrap if the object is a function defined in a local scope of
        # another function.
        func_code = getattr(obj, "__code__", "")
        need_wrap |= getattr(func_code, "co_flags", 0) & inspect.CO_NESTED
        # Need wrap if the obj is a lambda expression
        func_name = getattr(obj, "__name__", "")
        need_wrap |= "<lambda>" in func_name
    if not need_wrap:
        return obj
    # Memoize so repeated submissions of the same object reuse one wrapper.
    wrapped_obj = WRAP_CACHE.get(obj)
    if wrapped_obj is None:
        wrapped_obj = _wrap_non_picklable_objects(obj, keep_wrapper=False)
        WRAP_CACHE[obj] = wrapped_obj
    return wrapped_obj
def wrap_non_picklable_objects(obj, keep_wrapper=True):
    """Wrapper for non-picklable object to use cloudpickle to serialize them.
    Note that this wrapper tends to slow down the serialization process as it
    is done with cloudpickle which is typically slower compared to pickle. The
    proper way to solve serialization issues is to avoid defining functions and
    objects in the main scripts and to implement __reduce__ functions for
    complex classes.
    """
    # If obj is a class, create a CloudpickledClassWrapper which instantiates
    # the object internally and wrap it directly in a CloudpickledObjectWrapper
    if inspect.isclass(obj):
        # The wrapped class closes over `obj`; calling it builds a real
        # instance and stores it as the wrapper's payload.
        class CloudpickledClassWrapper(CloudpickledObjectWrapper):
            def __init__(self, *args, **kwargs):
                self._obj = obj(*args, **kwargs)
                self._keep_wrapper = keep_wrapper
        # Preserve the original class name for nicer reprs/errors.
        CloudpickledClassWrapper.__name__ = obj.__name__
        return CloudpickledClassWrapper
    # If obj is an instance of a class, just wrap it in a regular
    # CloudpickledObjectWrapper
    return _wrap_non_picklable_objects(obj, keep_wrapper=keep_wrapper)
|
1b13ba7f60fd4de9a05e21c8ccfc99ebb76b6f4a
|
ed83a8a01473055b6563f0a1122738442f69be0a
|
/scratch/__init__.py
|
c03f6cd803c226237ed434d9bcc5cbd4c9b53469
|
[] |
no_license
|
cs50/problems
|
528a6a09e533d7dabaebd5d67bd8d7052d35681b
|
96f8dd1c6b8202dcc67f36f06471a178acc4237f
|
refs/heads/2023/x
| 2023-09-03T20:10:55.654530
| 2023-08-30T21:17:54
| 2023-08-30T21:17:54
| 137,074,067
| 128
| 221
| null | 2023-09-04T17:39:14
| 2018-06-12T13:15:17
|
Python
|
UTF-8
|
Python
| false
| false
| 3,129
|
py
|
__init__.py
|
import json
import os
import shlex
import itertools
import check50
@check50.check()
def valid():
    """project exists and is valid Scratch program"""
    # Make sure there is only one .sb3 file.
    filenames = [filename for filename in os.listdir() if filename.endswith(".sb3")]
    if len(filenames) > 1:
        raise check50.Failure("more than one .sb3 file found. Make sure there's only one!")
    elif not filenames:
        raise check50.Failure("no .sb3 file found")
    filename = filenames[0]
    # Ensure that unzipped .sb2 file contains .json file.
    # (A non-zero unzip exit status means the archive is not a valid zip.)
    if check50.run(f"unzip {shlex.quote(filename)}").exit():
        raise check50.Failure("invalid .sb3 file")
    check50.exists("project.json")
    with open("project.json") as f:
        project = json.load(f)
    # The return value is injected into dependent checks as `project`.
    return project["targets"]
@check50.check(valid)
def two_sprites(project):
    """project contains at least two sprites"""
    # Every target that is not the stage counts as a sprite.
    count = len([target for target in project if not target["isStage"]])
    if count < 2:
        raise check50.Failure(f"only {count} sprite{'' if count == 1 else 's'} found, 2 required")
@check50.check(valid)
def non_cat(project):
    """project contains a non-cat sprite"""
    # Asset IDs of the two default cat costumes; a sprite whose costume set
    # equals exactly this set is treated as the stock cat.
    # NOTE(review): IDs presumably match Scratch's current default project --
    # verify after Scratch upgrades.
    cat_sprite_ids = {"bcf454acf82e4504149f7ffe07081dbc",
                      "0fb9be3e8397c983338cb71dc84d0b25"}
    # Fail when every target is either the stage or a default-cat sprite.
    if all(target["isStage"] or {costume["assetId"] for costume in target["costumes"]} == cat_sprite_ids for target in project):
        raise check50.Failure("no non-cat sprite found")
@check50.check(valid)
def three_blocks(project):
    """project contains at least three scripts"""
    # Total number of blocks across all targets.
    total = 0
    for target in project:
        total += len(target["blocks"])
    if total < 3:
        raise check50.Failure(f"only {total} script{'' if total == 1 else 's'} found, 3 required")
@check50.check(valid)
def uses_condition(project):
    """project uses at least one condition"""
    # Opcodes this check accepts as conditional constructs.
    # NOTE(review): control_repeat is a fixed-count repeat; presumably listed
    # deliberately -- confirm against the problem specification.
    if not contains_blocks(project, ["control_repeat", "control_if_else", "control_if", "motion_ifonedgebounce"]):
        raise check50.Failure("no conditions found, 1 required")
@check50.check(valid)
def uses_loop(project):
    """project uses at least one loop"""
    # Search project scripts for a repeat, repeat until, or forever block.
    # Any one of these opcodes satisfies the requirement.
    if not contains_blocks(project, ["control_forever", "control_repeat_until", "control_repeat"]):
        raise check50.Failure("no loops found, 1 required")
@check50.check(valid)
def uses_variable(project):
    """project uses at least one variable"""
    # A target's "variables" mapping is non-empty iff it declares variables.
    for target in project:
        if target["variables"]:
            return
    raise check50.Failure("no variables found, 1 required")
@check50.check(valid)
def uses_custom_block(project):
    """project uses at least one custom block"""
    # Substring search over the serialized project: custom ("My Blocks")
    # definitions presumably serialize with "custom_block" in their JSON --
    # cheaper than walking every block dict. Verify on Scratch format changes.
    if "custom_block" not in json.dumps(project):
        raise check50.Failure("no custom blocks found, 1 required")
def contains_blocks(project, opcodes):
    """Return whether project contains any blocks with their names in opcodes"""
    for target in project:
        for block in target["blocks"].values():
            # Non-dict entries (e.g. stray lists) are skipped, matching the
            # original isinstance guard.
            if isinstance(block, dict) and block["opcode"] in opcodes:
                return True
    return False
|
bb38f611c4ab6c4b125be708bd5f3aa524878209
|
1517c5b1d372303e03e167bfb7340c76a5a28110
|
/src/django_otp/plugins/otp_email/migrations/0002_sidechanneldevice_email.py
|
4b036abd4ce28f10d6291e08e946f128d41e6b96
|
[
"Unlicense"
] |
permissive
|
django-otp/django-otp
|
75ab08a5a19cf8668c2057f77343424e11fbb560
|
d65a039582509a08c56c35f905380fe3ff8507cb
|
refs/heads/master
| 2023-08-20T06:14:16.968288
| 2023-07-07T18:18:14
| 2023-07-07T18:18:14
| 203,846,576
| 460
| 104
|
Unlicense
| 2023-09-08T09:17:26
| 2019-08-22T17:57:17
|
Python
|
UTF-8
|
Python
| false
| false
| 792
|
py
|
0002_sidechanneldevice_email.py
|
# Generated by Django 3.0.2 on 2020-04-10 02:36
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Replace EmailDevice's `key` field with token-based fields.

    Drops `key` and adds `token` plus `valid_until` (the token's expiry
    timestamp) -- presumably aligning EmailDevice with the SideChannelDevice
    base, as the migration filename suggests.
    """
    dependencies = [
        ('otp_email', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='emaildevice',
            name='key',
        ),
        migrations.AddField(
            model_name='emaildevice',
            name='token',
            field=models.CharField(blank=True, max_length=16, null=True),
        ),
        migrations.AddField(
            model_name='emaildevice',
            name='valid_until',
            field=models.DateTimeField(default=django.utils.timezone.now, help_text='The timestamp of the moment of expiry of the saved token.'),
        ),
    ]
|
28e2ffe3f3013f3ff0e476304f777d73c6c489f3
|
79e904fb9835b8420df0547343eaeaa447684416
|
/kayak/util.py
|
bb79f56b8b2d542a7c764d14186d88811bb0247e
|
[
"MIT"
] |
permissive
|
HIPS/Kayak
|
20b05af5a74a755770f273050dcd05203ab45f3d
|
1a7d4baa849bbd5a6f6d0486136169899cf25523
|
refs/heads/master
| 2021-01-23T07:26:37.445813
| 2017-08-22T13:45:12
| 2017-08-22T13:45:12
| 21,425,991
| 256
| 41
| null | 2015-04-13T17:17:04
| 2014-07-02T13:50:59
|
Python
|
UTF-8
|
Python
| false
| false
| 2,292
|
py
|
util.py
|
# Authors: Harvard Intelligent Probabilistic Systems (HIPS) Group
# http://hips.seas.harvard.edu
# Ryan Adams, David Duvenaud, Scott Linderman,
# Dougal Maclaurin, Jasper Snoek, and others
# Copyright 2014, The President and Fellows of Harvard University
# Distributed under an MIT license. See license.txt file.
import numpy as np
import numpy.random as npr
import itertools as it
from . import EPSILON
from root_nodes import Parameter
def checkgrad(variable, output, epsilon=1e-4, verbose=False):
    """Compare the analytic gradient of `output` w.r.t. `variable` against a
    central finite difference and return the mean relative error.

    NOTE: this module is Python 2 (`print` statements below).
    """
    if not isinstance(variable, Parameter):
        raise Exception("Cannot evaluate gradient in terms of non-Parameter type %s", (type(variable)))
    # Need to make sure all evals have the same random number generation.
    rng_seed = 1
    value = output.value
    an_grad = output.grad(variable)
    fd_grad = np.zeros(variable.shape)
    base_value = variable.value.copy()
    # Perturb one input coordinate at a time: evaluate at -2e, -e, +e, +2e
    # and combine into a higher-order central-difference stencil.
    for in_dims in it.product(*map(range, variable.shape)):
        small_array = np.zeros(variable.shape)
        small_array[in_dims] = epsilon
        variable.value = base_value - 2*small_array
        fn_l2 = output.value
        variable.value = base_value - small_array
        fn_l1 = output.value
        variable.value = base_value + small_array
        fn_r1 = output.value
        variable.value = base_value + 2*small_array
        fn_r2 = output.value
        fd_grad[in_dims] = ((fn_l2 - fn_r2)/12. + (- fn_l1 + fn_r1)*2./3.) /epsilon # 2nd order method
        # fd_grad[in_dims] = (- fn_l1/2. + fn_r1/2.) /epsilon # 1st order method
        if verbose:
            print np.abs((an_grad[in_dims] - fd_grad[in_dims])/(fd_grad[in_dims]+EPSILON)), an_grad[in_dims], fd_grad[in_dims]
    # Restore the original parameter value before reporting.
    variable.value = base_value
    print "Mean finite difference", np.mean(np.abs((an_grad - fd_grad)/(fd_grad+EPSILON)))
    return np.mean(np.abs((an_grad - fd_grad)/(fd_grad+EPSILON)))
def logsumexp(X, axis=None):
    """Numerically stable log(sum(exp(X))) along `axis`, dimensions kept."""
    # Shift by the max so exp() cannot overflow; add it back afterwards.
    peak = np.max(X, axis=axis, keepdims=True)
    return peak + np.log(np.sum(np.exp(X - peak), axis=axis, keepdims=True))
def onehot(T, num_labels=None):
    """One-hot encode integer label vector `T` as a boolean array of shape
    (len(T), num_labels); `num_labels` defaults to max(T) + 1."""
    if num_labels is None:
        num_labels = np.max(T) + 1
    encoded = np.zeros((T.shape[0], num_labels), dtype=bool)
    rows = np.arange(T.shape[0], dtype=int)
    encoded[rows, T] = True
    return encoded
|
d79678d66e5fa7eee47056c528643e7ea39a967e
|
0760fb4901a75766921a205b55686d6d6f049b30
|
/python/ray/autoscaler/_private/vsphere/utils.py
|
52a8454d0b83d313cb181528b8d3fc5c26dc0f8d
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
ray-project/ray
|
a4bb6940b08b59a61ef0b8e755a52d8563a2f867
|
edba68c3e7cf255d1d6479329f305adb7fa4c3ed
|
refs/heads/master
| 2023-08-31T03:36:48.164405
| 2023-08-31T03:20:38
| 2023-08-31T03:20:38
| 71,932,349
| 29,482
| 5,669
|
Apache-2.0
| 2023-09-14T21:48:14
| 2016-10-25T19:38:30
|
Python
|
UTF-8
|
Python
| false
| false
| 2,567
|
py
|
utils.py
|
import atexit
import ssl
from enum import Enum
import requests
from pyVim.connect import Disconnect, SmartConnect
from vmware.vapi.vsphere.client import create_vsphere_client
class Constants:
    # Shared constants for the vSphere node provider / SDK helpers.
    TYPE_OF_RESOURCE = "VirtualMachine"
    NODE_CATEGORY = "ray"
    RAY_HEAD_FROZEN_VM_TAG = "ray-frozen-vm"
    VSPHERE_NODE_STATUS = "vsphere-node-status"
    # NOTE(review): presumably a timeout in seconds for applying the
    # "creating" tag -- confirm against callers.
    CREATING_TAG_TIMEOUT = 120
class VsphereNodeStatus(Enum):
    # Node provisioning states. (The original "Enum for SDK clients" comment
    # appeared copy-pasted from VmwSdkClient below.)
    CREATING = "creating"
    CREATED = "created"
class VmwSdkClient:
    """Factory for vSphere SDK clients (pyVmomi or the automation SDK)."""
    class ClientType(Enum):
        # Enum for SDK clients
        PYVMOMI_SDK = "pyvmomi"
        AUTOMATION_SDK = "automation_sdk"
    class SessionType(Enum):
        # Whether TLS certificates are verified when connecting.
        VERIFIED = "verified"
        UNVERIFIED = "unverified"
    def __init__(
        self, server, user, password, session_type: SessionType, client_type: ClientType
    ):
        # Endpoint/credentials are stored as-is; no connection is made here.
        self.server = server
        self.user = user
        self.password = password
        self.session_type = session_type
        self.client_type = client_type
    def get_client(self):
        """Create and return a connected client for the configured SDK type.

        PYVMOMI_SDK: returns the pyVmomi ServiceInstance content and registers
        an atexit Disconnect. Otherwise: returns a vSphere automation client.
        """
        if self.client_type == self.ClientType.PYVMOMI_SDK:
            context_obj = None
            if self.session_type == self.SessionType.UNVERIFIED:
                # Disable certificate verification (self-signed/expired certs).
                context_obj = ssl._create_unverified_context()
            smart_connect_obj = SmartConnect(
                host=self.server,
                user=self.user,
                pwd=self.password,
                sslContext=context_obj,
            )
            # Ensure the session is closed when the interpreter exits.
            atexit.register(Disconnect, smart_connect_obj)
            return smart_connect_obj.content
        else:
            session = None
            if self.session_type == self.SessionType.UNVERIFIED:
                session = self.get_unverified_session()
            return create_vsphere_client(
                server=self.server,
                username=self.user,
                password=self.password,
                session=session,
            )
    def get_unverified_session(self):
        """
        vCenter provisioned internally have SSH certificates
        expired so we use unverified session. Find out what
        could be done for production.
        Get a requests session with cert verification disabled.
        Also disable the insecure warnings message.
        Note this is not recommended in production code.
        @return: a requests session with verification disabled.
        """
        session = requests.session()
        session.verify = False
        requests.packages.urllib3.disable_warnings()
        return session
|
edd0d7ab7e2cd14cefde17279cce691153fac8fb
|
98f1a0bfa5b20a0b81e9e555d76e706c62d949c9
|
/examples/pytorch/sign/dataset.py
|
8a5c92778d3d8196624affc2a876ea2cde02c7f4
|
[
"Apache-2.0"
] |
permissive
|
dmlc/dgl
|
3a8fbca3a7f0e9adf6e69679ad62948df48dfc42
|
bbc8ff6261f2e0d2b5982e992b6fbe545e2a4aa1
|
refs/heads/master
| 2023-08-31T16:33:21.139163
| 2023-08-31T07:49:22
| 2023-08-31T07:49:22
| 130,375,797
| 12,631
| 3,482
|
Apache-2.0
| 2023-09-14T15:48:24
| 2018-04-20T14:49:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,445
|
py
|
dataset.py
|
import dgl
import numpy as np
import torch
def load_dataset(name):
    """Load a node-classification dataset and its split by name.

    Parameters
    ----------
    name : str
        One of "amazon" (ogbn-products), "reddit" or "cora",
        case-insensitive.

    Returns
    -------
    tuple
        (g, n_classes, train_nid, val_nid, test_nid): the DGL graph, the
        number of classes, and train/validation/test node id tensors.

    Raises
    ------
    ValueError
        If `name` is not one of the supported datasets.
    """
    dataset = name.lower()
    if dataset == "amazon":
        from ogb.nodeproppred.dataset_dgl import DglNodePropPredDataset

        dataset = DglNodePropPredDataset(name="ogbn-products")
        splitted_idx = dataset.get_idx_split()
        train_nid = splitted_idx["train"]
        val_nid = splitted_idx["valid"]
        test_nid = splitted_idx["test"]
        g, labels = dataset[0]
        # Labels arrive as a (N, 1) tensor; derive class count from range.
        n_classes = int(labels.max() - labels.min() + 1)
        g.ndata["label"] = labels.squeeze()
        g.ndata["feat"] = g.ndata["feat"].float()
    elif dataset in ["reddit", "cora"]:
        if dataset == "reddit":
            from dgl.data import RedditDataset

            data = RedditDataset(self_loop=True)
            g = data[0]
        else:
            from dgl.data import CitationGraphDataset

            data = CitationGraphDataset("cora")
            g = data[0]
        n_classes = data.num_labels
        # Convert boolean split masks to node id tensors.
        train_mask = g.ndata["train_mask"]
        val_mask = g.ndata["val_mask"]
        test_mask = g.ndata["test_mask"]
        train_nid = torch.LongTensor(train_mask.nonzero().squeeze())
        val_nid = torch.LongTensor(val_mask.nonzero().squeeze())
        test_nid = torch.LongTensor(test_mask.nonzero().squeeze())
    else:
        # Fix: the original printed a message and did `assert 0`, which is
        # silently skipped under `python -O`. Raise a proper error instead.
        raise ValueError("Dataset {} is not supported".format(name))
    return g, n_classes, train_nid, val_nid, test_nid
|
505f171fcdcd407de0f85f94cc23fbda2264b459
|
110044654f706e920380dad2779bb32a77f1f26f
|
/test/CPPSUFFIXES.py
|
9b8dd9089f404dfc329188f04fb3559adf72ed20
|
[
"MIT",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
SCons/scons
|
89327bb9635cee6e7cc59249edca9cd859d7d1ff
|
b2a7d7066a2b854460a334a5fe737ea389655e6e
|
refs/heads/master
| 2023-09-01T19:37:03.603772
| 2023-08-28T04:32:42
| 2023-08-28T04:32:42
| 104,670,160
| 1,827
| 342
|
MIT
| 2023-09-14T15:13:21
| 2017-09-24T19:23:46
|
Python
|
UTF-8
|
Python
| false
| false
| 3,869
|
py
|
CPPSUFFIXES.py
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test the ability to scan additional filesuffixes added to $CPPSUFFIXES.
"""
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.write('mycc.py', r"""
import sys
def do_file(outf, inf):
with open(inf, 'r') as ifp:
for line in ifp.readlines():
if line[:10] == '#include <':
do_file(outf, line[10:-2])
else:
outf.write(line)
with open(sys.argv[1], 'w') as outf:
for f in sys.argv[2:]:
do_file(outf, f)
sys.exit(0)
""")
test.write('SConstruct', """
DefaultEnvironment(tools=[])
env = Environment(CPPPATH = ['.'],
CC = r'%(_python_)s mycc.py',
CCFLAGS = [],
CCCOM = '$CC $TARGET $SOURCES',
OBJSUFFIX = '.o')
env.Append(CPPSUFFIXES = ['.x'])
env.Object(target = 'test1', source = 'test1.c')
env.InstallAs('test1_c', 'test1.c')
env.InstallAs('test1_h', 'test1.h')
env.InstallAs('test1_x', 'test1.x')
""" % locals())
test.write('test1.c', """\
test1.c 1
#include <test1.h>
#include <test1.x>
""")
test.write('test1.h', """\
test1.h 1
#include <foo.h>
""")
test.write('test1.x', """\
test1.x 1
#include <foo.h>
""")
test.write('foo.h', "foo.h 1\n")
expect = test.wrap_stdout("""\
%(_python_)s mycc.py test1.o test1.c
Install file: "test1.c" as "test1_c"
Install file: "test1.h" as "test1_h"
Install file: "test1.x" as "test1_x"
""" % locals())
test.run(arguments='.', stdout=expect)
test.must_match('test1.o', """\
test1.c 1
test1.h 1
foo.h 1
test1.x 1
foo.h 1
""", mode='r')
test.up_to_date(arguments='.')
test.write('foo.h', "foo.h 2\n")
expect = test.wrap_stdout("""\
%(_python_)s mycc.py test1.o test1.c
""" % locals())
test.run(arguments='.', stdout=expect)
test.must_match('test1.o', """\
test1.c 1
test1.h 1
foo.h 2
test1.x 1
foo.h 2
""", mode='r')
test.up_to_date(arguments='.')
test.write('test1.x', """\
test1.x 2
#include <foo.h>
""")
expect = test.wrap_stdout("""\
%(_python_)s mycc.py test1.o test1.c
Install file: "test1.x" as "test1_x"
""" % locals())
test.run(arguments='.', stdout=expect)
test.must_match('test1.o', """\
test1.c 1
test1.h 1
foo.h 2
test1.x 2
foo.h 2
""", mode='r')
test.up_to_date(arguments='.')
test.write('test1.h', """\
test1.h 2
#include <foo.h>
""")
expect = test.wrap_stdout("""\
%(_python_)s mycc.py test1.o test1.c
Install file: "test1.h" as "test1_h"
""" % locals())
test.run(arguments='.', stdout=expect)
test.must_match('test1.o', """\
test1.c 1
test1.h 2
foo.h 2
test1.x 2
foo.h 2
""", mode='r')
test.up_to_date(arguments='.')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.