Dataset schema (column: type, observed range / distinct values):
blob_id: string, length 40
directory_id: string, length 40
path: string, length 4 to 721
content_id: string, length 40
detected_licenses: list, length 0 to 57
license_type: string, 2 classes
repo_name: string, length 5 to 91
snapshot_id: string, length 40
revision_id: string, length 40
branch_name: string, 321 classes
visit_date: timestamp[ns], 2016-08-12 09:31:09 to 2023-09-06 10:45:07
revision_date: timestamp[ns], 2010-09-28 14:01:40 to 2023-09-06 06:22:19
committer_date: timestamp[ns], 2010-09-28 14:01:40 to 2023-09-06 06:22:19
github_id: int64, 426 to 681M
star_events_count: int64, 101 to 243k
fork_events_count: int64, 0 to 110k
gha_license_id: string, 23 classes
gha_event_created_at: timestamp[ns], 2012-06-28 18:51:49 to 2023-09-14 21:59:16, nullable
gha_created_at: timestamp[ns], 2008-02-11 22:55:26 to 2023-08-10 11:14:58, nullable
gha_language: string, 147 classes
src_encoding: string, 26 classes
language: string, 2 classes
is_vendor: bool
is_generated: bool
length_bytes: int64, 6 to 10.2M
extension: string, 115 classes
filename: string, length 3 to 113
content: string, length 6 to 10.2M

Each record below lists these fields, followed by the file content.
blob_id: 986592bccc62ff162ed29115943468c6dea37481 | directory_id: d6fa794ed25855fc4aeb0be95de564b1606a200b | content_id: 970f4b41aed599247a57f902538a776f14ac7903
repo_name: jacobandreas/psketch | path: /misc/experience.py | filename: experience.py | extension: py | length_bytes: 110
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: null
snapshot_id: 00467106c13601736eb305dd52114bf1612ceacc | revision_id: f9cfca81ce7c489c45e853adcf5ece82466b65e3 | branch_name: refs/heads/master
visit_date: 2021-07-14T12:45:04.123074 | revision_date: 2021-07-07T19:11:18 | committer_date: 2021-07-07T19:11:18
github_id: 67,570,341 | star_events_count: 104 | fork_events_count: 43
gha_event_created_at: 2021-07-07T19:11:19 | gha_created_at: 2016-09-07T03:56:30 | gha_language: Jupyter Notebook
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
from collections import namedtuple
Transition = namedtuple("Transition", ["s1", "m1", "a", "s2", "m2", "r"])
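A minimal usage sketch for the Transition record above (field names come from the file; the concrete values and their meaning are made-up placeholders):

from collections import namedtuple

Transition = namedtuple("Transition", ["s1", "m1", "a", "s2", "m2", "r"])

# Placeholder values for one stored step; only the field names are taken from the file.
step = Transition(s1="state0", m1=None, a=3, s2="state1", m2=None, r=1.0)
assert step.r == 1.0
assert step._fields == ("s1", "m1", "a", "s2", "m2", "r")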
blob_id: 225d31d2303c5c3c07bbe15f7f9d1515aa279fb3 | directory_id: 94a9151461546883c43192527d12a36126b1003b | content_id: 5bef03f5f22ea543eb323e4749d6c08fc1d56bb4
repo_name: terryyin/lizard | path: /test/test_languages/testJavaScript.py | filename: testJavaScript.py | extension: py | length_bytes: 3,717
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: NOASSERTION
snapshot_id: b4a0805ab05eea7d1a25be27155d87113794c2e6 | revision_id: 3f4c17fdfd625d7c6860379a77af56813e901026 | branch_name: refs/heads/master
visit_date: 2023-09-01T15:35:23.888140 | revision_date: 2023-08-18T02:10:09 | committer_date: 2023-08-18T02:10:09
github_id: 4,738,329 | star_events_count: 1,595 | fork_events_count: 248
gha_event_created_at: 2023-08-18T02:10:11 | gha_created_at: 2012-06-21T11:31:46 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
import unittest
from lizard import analyze_file, FileAnalyzer, get_extensions
from lizard_languages import JavaScriptReader
def get_js_function_list(source_code):
    return analyze_file.analyze_source_code("a.js", source_code).function_list
class Test_tokenizing_JavaScript(unittest.TestCase):
    def check_tokens(self, expect, source):
        tokens = list(JavaScriptReader.generate_tokens(source))
        self.assertEqual(expect, tokens)
    def test_dollar_var(self):
        self.check_tokens(['$a'], '$a')
    def test_tokenizing_javascript_regular_expression(self):
        self.check_tokens(['/ab/'], '/ab/')
        self.check_tokens([r'/\//'], r'/\//')
        self.check_tokens([r'/a/igm'], r'/a/igm')
    def test_should_not_confuse_division_as_regx(self):
        self.check_tokens(['a','/','b',',','a','/','b'], 'a/b,a/b')
        self.check_tokens(['3453',' ','/','b',',','a','/','b'], '3453 /b,a/b')
    def test_tokenizing_javascript_regular_expression1(self):
        self.check_tokens(['a', '=', '/ab/'], 'a=/ab/')
    def test_tokenizing_javascript_comments(self):
        self.check_tokens(['/**a/*/'], '''/**a/*/''')
    def test_tokenizing_pattern(self):
        self.check_tokens(['/\//'], r'''/\//''')
    def test_tokenizing_javascript_multiple_line_string(self):
        self.check_tokens(['"aaa\\\nbbb"'], '"aaa\\\nbbb"')
class Test_parser_for_JavaScript(unittest.TestCase):
    def test_simple_function(self):
        functions = get_js_function_list("function foo(){}")
        self.assertEqual("foo", functions[0].name)
    def test_simple_function_complexity(self):
        functions = get_js_function_list("function foo(){m;if(a);}")
        self.assertEqual(2, functions[0].cyclomatic_complexity)
    def test_parameter_count(self):
        functions = get_js_function_list("function foo(a, b){}")
        self.assertEqual(2, functions[0].parameter_count)
    def test_function_assigning_to_a_name(self):
        functions = get_js_function_list("a = function (a, b){}")
        self.assertEqual('a', functions[0].name)
    def test_not_a_function_assigning_to_a_name(self):
        functions = get_js_function_list("abc=3; function (a, b){}")
        self.assertEqual('(anonymous)', functions[0].name)
    def test_function_without_name_assign_to_field(self):
        functions = get_js_function_list("a.b.c = function (a, b){}")
        self.assertEqual('a.b.c', functions[0].name)
    def test_function_in_a_object(self):
        functions = get_js_function_list("var App={a:function(){};}")
        self.assertEqual('a', functions[0].name)
    def test_function_in_a_function(self):
        functions = get_js_function_list("function a(){function b(){}}")
        self.assertEqual('b', functions[0].name)
        self.assertEqual('a', functions[1].name)
    # test "<>" error match in "< b) {} } function b () { return (dispatch, getState) =>"
    def test_function_in_arrow(self):
        functions = get_js_function_list(
            "function a () {f (a < b) {} } function b () { return (dispatch, getState) => {} }")
        self.assertEqual('a', functions[0].name)
        self.assertEqual('(anonymous)', functions[1].name)
        self.assertEqual('b', functions[2].name)
    # test long_name, fix "a x, y)" to "a (x, y)"
    def test_function_long_name(self):
        functions = get_js_function_list(
            "function a (x, y) {if (a < b) {} } function b () { return (dispatch, getState) => {} }")
        self.assertEqual('a ( x , y )', functions[0].long_name)
        self.assertEqual('b ( )', functions[2].long_name)
    def test_global(self):
        functions = get_js_function_list("{}")
        self.assertEqual(0, len(functions))
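A minimal sketch of driving the same lizard API outside the unittest harness, using the analyze_source_code call from get_js_function_list above (the JavaScript snippet is an arbitrary example; the printed attributes are the ones the tests assert on):

from lizard import analyze_file

source = "function foo(a, b){ if(a) {} }"
functions = analyze_file.analyze_source_code("a.js", source).function_list
fn = functions[0]
# name, parameter_count and cyclomatic_complexity are the attributes exercised above.
print(fn.name, fn.parameter_count, fn.cyclomatic_complexity)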
blob_id: b8c5e5bd1b259e1257c1425c1308590af839cd24 | directory_id: 3dc647cd07a7361ed401e40d2b7cce8c826c8f6c | content_id: ce61883288aa350f877cd3b3caaeb6865f888bab
repo_name: RustPython/RustPython | path: /Lib/importlib/__init__.py | filename: __init__.py | extension: py | length_bytes: 6,089
detected_licenses: ["CC-BY-4.0", "MIT", "Python-2.0"] | license_type: permissive | gha_license_id: MIT
snapshot_id: 5ddce4a9848b9de8c041ffd2634f83c0105d3f39 | revision_id: b864e5da1f18897fc884180b7093df5aa170024f | branch_name: refs/heads/main
visit_date: 2023-09-04T12:38:29.458699 | revision_date: 2023-09-03T12:33:42 | committer_date: 2023-09-03T12:33:42
github_id: 135,201,145 | star_events_count: 15,815 | fork_events_count: 1,302
gha_event_created_at: 2023-09-14T08:11:45 | gha_created_at: 2018-05-28T19:27:01 | gha_language: Rust
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
"""A pure Python implementation of import."""
__all__ = ['__import__', 'import_module', 'invalidate_caches', 'reload']
# Bootstrap help #####################################################
# Until bootstrapping is complete, DO NOT import any modules that attempt
# to import importlib._bootstrap (directly or indirectly). Since this
# partially initialised package would be present in sys.modules, those
# modules would get an uninitialised copy of the source version, instead
# of a fully initialised version (either the frozen one or the one
# initialised below if the frozen one is not available).
import _imp # Just the builtin component, NOT the full Python module
import sys
try:
import _frozen_importlib as _bootstrap
except ImportError:
from . import _bootstrap
_bootstrap._setup(sys, _imp)
else:
# importlib._bootstrap is the built-in import, ensure we don't create
# a second copy of the module.
_bootstrap.__name__ = 'importlib._bootstrap'
_bootstrap.__package__ = 'importlib'
try:
_bootstrap.__file__ = __file__.replace('__init__.py', '_bootstrap.py')
except NameError:
# __file__ is not guaranteed to be defined, e.g. if this code gets
# frozen by a tool like cx_Freeze.
pass
sys.modules['importlib._bootstrap'] = _bootstrap
try:
import _frozen_importlib_external as _bootstrap_external
except ImportError:
from . import _bootstrap_external
_bootstrap_external._set_bootstrap_module(_bootstrap)
_bootstrap._bootstrap_external = _bootstrap_external
else:
_bootstrap_external.__name__ = 'importlib._bootstrap_external'
_bootstrap_external.__package__ = 'importlib'
try:
_bootstrap_external.__file__ = __file__.replace('__init__.py', '_bootstrap_external.py')
except NameError:
# __file__ is not guaranteed to be defined, e.g. if this code gets
# frozen by a tool like cx_Freeze.
pass
sys.modules['importlib._bootstrap_external'] = _bootstrap_external
# To simplify imports in test code
_pack_uint32 = _bootstrap_external._pack_uint32
_unpack_uint32 = _bootstrap_external._unpack_uint32
# Fully bootstrapped at this point, import whatever you like, circular
# dependencies and startup overhead minimisation permitting :)
import warnings
# Public API #########################################################
from ._bootstrap import __import__
def invalidate_caches():
"""Call the invalidate_caches() method on all meta path finders stored in
sys.meta_path (where implemented)."""
for finder in sys.meta_path:
if hasattr(finder, 'invalidate_caches'):
finder.invalidate_caches()
def find_loader(name, path=None):
"""Return the loader for the specified module.
This is a backward-compatible wrapper around find_spec().
This function is deprecated in favor of importlib.util.find_spec().
"""
warnings.warn('Deprecated since Python 3.4 and slated for removal in '
'Python 3.12; use importlib.util.find_spec() instead',
DeprecationWarning, stacklevel=2)
try:
loader = sys.modules[name].__loader__
if loader is None:
raise ValueError('{}.__loader__ is None'.format(name))
else:
return loader
except KeyError:
pass
except AttributeError:
raise ValueError('{}.__loader__ is not set'.format(name)) from None
spec = _bootstrap._find_spec(name, path)
# We won't worry about malformed specs (missing attributes).
if spec is None:
return None
if spec.loader is None:
if spec.submodule_search_locations is None:
raise ImportError('spec for {} missing loader'.format(name),
name=name)
raise ImportError('namespace packages do not have loaders',
name=name)
return spec.loader
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
level = 0
if name.startswith('.'):
if not package:
msg = ("the 'package' argument is required to perform a relative "
"import for {!r}")
raise TypeError(msg.format(name))
for character in name:
if character != '.':
break
level += 1
return _bootstrap._gcd_import(name[level:], package, level)
_RELOADING = {}
def reload(module):
"""Reload the module and return it.
The module must have been successfully imported before.
"""
try:
name = module.__spec__.name
except AttributeError:
try:
name = module.__name__
except AttributeError:
raise TypeError("reload() argument must be a module")
if sys.modules.get(name) is not module:
msg = "module {} not in sys.modules"
raise ImportError(msg.format(name), name=name)
if name in _RELOADING:
return _RELOADING[name]
_RELOADING[name] = module
try:
parent_name = name.rpartition('.')[0]
if parent_name:
try:
parent = sys.modules[parent_name]
except KeyError:
msg = "parent {!r} not in sys.modules"
raise ImportError(msg.format(parent_name),
name=parent_name) from None
else:
pkgpath = parent.__path__
else:
pkgpath = None
target = module
spec = module.__spec__ = _bootstrap._find_spec(name, pkgpath, target)
if spec is None:
raise ModuleNotFoundError(f"spec not found for the module {name!r}", name=name)
_bootstrap._exec(spec, module)
# The module may have replaced itself in sys.modules!
return sys.modules[name]
finally:
try:
del _RELOADING[name]
except KeyError:
pass
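A minimal sketch of the public API this module defines, as it is normally used (standard importlib behaviour; the module names are arbitrary examples):

import importlib

# import_module resolves absolute names, and relative names given a package anchor.
json_mod = importlib.import_module("json")
decoder = importlib.import_module(".decoder", package="json")
# invalidate_caches() asks every finder on sys.meta_path to drop cached state,
# e.g. after new files were written to disk; reload() re-executes an imported module.
importlib.invalidate_caches()
json_mod = importlib.reload(json_mod)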
blob_id: 4a80192acd3f5bd5785129fba96cae73e0f79d41 | directory_id: 4caa087dcb95a6a7dbe8cc49fde383e9f2aa4426 | content_id: 86291c0795fc5c39c171a8e321ef86a6baecdf87
repo_name: open-mmlab/mmtracking | path: /mmtrack/models/sot/siamrpn.py | filename: siamrpn.py | extension: py | length_bytes: 18,593
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
snapshot_id: 1e55c69cc1a264b3c9546c19332a38e9621430ed | revision_id: e79491ec8f0b8c86fda947fbaaa824c66ab2a991 | branch_name: refs/heads/master
visit_date: 2023-09-01T15:41:04.322684 | revision_date: 2023-04-25T13:25:18 | committer_date: 2023-04-25T13:25:18
github_id: 291,213,368 | star_events_count: 3,263 | fork_events_count: 604
gha_event_created_at: 2023-08-26T04:05:00 | gha_created_at: 2020-08-29T06:16:56 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import numpy as np
import torch
from addict import Dict
from mmdet.core.bbox import bbox_cxcywh_to_xyxy
from mmdet.models.builder import build_backbone, build_head, build_neck
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.modules.conv import _ConvNd
from mmtrack.core.bbox import (bbox_cxcywh_to_x1y1wh, bbox_xyxy_to_x1y1wh,
calculate_region_overlap, quad2bbox)
from mmtrack.core.evaluation import bbox2region
from ..builder import MODELS
from .base import BaseSingleObjectTracker
@MODELS.register_module()
class SiamRPN(BaseSingleObjectTracker):
"""SiamRPN++: Evolution of Siamese Visual Tracking with Very Deep Networks.
This single object tracker is the implementation of `SiamRPN++
<https://arxiv.org/abs/1812.11703>`_.
"""
def __init__(self,
backbone,
neck=None,
head=None,
pretrains=None,
init_cfg=None,
frozen_modules=None,
train_cfg=None,
test_cfg=None):
super(SiamRPN, self).__init__(init_cfg)
if isinstance(pretrains, dict):
warnings.warn('DeprecationWarning: pretrains is deprecated, '
'please use "init_cfg" instead')
backbone_pretrain = pretrains.get('backbone', None)
if backbone_pretrain:
backbone.init_cfg = dict(
type='Pretrained', checkpoint=backbone_pretrain)
else:
backbone.init_cfg = None
self.backbone = build_backbone(backbone)
if neck is not None:
self.neck = build_neck(neck)
head = head.copy()
head.update(train_cfg=train_cfg.rpn, test_cfg=test_cfg.rpn)
self.head = build_head(head)
self.test_cfg = test_cfg
self.train_cfg = train_cfg
if frozen_modules is not None:
self.freeze_module(frozen_modules)
def init_weights(self):
"""Initialize the weights of modules in single object tracker."""
# We don't use the `init_weights()` function in BaseModule, since it
# doesn't support the initialization method from `reset_parameters()`
# in Pytorch.
if self.with_backbone:
self.backbone.init_weights()
if self.with_neck:
for m in self.neck.modules():
if isinstance(m, _ConvNd) or isinstance(m, _BatchNorm):
m.reset_parameters()
if self.with_head:
for m in self.head.modules():
if isinstance(m, _ConvNd) or isinstance(m, _BatchNorm):
m.reset_parameters()
def forward_template(self, z_img):
"""Extract the features of exemplar images.
Args:
z_img (Tensor): of shape (N, C, H, W) encoding input exemplar
images. Typically H and W equal to 127.
Returns:
tuple(Tensor): Multi level feature map of exemplar images.
"""
z_feat = self.backbone(z_img)
if self.with_neck:
z_feat = self.neck(z_feat)
z_feat_center = []
for i in range(len(z_feat)):
left = (z_feat[i].size(3) - self.test_cfg.center_size) // 2
right = left + self.test_cfg.center_size
z_feat_center.append(z_feat[i][:, :, left:right, left:right])
return tuple(z_feat_center)
def forward_search(self, x_img):
"""Extract the features of search images.
Args:
x_img (Tensor): of shape (N, C, H, W) encoding input search
images. Typically H and W equal to 255.
Returns:
tuple(Tensor): Multi level feature map of search images.
"""
x_feat = self.backbone(x_img)
if self.with_neck:
x_feat = self.neck(x_feat)
return x_feat
def get_cropped_img(self, img, center_xy, target_size, crop_size,
avg_channel):
"""Crop image.
Only used during testing.
This function mainly contains two steps:
1. Crop `img` based on center `center_xy` and size `crop_size`. If the
cropped image is out of boundary of `img`, use `avg_channel` to pad.
2. Resize the cropped image to `target_size`.
Args:
img (Tensor): of shape (1, C, H, W) encoding original input
image.
center_xy (Tensor): of shape (2, ) denoting the center point for
cropping image.
target_size (int): The output size of cropped image.
crop_size (Tensor): The size for cropping image.
avg_channel (Tensor): of shape (3, ) denoting the padding values.
Returns:
Tensor: of shape (1, C, target_size, target_size) encoding the
resized cropped image.
"""
N, C, H, W = img.shape
context_xmin = int(center_xy[0] - crop_size / 2)
context_xmax = int(center_xy[0] + crop_size / 2)
context_ymin = int(center_xy[1] - crop_size / 2)
context_ymax = int(center_xy[1] + crop_size / 2)
left_pad = max(0, -context_xmin)
top_pad = max(0, -context_ymin)
right_pad = max(0, context_xmax - W)
bottom_pad = max(0, context_ymax - H)
context_xmin += left_pad
context_xmax += left_pad
context_ymin += top_pad
context_ymax += top_pad
avg_channel = avg_channel[:, None, None]
if any([top_pad, bottom_pad, left_pad, right_pad]):
new_img = img.new_zeros(N, C, H + top_pad + bottom_pad,
W + left_pad + right_pad)
new_img[..., top_pad:top_pad + H, left_pad:left_pad + W] = img
if top_pad:
new_img[..., :top_pad, left_pad:left_pad + W] = avg_channel
if bottom_pad:
new_img[..., H + top_pad:, left_pad:left_pad + W] = avg_channel
if left_pad:
new_img[..., :left_pad] = avg_channel
if right_pad:
new_img[..., W + left_pad:] = avg_channel
crop_img = new_img[..., context_ymin:context_ymax + 1,
context_xmin:context_xmax + 1]
else:
crop_img = img[..., context_ymin:context_ymax + 1,
context_xmin:context_xmax + 1]
crop_img = torch.nn.functional.interpolate(
crop_img,
size=(target_size, target_size),
mode='bilinear',
align_corners=False)
return crop_img
def _bbox_clip(self, bbox, img_h, img_w):
"""Clip the bbox with [cx, cy, w, h] format."""
bbox[0] = bbox[0].clamp(0., img_w)
bbox[1] = bbox[1].clamp(0., img_h)
bbox[2] = bbox[2].clamp(10., img_w)
bbox[3] = bbox[3].clamp(10., img_h)
return bbox
def init(self, img, bbox):
"""Initialize the single object tracker in the first frame.
Args:
img (Tensor): of shape (1, C, H, W) encoding original input
image.
bbox (Tensor): The given instance bbox of first frame that need be
tracked in the following frames. The shape of the box is (4, )
with [cx, cy, w, h] format.
Returns:
tuple(z_feat, avg_channel): z_feat is a tuple[Tensor] that
contains the multi level feature maps of exemplar image,
avg_channel is Tensor with shape (3, ), and denotes the padding
values.
"""
z_width = bbox[2] + self.test_cfg.context_amount * (bbox[2] + bbox[3])
z_height = bbox[3] + self.test_cfg.context_amount * (bbox[2] + bbox[3])
z_size = torch.round(torch.sqrt(z_width * z_height))
avg_channel = torch.mean(img, dim=(0, 2, 3))
z_crop = self.get_cropped_img(img, bbox[0:2],
self.test_cfg.exemplar_size, z_size,
avg_channel)
z_feat = self.forward_template(z_crop)
return z_feat, avg_channel
def track(self, img, bbox, z_feat, avg_channel):
"""Track the box `bbox` of previous frame to current frame `img`.
Args:
img (Tensor): of shape (1, C, H, W) encoding original input
image.
bbox (Tensor): The bbox in previous frame. The shape of the box is
(4, ) in [cx, cy, w, h] format.
z_feat (tuple[Tensor]): The multi level feature maps of exemplar
image in the first frame.
avg_channel (Tensor): of shape (3, ) denoting the padding values.
Returns:
tuple(best_score, best_bbox): best_score is a Tensor denoting the
score of best_bbox, best_bbox is a Tensor of shape (4, ) in
[cx, cy, w, h] format, and denotes the best tracked bbox in
current frame.
"""
z_width = bbox[2] + self.test_cfg.context_amount * (bbox[2] + bbox[3])
z_height = bbox[3] + self.test_cfg.context_amount * (bbox[2] + bbox[3])
z_size = torch.sqrt(z_width * z_height)
x_size = torch.round(
z_size * (self.test_cfg.search_size / self.test_cfg.exemplar_size))
x_crop = self.get_cropped_img(img, bbox[0:2],
self.test_cfg.search_size, x_size,
avg_channel)
x_feat = self.forward_search(x_crop)
cls_score, bbox_pred = self.head(z_feat, x_feat)
scale_factor = self.test_cfg.exemplar_size / z_size
best_score, best_bbox = self.head.get_bbox(cls_score, bbox_pred, bbox,
scale_factor)
# clip boundary
best_bbox = self._bbox_clip(best_bbox, img.shape[2], img.shape[3])
return best_score, best_bbox
def simple_test_vot(self, img, frame_id, gt_bboxes, img_metas=None):
"""Test using VOT test mode.
Args:
img (Tensor): of shape (1, C, H, W) encoding input image.
frame_id (int): the id of current frame in the video.
gt_bboxes (list[Tensor]): list of ground truth bboxes for each
image with shape (1, 4) in [tl_x, tl_y, br_x, br_y] format or
shape (1, 8) in [x1, y1, x2, y2, x3, y3, x4, y4].
img_metas (list[dict]): list of image information dict where each
dict has: 'img_shape', 'scale_factor', 'flip', and may also
contain 'filename', 'ori_shape', 'pad_shape', and
'img_norm_cfg'. For details on the values of these keys see
`mmtrack/datasets/pipelines/formatting.py:VideoCollect`.
Returns:
bbox_pred (Tensor): in [tl_x, tl_y, br_x, br_y] format.
best_score (Tensor): the tracking bbox confidence in range [0,1],
and the score of initial frame is -1.
"""
if frame_id == 0:
self.init_frame_id = 0
if self.init_frame_id == frame_id:
# initialization
gt_bboxes = gt_bboxes[0][0]
self.memo = Dict()
self.memo.bbox = quad2bbox(gt_bboxes)
self.memo.z_feat, self.memo.avg_channel = self.init(
img, self.memo.bbox)
# 1 denotes the initialization state
bbox_pred = img.new_tensor([1.])
best_score = -1.
elif self.init_frame_id > frame_id:
# 0 denotes unknown state, namely the skipping frame after failure
bbox_pred = img.new_tensor([0.])
best_score = -1.
else:
# normal tracking state
best_score, self.memo.bbox = self.track(img, self.memo.bbox,
self.memo.z_feat,
self.memo.avg_channel)
# convert bbox to region
track_bbox = bbox_cxcywh_to_x1y1wh(self.memo.bbox).cpu().numpy()
track_region = bbox2region(track_bbox)
gt_bbox = gt_bboxes[0][0]
if len(gt_bbox) == 4:
gt_bbox = bbox_xyxy_to_x1y1wh(gt_bbox)
gt_region = bbox2region(gt_bbox.cpu().numpy())
if img_metas is not None and 'img_shape' in img_metas[0]:
image_shape = img_metas[0]['img_shape']
image_wh = (image_shape[1], image_shape[0])
else:
image_wh = None
warnings.warn('image shape is needed when calculating bbox overlap')
overlap = calculate_region_overlap(
track_region, gt_region, bounds=image_wh)
if overlap <= 0:
# tracking failure
self.init_frame_id = frame_id + 5
# 2 denotes the failure state
bbox_pred = img.new_tensor([2.])
else:
bbox_pred = bbox_cxcywh_to_xyxy(self.memo.bbox)
return bbox_pred, best_score
def simple_test_ope(self, img, frame_id, gt_bboxes):
"""Test using OPE test mode.
Args:
img (Tensor): of shape (1, C, H, W) encoding input image.
frame_id (int): the id of current frame in the video.
gt_bboxes (list[Tensor]): list of ground truth bboxes for each
image with shape (1, 4) in [tl_x, tl_y, br_x, br_y] format or
shape (1, 8) in [x1, y1, x2, y2, x3, y3, x4, y4].
Returns:
bbox_pred (Tensor): in [tl_x, tl_y, br_x, br_y] format.
best_score (Tensor): the tracking bbox confidence in range [0,1],
and the score of initial frame is -1.
"""
if frame_id == 0:
gt_bboxes = gt_bboxes[0][0]
self.memo = Dict()
self.memo.bbox = quad2bbox(gt_bboxes)
self.memo.z_feat, self.memo.avg_channel = self.init(
img, self.memo.bbox)
best_score = -1.
else:
best_score, self.memo.bbox = self.track(img, self.memo.bbox,
self.memo.z_feat,
self.memo.avg_channel)
bbox_pred = bbox_cxcywh_to_xyxy(self.memo.bbox)
return bbox_pred, best_score
def simple_test(self, img, img_metas, gt_bboxes, **kwargs):
"""Test without augmentation.
Args:
img (Tensor): of shape (1, C, H, W) encoding input image.
img_metas (list[dict]): list of image information dict where each
dict has: 'img_shape', 'scale_factor', 'flip', and may also
contain 'filename', 'ori_shape', 'pad_shape', and
'img_norm_cfg'. For details on the values of these keys see
`mmtrack/datasets/pipelines/formatting.py:VideoCollect`.
gt_bboxes (list[Tensor]): list of ground truth bboxes for each
image with shape (1, 4) in [tl_x, tl_y, br_x, br_y] format or
shape (1, 8) in [x1, y1, x2, y2, x3, y3, x4, y4].
Returns:
dict[str : ndarray]: The tracking results.
"""
frame_id = img_metas[0].get('frame_id', -1)
assert frame_id >= 0
assert len(img) == 1, 'only support batch_size=1 when testing'
test_mode = self.test_cfg.get('test_mode', 'OPE')
assert test_mode in ['OPE', 'VOT']
if test_mode == 'VOT':
bbox_pred, best_score = self.simple_test_vot(
img, frame_id, gt_bboxes, img_metas)
else:
bbox_pred, best_score = self.simple_test_ope(
img, frame_id, gt_bboxes)
results = dict()
if best_score == -1.:
results['track_bboxes'] = np.concatenate(
(bbox_pred.cpu().numpy(), np.array([best_score])))
else:
results['track_bboxes'] = np.concatenate(
(bbox_pred.cpu().numpy(), best_score.cpu().numpy()[None]))
return results
def forward_train(self, img, img_metas, gt_bboxes, search_img,
search_img_metas, search_gt_bboxes, is_positive_pairs,
**kwargs):
"""
Args:
img (Tensor): of shape (N, C, H, W) encoding input exemplar images.
Typically H and W equal to 127.
img_metas (list[dict]): list of image information dict where each
dict has: 'img_shape', 'scale_factor', 'flip', and may also
contain 'filename', 'ori_shape', 'pad_shape', and
'img_norm_cfg'. For details on the values of these keys see
`mmtrack/datasets/pipelines/formatting.py:VideoCollect`.
gt_bboxes (list[Tensor]): Ground truth bboxes for each exemplar
image with shape (1, 4) in [tl_x, tl_y, br_x, br_y] format.
search_img (Tensor): of shape (N, 1, C, H, W) encoding input search
images. 1 denotes there is only one search image for each
exemplar image. Typically H and W equal to 255.
search_img_metas (list[list[dict]]): The second list only has one
element. The first list contains search image information dict
where each dict has: 'img_shape', 'scale_factor', 'flip', and
may also contain 'filename', 'ori_shape', 'pad_shape', and
'img_norm_cfg'. For details on the values of these keys see
`mmtrack/datasets/pipelines/formatting.py:VideoCollect`.
search_gt_bboxes (list[Tensor]): Ground truth bboxes for each
search image with shape (1, 5) in [0.0, tl_x, tl_y, br_x, br_y]
format.
is_positive_pairs (list[bool]): list of bool denoting whether each
exemplar image and corresponding search image is positive pair.
Returns:
dict[str, Tensor]: a dictionary of loss components.
"""
search_img = search_img[:, 0]
z_feat = self.forward_template(img)
x_feat = self.forward_search(search_img)
cls_score, bbox_pred = self.head(z_feat, x_feat)
losses = dict()
bbox_targets = self.head.get_targets(search_gt_bboxes,
cls_score.shape[2:],
is_positive_pairs)
head_losses = self.head.loss(cls_score, bbox_pred, *bbox_targets)
losses.update(head_losses)
return losses
blob_id: cf582d807e95a90e9714fc7727af753f26c4ea10 | directory_id: afd2087e80478010d9df66e78280f75e1ff17d45 | content_id: 5389751a5becc8e37bd2b48d8b086a2eb6e569ed
repo_name: pytorch/pytorch | path: /test/jit/test_profiler.py | filename: test_profiler.py | extension: py | length_bytes: 10,256
detected_licenses: ["BSD-3-Clause", "BSD-2-Clause", "LicenseRef-scancode-secret-labs-2011", "LicenseRef-scancode-generic-cla", "BSL-1.0", "Apache-2.0"] | license_type: permissive | gha_license_id: NOASSERTION
snapshot_id: 7521ac50c47d18b916ae47a6592c4646c2cb69b5 | revision_id: a6f7dd4707ac116c0f5fb5f44f42429f38d23ab4 | branch_name: refs/heads/main
visit_date: 2023-08-03T05:05:02.822937 | revision_date: 2023-08-03T00:40:33 | committer_date: 2023-08-03T04:14:52
github_id: 65,600,975 | star_events_count: 77,092 | fork_events_count: 24,610
gha_event_created_at: 2023-09-14T21:58:39 | gha_created_at: 2016-08-13T05:26:41 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# Owner(s): ["oncall: jit"]
import os
import sys
import torch
from torch.testing._internal.common_utils import skipIfTorchDynamo
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, warmup_backward, FileCheck
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
@skipIfTorchDynamo()
class TestProfiler(JitTestCase):
def setUp(self):
self.prev_exec = torch._C._jit_set_profiling_executor(True)
self.prev_profiling = torch._C._get_graph_executor_optimize(True)
self.inline_autodiff = torch._C._debug_set_autodiff_subgraph_inlining(False)
self.texpr_fuser_state = torch._C._jit_texpr_fuser_enabled()
self.can_fuse_on_cpu = torch._C._jit_can_fuse_on_cpu()
torch._C._jit_set_texpr_fuser_enabled(True)
torch._C._jit_override_can_fuse_on_cpu(True)
self.default_dtype = torch.get_default_dtype()
self.old_reduction_enabled = torch._C._jit_set_texpr_reductions_enabled(True)
torch.set_default_dtype(torch.double)
self.old_fusion_inlining = torch._C._debug_get_fusion_group_inlining()
torch._C._debug_set_fusion_group_inlining(False)
self.old_te_must_use_llvm_cpu = torch._C._jit_get_te_must_use_llvm_cpu()
torch._C._jit_set_te_must_use_llvm_cpu(False)
def tearDown(self):
torch._C._jit_set_profiling_executor(self.prev_exec)
torch._C._get_graph_executor_optimize(self.prev_profiling)
torch._C._debug_set_autodiff_subgraph_inlining(self.inline_autodiff)
torch._C._jit_set_texpr_fuser_enabled(self.texpr_fuser_state)
torch._C._jit_override_can_fuse_on_cpu(self.can_fuse_on_cpu)
torch.set_default_dtype(self.default_dtype)
torch._C._jit_set_texpr_reductions_enabled(self.old_reduction_enabled)
torch._C._debug_set_fusion_group_inlining(self.old_fusion_inlining)
torch._C._jit_set_te_must_use_llvm_cpu(self.old_te_must_use_llvm_cpu)
def test_tensor_type_not_determined_by_inputs(self):
@torch.jit.script
def scalar_type_input(x, y, z):
return x + y + 4 + z.item()
x = torch.tensor([2, 2])
scalar_type_input(x, x, torch.tensor(1))
scalar_type_input(x, x, torch.tensor(1))
scalar_type_input(x, x, torch.tensor(1.0))
g = torch.jit.last_executed_optimized_graph()
# item & add should not get pulled into the fusion group -
# we expect to see Fusion Group (item / add) Fusion Group in ir dump
FileCheck().check("TensorExpr").check("Scalar = aten::item").check_next("Tensor = aten::add").check("TensorExpr").run(g)
@torch.jit.script
def non_const_dtype(x, y, cond: bool):
dtype = torch.int16 if cond else torch.int32
return (x + y + 3).sum(dtype=dtype)
non_const_dtype(x, x, True)
non_const_dtype(x, x, True)
g = torch.jit.last_executed_optimized_graph()
# because dtype is non-const, sum should not get pulled into the Fusion Group
FileCheck().check("TensorExpr").check("TensorExpr").check_not("aten::sum").run(g)
def test_specialize_backward(self):
def test_fuse(a, b):
c = a * b
d = c * b
return d
test_fuse.__disable_jit_function_caching__ = True
scripted_f = torch.jit.script(test_fuse)
x = torch.ones(1, requires_grad=True)
y = torch.ones(1, requires_grad=True)
scripted_f(x, y)
b = scripted_f(x, y)
warmup_backward(b)
g = torch.jit.last_executed_optimized_graph()
# Backward has an if node guarding specializations,
# within the if node true block there is only one if node
# that guards a tensorexpr group
optimized_block = next(g.findNode("prim::If").blocks())
if_nodes = list(optimized_block.findAllNodes("prim::If"))
self.assertEqual(len(if_nodes), 1)
FileCheck().check("Group[Subgraph").run(str(if_nodes[0]))
# no broadcasts occurred, sum_to_size have been specialized out
self.assertIsNone(optimized_block.findNode("aten::_grad_sum_to_size"))
broadcast_f = torch.jit.script(test_fuse)
x = torch.ones([2, 2], requires_grad=True)
y = torch.ones([1], requires_grad=True)
broadcast_f(x, y)
b = broadcast_f(x, y)
b.backward(torch.ones([2, 2], dtype=torch.float), retain_graph=True)
b.backward(torch.ones([2, 2], dtype=torch.float))
# warmup_backward(b, torch.ones([2, 2], dtype=torch.float))
g = torch.jit.last_executed_optimized_graph()
optimized_block = next(g.findNode("prim::If").blocks())
# broadcasts occurred, currently expect to see aten::_grad_sum_to_size
self.assertIsNotNone(optimized_block.findNode("aten::_grad_sum_to_size"))
def test_specialized_types(self):
@torch.jit.script
def test_fuse(a, b):
c = a * b
d = c * b
return d
x = torch.tensor([.5])
for _ in range(3):
test_fuse(x, x)
g = torch.jit.last_executed_optimized_graph()
# Types should remain specialized for typecheck outputs & fusion outputs
FileCheck().check("Double(").check_same("prim::TypeCheck").check_same("\n").check("Double").check_same("TensorExpr").run(g)
# other outputs should not be specialized
FileCheck().check("Tensor = prim::If").run(g)
def test_aliasing_merge(self):
@torch.jit.script
def foo(a, b):
c = a * b
d = c * b
d.add_(b)
e = d * b
return d + e
x = torch.ones(1)
y = torch.ones(1)
foo(x, y)
b = foo(x, y)
g = torch.jit.last_executed_optimized_graph()
self.assertEqual(len(list(g.findAllNodes("prim::TypeCheck"))), 2)
FileCheck().check("TensorExpr").check("aten::add_").check("TensorExpr").run(g)
def test_use_not_profiled(self):
def foo(t1, t2, t3, t4, t: float):
h = t1 + t2 + t3 + t4
if t > 0.5:
# Putting a use of t1 in a never-executed conditional prevents
return t1 + 1
return h
t = torch.rand(8, dtype=torch.float)
foo_script = torch.jit.script(foo)
for _ in range(torch._C._jit_get_num_profiled_runs() + 1):
foo_script(t, t, t, t, 0.1)
self.assertEqual(foo(t, t, t, t, 0.1), foo_script(t, t, t, t, 0.1))
g = torch.jit.last_executed_optimized_graph()
# all adds fused
FileCheck().check("graph").check_not("aten::add").check("prim::If").run(g)
def test_not_fusing_scalar_ops(self):
@torch.jit.script
def foo(x: int, y: int):
return x + y + 2 + 4 + 5 + 6
foo(1, 2)
foo(2, 3)
g = torch.jit.last_executed_optimized_graph()
FileCheck().check_not("TensorExpr").run(g)
def test_not_optimizing_property(self):
@torch.jit.script
def foo(x, y):
return x + y + 1 + 2 + 3, x.size()
x = torch.ones(1)
foo(x, x)
foo(x, x)
g = torch.jit.last_executed_optimized_graph()
FileCheck().check("aten::size").run(g)
x = torch.ones([2, 3, 5])
self.assertEqual(foo(x, x), (x + x + 1 + 2 + 3, x.size()))
def test_fallback_graph_not_specialized(self):
@torch.jit.script
def foo(a, b):
c = a * b
d = c * b
e = d * b
return d + e
x = torch.ones(1)
y = torch.ones(1)
foo(x, y)
foo(x, y)
g = torch.jit.last_executed_optimized_graph()
FileCheck().check("CallFunction").check_next("Tensor = prim::TupleUnpack").run(g)
def test_autograd_fallback_graph(self):
@torch.jit.script
def foo(a, b):
c = a * b
d = c * b
e = d * b
return d + e
x = torch.ones(1, requires_grad=True)
y = torch.ones(1, requires_grad=True)
foo(x, y)
b = foo(x, y)
b.backward(torch.ones([1], dtype=torch.float), retain_graph=True)
b.backward(torch.ones([1], dtype=torch.float))
g = torch.jit.last_executed_optimized_graph()
FileCheck().check("fallback_function").check_next("CallFunction").run(g)
def test_tensor_constant(self):
def foo(a, b):
return a + b + torch.tensor([2])
x = torch.ones(1, requires_grad=False)
foo_script = torch.jit.script(foo)
foo_script(x, x)
foo_script(x, x)
self.assertEqual(foo_script(x, x), foo(x, x))
g = torch.jit.last_executed_optimized_graph()
FileCheck().check_count("aten::add", 2, exactly=True).run(g)
def test_local_fusion_strategy(self):
@torch.jit.script
def foo(x):
return x + x + x
torch.jit.set_fusion_strategy([("STATIC", 1)])
for _ in range(3):
foo(torch.rand([10]))
torch.jit.set_fusion_strategy([("STATIC", 10)])
for i in range(10):
foo(torch.rand([i]))
foo(torch.rand([i]))
g = torch.jit.last_executed_optimized_graph()
FileCheck().check_count(":TensorExprGroup", 2, exactly=True).run(g)
def test_iterative_fusion(self):
@torch.jit.script
def foo(a, b, c, d):
a = a + b
b.add_(3)
c = c + b + d
a = a + 1
return a, c
x = torch.ones(1, requires_grad=False)
foo(x, x, x, x)
foo(x, x, x, x)
# when we iterate through the block, we will start
# by fusing a = a + b with a = a + 1
# if we were to continue iteration from that fusion point,
# would miss the fusion opportunity of c = c + d + b
g = torch.jit.last_executed_optimized_graph()
self.assertEqual(len(list(g.findAllNodes("prim::TensorExprGroup"))), 2)
blob_id: 9ecef0198891d39a7a63ba244c5c29682992f03b | directory_id: eb9f655206c43c12b497c667ba56a0d358b6bc3a | content_id: e065d748f177c8f8fa813c3ba60046b646d1dd38
repo_name: JetBrains/intellij-community | path: /python/helpers/typeshed/stubs/SQLAlchemy/sqlalchemy/ext/asyncio/__init__.pyi | filename: __init__.pyi | extension: pyi | length_bytes: 691
detected_licenses: ["Apache-2.0", "MIT"] | license_type: permissive | gha_license_id: Apache-2.0
snapshot_id: 2ed226e200ecc17c037dcddd4a006de56cd43941 | revision_id: 05dbd4575d01a213f3f4d69aa4968473f2536142 | branch_name: refs/heads/master
visit_date: 2023-09-03T17:06:37.560889 | revision_date: 2023-09-03T11:51:00 | committer_date: 2023-09-03T12:12:27
github_id: 2,489,216 | star_events_count: 16,288 | fork_events_count: 6,635
gha_event_created_at: 2023-09-12T07:41:58 | gha_created_at: 2011-09-30T13:33:05 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
from .engine import (
    AsyncConnection as AsyncConnection,
    AsyncEngine as AsyncEngine,
    AsyncTransaction as AsyncTransaction,
    create_async_engine as create_async_engine,
)
from .events import AsyncConnectionEvents as AsyncConnectionEvents, AsyncSessionEvents as AsyncSessionEvents
from .result import AsyncMappingResult as AsyncMappingResult, AsyncResult as AsyncResult, AsyncScalarResult as AsyncScalarResult
from .scoping import async_scoped_session as async_scoped_session
from .session import (
    AsyncSession as AsyncSession,
    AsyncSessionTransaction as AsyncSessionTransaction,
    async_object_session as async_object_session,
    async_session as async_session,
)
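A minimal sketch of how the names re-exported by this stub are typically used in SQLAlchemy 1.4+ asyncio code (the in-memory SQLite URL and the trivial query are placeholders; an async driver such as aiosqlite is assumed):

import asyncio
from sqlalchemy import text
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine

async def main() -> None:
    engine = create_async_engine("sqlite+aiosqlite:///:memory:")  # placeholder URL
    async with AsyncSession(engine) as session:
        result = await session.execute(text("SELECT 1"))
        print(result.scalar_one())
    await engine.dispose()

asyncio.run(main())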
blob_id: ae6bf4dcf365951a210909b471a19d9c27840a80 | directory_id: f52b9b0419c7347441cf72b45b9e83b3ba2f076a | content_id: 14f35f28d860b0326aefcef0bf0d4a2bc62c2f2a
repo_name: coin-or/pulp | path: /doc/source/_static/plotter.py | filename: plotter.py | extension: py | length_bytes: 1,306
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: NOASSERTION
snapshot_id: 9ea6dcec98b389e6440aa6fce63722186598151c | revision_id: bd7cace5ada53576912b1aac59fb7409a5c06b33 | branch_name: refs/heads/master
visit_date: 2023-09-05T03:54:19.103230 | revision_date: 2023-08-08T06:54:27 | committer_date: 2023-08-08T06:54:27
github_id: 32,243,098 | star_events_count: 1,824 | fork_events_count: 424
gha_event_created_at: 2023-09-04T22:02:53 | gha_created_at: 2015-03-15T03:21:16 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
#!/usr/bin/env python
from matplotlib import rc
# Explicit imports added so the script runs standalone; the original presumably
# relied on a pylab-style star import.
from numpy import arange
from matplotlib.patches import Arrow
from matplotlib.pyplot import (axis, figure, gca, plot, savefig, subplot,
                               subplots_adjust, text, title, yticks)
rc("text", usetex=True)
rc("font", family="serif")
def plot_interval(a, c, x_left, x_right, i, fbound):
    lh = c * (1 - a[0])
    rh = c * (1 + a[1])
    x = arange(x_left, x_right + 1)
    y = 0 * x
    arrow_r = Arrow(c, 0, c * a[1], 0, 0.2)
    arrow_l = Arrow(c, 0, -c * a[0], 0, 0.2)
    plot(x, y)
    text(
        (x_left + lh) / 2.0,
        0.1,
        f"freebound interval [{lh}, {rh}] is penalty-free",
    )
    text((x_left + lh) / 2.0, 0.2, f"rhs={c}, {fbound}")
    cur_ax = gca()
    cur_ax.add_patch(arrow_l)
    cur_ax.add_patch(arrow_r)
    axis([x_left, x_right, -0.1, 0.3])
    yticks([])
    title(rf"Elasticized constraint\_{i} $C(x)= {c} $")
figure()
subplots_adjust(hspace=0.5)
fbound = "proportionFreeBound"
i = 1
subplot(2, 1, i)
a = [0.01, 0.01]
c = 200
x_left = 0.97 * c
x_right = 1.03 * c
fb_string = f"{fbound} = {a[0]}"
plot_interval(a, c, x_left, x_right, i, fb_string)
i += 1
subplot(2, 1, i)
a = [0.02, 0.05]
c = 500
x_left = 0.9 * c  # scale of window
x_right = 1.2 * c  # scale of window
fb_string = f"{fbound}List = [{a[0]},{a[1]}]"
plot_interval(a, c, x_left, x_right, i, fb_string)
savefig("freebound.jpg")
savefig("freebound.pdf")
# vim: fenc=utf-8: ft=python:sw=4:et:nu:fdm=indent:fdn=1:syn=python
blob_id: 796fa672f2ba453436b2aedeb1146e6855408922 | directory_id: a0eb6744e6f7f509b96d21f0bc8b3f8387f6861c | content_id: a5a7dbbe64ba14e9cb7e5581026f2ed3e456e7d8
repo_name: nkmk/python-snippets | path: /notebook/numpy_hsplit.py | filename: numpy_hsplit.py | extension: py | length_bytes: 591
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
snapshot_id: a6c66bdf999502e52f4795a3074ced63bf440817 | revision_id: f9dd286a9cf93f474e20371f8fffc4732cb3c4d5 | branch_name: refs/heads/master
visit_date: 2023-08-03T04:20:05.606293 | revision_date: 2023-07-26T13:21:11 | committer_date: 2023-07-26T13:21:11
github_id: 98,900,570 | star_events_count: 253 | fork_events_count: 77
gha_event_created_at: 2020-10-25T01:12:53 | gha_created_at: 2017-07-31T14:54:47 | gha_language: Jupyter Notebook
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
import numpy as np
a = np.arange(16).reshape(4, 4)
print(a)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]
# [12 13 14 15]]
a0, a1 = np.hsplit(a, 2)
print(a0)
# [[ 0 1]
# [ 4 5]
# [ 8 9]
# [12 13]]
print(a1)
# [[ 2 3]
# [ 6 7]
# [10 11]
# [14 15]]
a0, a1 = np.hsplit(a, [1])
print(a0)
# [[ 0]
# [ 4]
# [ 8]
# [12]]
print(a1)
# [[ 1 2 3]
# [ 5 6 7]
# [ 9 10 11]
# [13 14 15]]
a_1d = np.arange(6)
print(a_1d)
# [0 1 2 3 4 5]
# np.split(a_1d, 2, 1)
# IndexError: tuple index out of range
a0, a1 = np.hsplit(a_1d, 2)
print(a0)
# [0 1 2]
print(a1)
# [3 4 5]
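One case the snippet does not show is passing several split points; a small sketch continuing from the 4x4 array a above:

# Split after columns 1 and 3: three pieces of widths 1, 2 and 1.
b0, b1, b2 = np.hsplit(a, [1, 3])
print(b0.shape, b1.shape, b2.shape)
# (4, 1) (4, 2) (4, 1)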
blob_id: 66223809002d31206bb04f339bf17faf925e6b2f | directory_id: 06051bac2aeaecc738cd488965c52e976ab59fe3 | content_id: 9a0868729f79c37026d11a7c233e16931d5b41d2
repo_name: trungdq88/logmine | path: /logmine_pkg/cluster_merge.py | filename: cluster_merge.py | extension: py | length_bytes: 964
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
snapshot_id: ce9588440b13925306bcefe46cdda1babe638c83 | revision_id: 6ac777a41bbb870707a6f1471b6b78f1af17e127 | branch_name: refs/heads/master
visit_date: 2022-01-10T14:42:25.066856 | revision_date: 2022-01-06T03:37:57 | committer_date: 2022-01-06T03:37:57
github_id: 218,324,124 | star_events_count: 148 | fork_events_count: 37
gha_event_created_at: 2022-01-06T03:37:17 | gha_created_at: 2019-10-29T15:45:49 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
from .clusterer import Clusterer
class ClusterMerge():
    def __init__(self, config):
        self.clusterer = Clusterer(**config)
        self.pattern_generator = self.clusterer.pattern_generator
    def merge(self, base_list, other_list):
        for [reprA, countA, patternA] in other_list:
            exists = False
            for i in range(len(base_list)):
                [reprB, countB, patternB] = base_list[i]
                score = self.clusterer.scorer.distance(
                    reprA, reprB, self.clusterer.max_dist)
                if score <= self.clusterer.max_dist:
                    exists = True
                    base_list[i][1] += countA
                    merged_pattern = self.pattern_generator.create_pattern(
                        patternA, patternB)
                    base_list[i][2] = merged_pattern
                    break
            if not exists:
                base_list.append([reprA, countA, patternA])
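To make the data shape explicit: each cluster entry is a mutable [representative, count, pattern] triple, and merge() folds other_list into base_list in place. A standalone toy sketch of the same folding idea, with a hypothetical distance function and pattern merge standing in for logmine's Clusterer/scorer (everything named here is illustrative, not the library's API):

def toy_merge(base_list, other_list, max_dist=0.5):
    distance = lambda a, b: 0.0 if a == b else 1.0  # hypothetical scorer
    merge_patterns = lambda p1, p2: [t1 if t1 == t2 else "---" for t1, t2 in zip(p1, p2)]
    for repr_a, count_a, pattern_a in other_list:
        for entry in base_list:
            if distance(repr_a, entry[0]) <= max_dist:
                entry[1] += count_a  # accumulate the count in place
                entry[2] = merge_patterns(pattern_a, entry[2])
                break
        else:
            base_list.append([repr_a, count_a, pattern_a])

base = [[("a", "b"), 2, ["a", "b"]]]
toy_merge(base, [[("a", "b"), 3, ["a", "c"]], [("x",), 1, ["x"]]])
print(base)  # [[('a', 'b'), 5, ['a', '---']], [('x',), 1, ['x']]]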
blob_id: 90bb6de5a24436fec83af9d1d1427055591027d2 | directory_id: 993f18c21402d7a4ff21ddb7ff2ec6c80e466f20 | content_id: e11cb39de451b36f09330d1895dfdf4f10f29daf
repo_name: onnx/onnx | path: /onnx/backend/test/case/node/squeeze.py | filename: squeeze.py | extension: py | length_bytes: 1,052
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
snapshot_id: 10d3916803c7babff89ec0fa9045127bcccad376 | revision_id: 8a475b34cb3875df311a46f57571646498f5bda7 | branch_name: refs/heads/main
visit_date: 2023-08-18T18:50:03.388353 | revision_date: 2023-08-16T22:18:46 | committer_date: 2023-08-16T22:18:46
github_id: 102,692,863 | star_events_count: 16,164 | fork_events_count: 4,150
gha_event_created_at: 2023-09-14T17:10:38 | gha_created_at: 2017-09-07T04:53:45 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# Copyright (c) ONNX Project Contributors
#
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import onnx
from onnx.backend.test.case.base import Base
from onnx.backend.test.case.node import expect
class Squeeze(Base):
    @staticmethod
    def export_squeeze() -> None:
        node = onnx.helper.make_node(
            "Squeeze",
            inputs=["x", "axes"],
            outputs=["y"],
        )
        x = np.random.randn(1, 3, 4, 5).astype(np.float32)
        axes = np.array([0], dtype=np.int64)
        y = np.squeeze(x, axis=0)
        expect(node, inputs=[x, axes], outputs=[y], name="test_squeeze")
    @staticmethod
    def export_squeeze_negative_axes() -> None:
        node = onnx.helper.make_node(
            "Squeeze",
            inputs=["x", "axes"],
            outputs=["y"],
        )
        x = np.random.randn(1, 3, 1, 5).astype(np.float32)
        axes = np.array([-2], dtype=np.int64)
        y = np.squeeze(x, axis=-2)
        expect(node, inputs=[x, axes], outputs=[y], name="test_squeeze_negative_axes")
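A minimal sketch of wrapping the same Squeeze node into a full, checkable model (shapes mirror export_squeeze above; opset 13 is assumed, since that is when axes moved from an attribute to an input):

import onnx
from onnx import TensorProto, helper

node = helper.make_node("Squeeze", inputs=["x", "axes"], outputs=["y"])
graph = helper.make_graph(
    [node],
    "squeeze_example",
    inputs=[
        helper.make_tensor_value_info("x", TensorProto.FLOAT, [1, 3, 4, 5]),
        helper.make_tensor_value_info("axes", TensorProto.INT64, [1]),
    ],
    outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, [3, 4, 5])],
)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 13)])
onnx.checker.check_model(model)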
blob_id: 91b78d086dd3bc18f814571689bfae277f2ba9eb | directory_id: e22eeb5256e17a96a98b3ff25433aec2d641cd2c | content_id: fc14432213a58fb0dfd40f689e99676da9ff8065
repo_name: openstack/openstacksdk | path: /openstack/tests/unit/cloud/test_server_group.py | filename: test_server_group.py | extension: py | length_bytes: 2,765
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
snapshot_id: b4b95fd7869653feea5a3b783e9a5c588235c039 | revision_id: d474eb84c605c429bb9cccb166cabbdd1654d73c | branch_name: refs/heads/master
visit_date: 2023-09-03T22:50:03.398512 | revision_date: 2023-07-27T14:09:35 | committer_date: 2023-08-29T16:28:46
github_id: 16,223,378 | star_events_count: 124 | fork_events_count: 130
gha_event_created_at: 2023-09-06T02:52:47 | gha_created_at: 2014-01-25T02:48:00 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from openstack.tests import fakes
from openstack.tests.unit import base
class TestServerGroup(base.TestCase):
    def setUp(self):
        super(TestServerGroup, self).setUp()
        self.group_id = uuid.uuid4().hex
        self.group_name = self.getUniqueString('server-group')
        self.policies = ['affinity']
        self.fake_group = fakes.make_fake_server_group(
            self.group_id, self.group_name, self.policies
        )
    def test_create_server_group(self):
        self.register_uris(
            [
                self.get_nova_discovery_mock_dict(),
                dict(
                    method='POST',
                    uri=self.get_mock_url(
                        'compute', 'public', append=['os-server-groups']
                    ),
                    json={'server_group': self.fake_group},
                    validate=dict(
                        json={
                            'server_group': {
                                'name': self.group_name,
                                'policies': self.policies,
                            }
                        }
                    ),
                ),
            ]
        )
        self.cloud.create_server_group(
            name=self.group_name, policies=self.policies
        )
        self.assert_calls()
    def test_delete_server_group(self):
        self.register_uris(
            [
                self.get_nova_discovery_mock_dict(),
                dict(
                    method='GET',
                    uri=self.get_mock_url(
                        'compute', 'public', append=['os-server-groups']
                    ),
                    json={'server_groups': [self.fake_group]},
                ),
                dict(
                    method='DELETE',
                    uri=self.get_mock_url(
                        'compute',
                        'public',
                        append=['os-server-groups', self.group_id],
                    ),
                    json={'server_groups': [self.fake_group]},
                ),
            ]
        )
        self.assertTrue(self.cloud.delete_server_group(self.group_name))
        self.assert_calls()
blob_id: fb23bb2186854cd1c0188daedea91e0002d44542 | directory_id: 23a6340f5b63c2bb31ad686bc26001511e47e05f | content_id: 4230862ec12d41dee6b5ad7ae96bf7c1d69692f4
repo_name: googlefonts/glyphsLib | path: /tests/builder/custom_params_test.py | filename: custom_params_test.py | extension: py | length_bytes: 27,318
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
snapshot_id: f945a3ddc7e905e5ac52a5fc3d77ee886138813e | revision_id: 8577f8309725681842db493f46bd27b17b92d159 | branch_name: refs/heads/main
visit_date: 2023-08-25T18:57:17.481591 | revision_date: 2023-08-20T20:56:23 | committer_date: 2023-08-20T20:56:23
github_id: 44,563,779 | star_events_count: 110 | fork_events_count: 29
gha_event_created_at: 2023-09-04T08:44:15 | gha_created_at: 2015-10-19T21:00:22 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import unittest
from textwrap import dedent
from unittest import mock
from unittest.mock import patch
import glyphsLib
import defcon
import ufoLib2
from glyphsLib.builder.builders import UFOBuilder
from glyphsLib.builder import to_ufos
from glyphsLib.builder.custom_params import (
_set_default_params,
GLYPHS_UFO_CUSTOM_PARAMS,
)
from glyphsLib.builder.constants import (
UFO2FT_FILTERS_KEY,
UFO2FT_USE_PROD_NAMES_KEY,
FONT_CUSTOM_PARAM_PREFIX,
MASTER_CUSTOM_PARAM_PREFIX,
UFO_FILENAME_CUSTOM_PARAM,
GLYPHLIB_PREFIX,
UFO_FILENAME_KEY,
FULL_FILENAME_KEY,
)
from glyphsLib.classes import GSFont, GSFontMaster, GSCustomParameter, GSGlyph, GSLayer
from glyphsLib.types import parse_datetime
DATA = os.path.join(os.path.dirname(os.path.dirname(__file__)), "data")
class SetCustomParamsTestBase(object):
ufo_module = None # subclasses must override this
def setUp(self):
self.ufo = self.ufo_module.Font()
self.font = GSFont()
self.master = GSFontMaster()
self.font.masters.insert(0, self.master)
self.builder = UFOBuilder(self.font)
def set_custom_params(self):
self.builder.to_ufo_custom_params(self.ufo, self.font)
self.builder.to_ufo_custom_params(self.ufo, self.master)
def test_normalizes_curved_quotes_in_names(self):
self.master.customParameters = [
GSCustomParameter(name="‘bad’", value=1),
GSCustomParameter(name="“also bad”", value=2),
]
self.set_custom_params()
self.assertIn(MASTER_CUSTOM_PARAM_PREFIX + "'bad'", self.ufo.lib)
self.assertIn(MASTER_CUSTOM_PARAM_PREFIX + '"also bad"', self.ufo.lib)
def test_set_fsSelection_flags_none(self):
self.ufo.info.openTypeOS2Selection = None
self.font = glyphsLib.to_glyphs([self.ufo], minimize_ufo_diffs=True)
self.assertEqual(self.font.customParameters["Use Typo Metrics"], None)
self.assertEqual(self.font.customParameters["Has WWS Names"], None)
self.assertEqual(
self.font.customParameters["openTypeOS2SelectionUnsupportedBits"], None
)
self.set_custom_params()
self.assertEqual(self.ufo.info.openTypeOS2Selection, None)
def test_set_fsSelection_flags_empty(self):
self.ufo.info.openTypeOS2Selection = []
self.font = glyphsLib.to_glyphs([self.ufo], minimize_ufo_diffs=True)
self.assertEqual(self.font.customParameters["Use Typo Metrics"], None)
self.assertEqual(self.font.customParameters["Has WWS Names"], None)
self.assertEqual(
self.font.customParameters["openTypeOS2SelectionUnsupportedBits"], []
)
self.set_custom_params()
self.assertEqual(self.ufo.info.openTypeOS2Selection, [])
def test_set_fsSelection_flags_all(self):
self.ufo.info.openTypeOS2Selection = [1, 2, 3, 4, 7, 8, 9]
self.font = glyphsLib.to_glyphs([self.ufo], minimize_ufo_diffs=True)
self.assertEqual(self.font.customParameters["Use Typo Metrics"], True)
self.assertEqual(self.font.customParameters["Has WWS Names"], True)
self.assertEqual(
self.font.customParameters["openTypeOS2SelectionUnsupportedBits"],
[1, 2, 3, 4, 9],
)
self.set_custom_params()
self.assertEqual(self.ufo.info.openTypeOS2Selection, [1, 2, 3, 4, 7, 8, 9])
def test_set_fsSelection_flags(self):
self.assertEqual(self.ufo.info.openTypeOS2Selection, None)
self.master.customParameters["Has WWS Names"] = False
self.set_custom_params()
self.assertEqual(self.ufo.info.openTypeOS2Selection, None)
self.master.customParameters["Use Typo Metrics"] = True
self.set_custom_params()
self.assertEqual(self.ufo.info.openTypeOS2Selection, [7])
self.ufo = self.ufo_module.Font()
self.master.customParameters = [
GSCustomParameter(name="Use Typo Metrics", value=True),
GSCustomParameter(name="Has WWS Names", value=True),
]
self.set_custom_params()
self.assertEqual(self.ufo.info.openTypeOS2Selection, [7, 8])
def test_integer_parameters(self):
"""Test casting glyphsapp customParameters whose values are just
integers into ufo equivalents."""
integer_params = [
"underlinePosition",
"underlineThickness",
"strikeoutPosition",
"strikeoutSize",
"subscriptXSize",
"subscriptYSize",
"subscriptXOffset",
"subscriptYOffset",
"superscriptXSize",
"superscriptYSize",
"superscriptXOffset",
"superscriptYOffset",
]
params_to_check = [
(k, v) for (k, v) in GLYPHS_UFO_CUSTOM_PARAMS if k in integer_params
]
for glyphs_key, ufo_key in params_to_check:
self.master.customParameters[glyphs_key] = 10
self.set_custom_params()
self.assertEqual(getattr(self.ufo.info, ufo_key), 10)
for param in self.master.customParameters:
if param.name == glyphs_key:
param.value = -2
break
self.set_custom_params()
self.assertEqual(getattr(self.ufo.info, ufo_key), -2)
@patch("glyphsLib.builder.custom_params.parse_glyphs_filter")
def test_parse_glyphs_filter(self, mock_parse_glyphs_filter):
pre_filter = "AddExtremes"
filter1 = "Transformations;OffsetX:40;OffsetY:60;include:uni0334,uni0335"
filter2 = "Transformations;OffsetX:10;OffsetY:-10;exclude:uni0334,uni0335"
self.master.customParameters.extend(
[
GSCustomParameter(name="PreFilter", value=pre_filter),
GSCustomParameter(name="Filter", value=filter1),
GSCustomParameter(name="Filter", value=filter2),
]
)
self.set_custom_params()
self.assertEqual(mock_parse_glyphs_filter.call_count, 3)
self.assertEqual(
mock_parse_glyphs_filter.call_args_list[0],
mock.call(pre_filter, is_pre=True),
)
self.assertEqual(
mock_parse_glyphs_filter.call_args_list[1], mock.call(filter1, is_pre=False)
)
self.assertEqual(
mock_parse_glyphs_filter.call_args_list[2], mock.call(filter2, is_pre=False)
)
def test_set_defaults(self):
_set_default_params(self.ufo)
self.assertEqual(self.ufo.info.openTypeOS2Type, [3])
self.assertEqual(self.ufo.info.postscriptUnderlinePosition, -100)
self.assertEqual(self.ufo.info.postscriptUnderlineThickness, 50)
def test_set_codePageRanges_empty(self):
self.font.customParameters["codePageRanges"] = []
self.set_custom_params()
self.assertEqual(self.ufo.info.openTypeOS2CodePageRanges, [])
self.font = glyphsLib.to_glyphs([self.ufo], minimize_ufo_diffs=True)
self.assertEqual(self.font.customParameters["codePageRanges"], [])
def test_set_codePageRanges(self):
self.font.customParameters["codePageRanges"] = [1252, 1250]
self.font.customParameters["codePageRangesUnsupportedBits"] = [15]
self.set_custom_params()
self.assertEqual(self.ufo.info.openTypeOS2CodePageRanges, [0, 1, 15])
self.font = glyphsLib.to_glyphs([self.ufo], minimize_ufo_diffs=True)
self.assertEqual(self.font.customParameters["codePageRanges"], [1252, 1250])
self.assertEqual(
self.font.customParameters["codePageRangesUnsupportedBits"], [15]
)
def test_set_openTypeOS2CodePageRanges(self):
self.font.customParameters["openTypeOS2CodePageRanges"] = [1252, 1250]
self.font.customParameters["codePageRangesUnsupportedBits"] = [15]
self.set_custom_params()
self.assertEqual(self.ufo.info.openTypeOS2CodePageRanges, [0, 1, 15])
self.font = glyphsLib.to_glyphs([self.ufo], minimize_ufo_diffs=True)
self.assertEqual(self.font.customParameters["codePageRanges"], [1252, 1250])
self.assertEqual(
self.font.customParameters["codePageRangesUnsupportedBits"], [15]
)
def test_gasp_table(self):
gasp_table = {"65535": "15", "20": "7", "8": "10"}
self.font.customParameters["GASP Table"] = gasp_table
self.set_custom_params()
ufo_range_records = self.ufo.info.openTypeGaspRangeRecords
self.assertIsNotNone(ufo_range_records)
self.assertEqual(len(ufo_range_records), 3)
rec1, rec2, rec3 = ufo_range_records
self.assertEqual(rec1["rangeMaxPPEM"], 8)
self.assertEqual(rec1["rangeGaspBehavior"], [1, 3])
self.assertEqual(rec2["rangeMaxPPEM"], 20)
self.assertEqual(rec2["rangeGaspBehavior"], [0, 1, 2])
self.assertEqual(rec3["rangeMaxPPEM"], 65535)
self.assertEqual(rec3["rangeGaspBehavior"], [0, 1, 2, 3])
def test_set_disables_nice_names(self):
self.font.disablesNiceNames = False
self.set_custom_params()
self.assertEqual(True, self.ufo.lib[FONT_CUSTOM_PARAM_PREFIX + "useNiceNames"])
def test_set_disable_last_change(self):
glyph = GSGlyph()
glyph.name = "a"
self.font.glyphs.append(glyph)
layer = GSLayer()
layer.layerId = self.font.masters[0].id
layer.associatedMasterId = self.font.masters[0].id
layer.width = 100
glyph.layers.append(layer)
glyph.lastChange = parse_datetime("2017-10-03 07:35:46 +0000")
self.font.customParameters["Disable Last Change"] = True
self.ufo = to_ufos(self.font)[0]
self.assertEqual(
True, self.ufo.lib[FONT_CUSTOM_PARAM_PREFIX + "disablesLastChange"]
)
self.assertNotIn(GLYPHLIB_PREFIX + "lastChange", self.ufo["a"].lib)
# https://github.com/googlefonts/glyphsLib/issues/268
def test_xHeight(self):
self.ufo.info.xHeight = 300
self.master.customParameters["xHeight"] = "500"
self.set_custom_params()
# Additional xHeight values are Glyphs-specific and stored in lib
self.assertEqual(self.ufo.lib[MASTER_CUSTOM_PARAM_PREFIX + "xHeight"], "500")
# The xHeight from the property is not modified
self.assertEqual(self.ufo.info.xHeight, 300)
# TODO: (jany) check that the instance custom param wins over the
# interpolated value
def test_replace_feature(self):
self.ufo.features.text = dedent(
"""
feature liga {
# only the first match is replaced
sub f i by fi;
} liga;
feature calt {
sub e' t' c by ampersand;
} calt;
feature liga {
sub f l by fl;
} liga;
"""
)
repl = "liga; sub f f by ff;"
self.master.customParameters["Replace Feature"] = repl
self.set_custom_params()
self.assertEqual(
self.ufo.features.text,
dedent(
"""
feature liga {
sub f f by ff;
} liga;
feature calt {
sub e' t' c by ampersand;
} calt;
feature liga {
sub f l by fl;
} liga;
"""
),
)
# only replace feature body if tag already present
original = self.ufo.features.text
repl = "numr; sub one by one.numr;\nsub two by two.numr;\n"
self.master.customParameters["Replace Feature"] = repl
self.set_custom_params()
self.assertEqual(self.ufo.features.text, original)
def test_replace_prefix(self):
self.ufo.features.text = dedent(
"""\
# Prefix: AAA
include(../aaa.fea);
# Prefix: FOO
# foo
# Prefix: ZZZ
include(../zzz.fea);
# Prefix: BAR
# bar
feature liga {
sub f i by f_i;
} liga;
table GDEF {
GlyphClassDef
[f i], # Base
[f_i], # Liga
, # Mark
;
} GDEF;
"""
)
self.master.customParameters.append(
GSCustomParameter("Replace Prefix", "FOO; include(../foo.fea);")
)
self.master.customParameters.append(
GSCustomParameter("Replace Prefix", "BAR; include(../bar.fea);")
)
self.set_custom_params()
self.assertEqual(
self.ufo.features.text,
dedent(
"""\
# Prefix: AAA
include(../aaa.fea);
# Prefix: FOO
include(../foo.fea);
# Prefix: ZZZ
include(../zzz.fea);
# Prefix: BAR
include(../bar.fea);
table GDEF {
GlyphClassDef
[f i], # Base
[f_i], # Liga
, # Mark
;
} GDEF;
feature liga {
sub f i by f_i;
} liga;
"""
),
)
def test_useProductionNames(self):
for value in (True, False):
self.master.customParameters["Don't use Production Names"] = value
self.set_custom_params()
self.assertIn(UFO2FT_USE_PROD_NAMES_KEY, self.ufo.lib)
self.assertEqual(self.ufo.lib[UFO2FT_USE_PROD_NAMES_KEY], not value)
def test_default_fstype(self):
# No specified fsType => set default value
self.set_custom_params()
self.assertEqual(self.ufo.info.openTypeOS2Type, [3])
def test_set_fstype(self):
# Set another fsType => store that
self.master.customParameters["fsType"] = [2]
self.set_custom_params()
self.assertEqual(self.ufo.info.openTypeOS2Type, [2])
def test_empty_fstype(self):
# Set empty fsType => store empty
self.master.customParameters["fsType"] = []
self.set_custom_params()
self.assertEqual(self.ufo.info.openTypeOS2Type, [])
def test_version_string(self):
# TODO: (jany) test the automatic replacement that is described in the
# Glyphs Handbook
self.font.customParameters["versionString"] = "Version 2.040"
self.set_custom_params()
self.assertEqual(self.ufo.info.openTypeNameVersion, "Version 2.040")
def test_ufo2ft_filter_roundtrip(self):
ufo_filters = [
{"name": "propagateAnchors", "pre": True, "include": ["a", "b", "c"]}
]
glyphs_filter = "propagateAnchors;include:a,b,c"
# Test the one-way conversion of (Pre)Filters into ufo2ft filters. See the
# docstring for FilterParamHandler.
self.master.customParameters["PreFilter"] = glyphs_filter
self.set_custom_params()
self.assertEqual(self.ufo.lib[UFO2FT_FILTERS_KEY], ufo_filters)
# Test the round-tripping of ufo2ft filters from UFO -> Glyphs master -> UFO.
# See the docstring for FilterParamHandler.
font_rt = glyphsLib.to_glyphs([self.ufo])
self.assertNotIn("PreFilter", font_rt.masters[0].customParameters)
self.assertEqual(font_rt.masters[0].userData[UFO2FT_FILTERS_KEY], ufo_filters)
ufo_rt = glyphsLib.to_ufos(font_rt, ufo_module=self.ufo_module)[0]
self.assertEqual(ufo_rt.lib[UFO2FT_FILTERS_KEY], ufo_filters)
def test_color_palettes(self):
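# Glyphs colour strings are "R,G,B,A" (or "grey,A") with 0-255 components; conversion maps
# them to 0.0-1.0 float tuples under the ufo2ft colorPalettes lib key, as checked below.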
glyphs_palettes = [
["68,0,59,255", "220,187,72,255", "42,255", "87,255", "0,138,255,255"]
]
ufo_palettes = [
[
(0.26666666666666666, 0.0, 0.23137254901960785, 1.0),
(0.8627450980392157, 0.7333333333333333, 0.2823529411764706, 1.0),
(0.16470588235294117, 0.16470588235294117, 0.16470588235294117, 1.0),
(0.3411764705882353, 0.3411764705882353, 0.3411764705882353, 1.0),
(0.0, 0.5411764705882353, 1.0, 1.0),
]
]
self.font.customParameters["Color Palettes"] = glyphs_palettes
self.set_custom_params()
self.assertEqual(
self.ufo.lib["com.github.googlei18n.ufo2ft.colorPalettes"], ufo_palettes
)
# Test the round-tripping
font = glyphsLib.to_glyphs([self.ufo])
self.assertEqual(font.customParameters["Color Palettes"], glyphs_palettes)
def test_meta_table(self):
glyphs_meta = [
{"data": "de-Latn", "tag": "dlng"},
{"data": "en-Latn", "tag": "dlng"},
{"data": "sr-Cyrl", "tag": "slng"},
{"data": "\x00\x00...", "tag": "appl"},
]
self.font.customParameters["meta Table"] = glyphs_meta
ufo_meta = {
"dlng": ["de-Latn", "en-Latn"],
"slng": ["sr-Cyrl"],
"appl": "\x00\x00...",
}
self.set_custom_params()
self.assertEqual(self.ufo.lib["public.openTypeMeta"], ufo_meta)
font = glyphsLib.to_glyphs([self.ufo])
self.assertEqual(font.customParameters["meta Table"], glyphs_meta)
def test_name_table_entry(self):
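# "Name Table Entry" values take the form "<nameID> [<platformID> [<encodingID> [<languageID>]]]; <string>"
# (IDs may be decimal, octal with a leading 0, or hex with 0x); omitted IDs get platform-dependent
# defaults, as the expected openTypeNameRecords below show.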
self.font.customParameters.append(
GSCustomParameter("Name Table Entry", "1024; FOO; BAZ")
)
self.font.customParameters.append(
GSCustomParameter("Name Table Entry", "2048 1; FOO")
)
self.font.customParameters.append(
GSCustomParameter("Name Table Entry", "4096 1 2; FOO")
)
self.font.customParameters.append(
GSCustomParameter("Name Table Entry", "8192 1 2 3; FOO")
)
self.font.customParameters.append(
GSCustomParameter("Name Table Entry", "0x4000 074; BAZ")
)
self.set_custom_params()
ufo_records = [
{
"nameID": 1024,
"platformID": 3,
"encodingID": 1,
"languageID": 0x409,
"string": "FOO; BAZ",
},
{
"nameID": 2048,
"platformID": 1,
"encodingID": 0,
"languageID": 0,
"string": "FOO",
},
{
"nameID": 4096,
"platformID": 1,
"encodingID": 2,
"languageID": 0,
"string": "FOO",
},
{
"nameID": 8192,
"platformID": 1,
"encodingID": 2,
"languageID": 3,
"string": "FOO",
},
{
"nameID": 16384,
"platformID": 60,
"encodingID": 1,
"languageID": 0x409,
"string": "BAZ",
},
]
self.assertEqual(
[dict(r) for r in self.ufo.info.openTypeNameRecords], ufo_records
)
font = glyphsLib.to_glyphs([self.ufo])
self.assertEqual(font.customParameters[0].value, "1024 3 1 1033; FOO; BAZ")
self.assertEqual(font.customParameters[1].value, "2048 1 0 0; FOO")
self.assertEqual(font.customParameters[2].value, "4096 1 2 0; FOO")
self.assertEqual(font.customParameters[3].value, "8192 1 2 3; FOO")
self.assertEqual(font.customParameters[4].value, "16384 60 1 1033; BAZ")
class SetCustomParamsTestUfoLib2(SetCustomParamsTestBase, unittest.TestCase):
ufo_module = ufoLib2
class SetCustomParamsTestDefcon(SetCustomParamsTestBase, unittest.TestCase):
ufo_module = defcon
def test_ufo_filename(ufo_module):
"""Test that new-style UFO_FILENAME_CUSTOM_PARAM is written instead of
(UFO_FILENAME_KEY|FULL_FILENAME_KEY)."""
font = glyphsLib.GSFont(os.path.join(DATA, "UFOFilenameTest.glyphs"))
ds = glyphsLib.to_designspace(
font, minimize_glyphs_diffs=True, ufo_module=ufo_module
)
assert ds.sources[0].filename == "MyFontMaster.ufo"
assert ds.instances[0].filename == "../build/instance_ufos/MyFont.ufo"
assert "com.schriftgestaltung.customParameters" not in ds.instances[0].lib
font_rt = glyphsLib.to_glyphs(ds, minimize_ufo_diffs=True)
assert (
font_rt.masters[0].customParameters[UFO_FILENAME_CUSTOM_PARAM]
== "MyFontMaster.ufo"
)
assert UFO_FILENAME_KEY not in font_rt.masters[0].userData
assert (
font_rt.instances[0].customParameters[UFO_FILENAME_CUSTOM_PARAM]
== "../build/instance_ufos/MyFont.ufo"
)
assert FULL_FILENAME_KEY not in font_rt.instances[0].customParameters
ds_rt = glyphsLib.to_designspace(
font_rt, minimize_glyphs_diffs=True, ufo_module=ufo_module
)
assert ds_rt.sources[0].filename == "MyFontMaster.ufo"
assert ds_rt.instances[0].filename == "../build/instance_ufos/MyFont.ufo"
def test_ufo_filename_with_legacy(ufo_module):
"""Test that new-style UFO_FILENAME_CUSTOM_PARAM overrides legacy
(UFO_FILENAME_KEY|FULL_FILENAME_KEY)."""
font = glyphsLib.GSFont(os.path.join(DATA, "UFOFilenameTest.glyphs"))
font.masters[0].customParameters[UFO_FILENAME_CUSTOM_PARAM] = "aaa.ufo"
font.instances[0].customParameters[UFO_FILENAME_CUSTOM_PARAM] = "bbb.ufo"
ds = glyphsLib.to_designspace(
font, minimize_glyphs_diffs=True, ufo_module=ufo_module
)
assert ds.sources[0].filename == "aaa.ufo"
assert ds.instances[0].filename == "bbb.ufo"
def test_ufo_filename_with_instance_empty(ufo_module):
font = glyphsLib.GSFont(os.path.join(DATA, "UFOFilenameTest.glyphs"))
font.masters[0].customParameters[UFO_FILENAME_CUSTOM_PARAM] = "aaa.ufo"
del font.instances[0].customParameters[UFO_FILENAME_CUSTOM_PARAM]
del font.instances[0].customParameters[FULL_FILENAME_KEY]
ds = glyphsLib.to_designspace(
font, minimize_glyphs_diffs=True, ufo_module=ufo_module
)
assert ds.sources[0].filename == "aaa.ufo"
# Instance filename should be whatever the default is.
assert ds.instances[0].filename == "instance_ufos/NewFont-Regular.ufo"
def test_ufo_opentype_name_preferred_family_subfamily_name():
from glyphsLib.interpolation import apply_instance_data_to_ufo
filenames = [
"UFOInstanceParametersTestV2.glyphs",
# NOTE: In the format of version 3, the preferred family and subfamily
# names are not actually saved in custom parameters but properties.
"UFOInstanceParametersTestV3.glyphs",
]
for filename in filenames:
file = glyphsLib.GSFont(os.path.join(DATA, filename))
space = glyphsLib.to_designspace(file, minimal=True)
assert len(space.sources) == 2, filename
assert len(space.instances) == 3, filename
for instance, name in zip(space.instances, ["Thin", "Regular", "Black"]):
source = copy.deepcopy(space.sources[0])
apply_instance_data_to_ufo(source.font, instance, space)
actual = source.font.info.openTypeNamePreferredFamilyName
assert actual == "Typographic New Font", filename
actual = source.font.info.openTypeNamePreferredSubfamilyName
assert actual == f"Typographic {name}", filename
def test_ufo_opentype_name_records():
from glyphsLib.interpolation import apply_instance_data_to_ufo
filenames = [
"UFOInstanceParametersTestV2.glyphs",
"UFOInstanceParametersTestV3.glyphs",
]
for filename in filenames:
file = glyphsLib.GSFont(os.path.join(DATA, filename))
space = glyphsLib.to_designspace(file, minimal=True)
assert len(space.sources) == 2, filename
for source in space.sources:
actual = list(map(dict, source.font.info.openTypeNameRecords))
expected = [
{
"nameID": 42,
"platformID": 0,
"encodingID": 4,
"languageID": 0,
"string": "File",
},
]
assert actual == expected, filename
assert len(space.instances) == 3, filename
for instance, name in zip(space.instances, ["Thin", "Regular", "Black"]):
source = copy.deepcopy(space.sources[0])
apply_instance_data_to_ufo(source.font, instance, space)
actual = list(map(dict, source.font.info.openTypeNameRecords))
expected = [
{
"nameID": 42,
"platformID": 0,
"encodingID": 4,
"languageID": 0,
"string": "File",
},
{
"nameID": 43,
"platformID": 0,
"encodingID": 4,
"languageID": 0,
"string": f"{name} Instance",
},
]
assert actual == expected, filename
def test_ufo_opentype_os2_selection():
from glyphsLib.interpolation import apply_instance_data_to_ufo
filenames = [
"UFOInstanceParametersTestV2.glyphs",
"UFOInstanceParametersTestV3.glyphs",
]
for filename in filenames:
file = glyphsLib.GSFont(os.path.join(DATA, filename))
space = glyphsLib.to_designspace(file, minimal=True)
assert len(space.sources) == 2, filename
assert len(space.instances) == 3, filename
for instance in space.instances:
source = copy.deepcopy(space.sources[0])
apply_instance_data_to_ufo(source.font, instance, space)
actual = source.font.info.openTypeOS2Selection
assert actual == [7, 8], filename
def test_mutiple_params(ufo_module):
"""Test multiple custom parameters with the same name on GSFont."""
font = GSFont(os.path.join(DATA, "CustomPrametersTest.glyphs"))
assert len(font.customParameters) == 3
assert all("Virtual Master" == c.name for c in font.customParameters)
assert font.customParameters[0].value == [{"Axis": "Spacing", "Location": 0}]
assert font.customParameters[1].value == [{"Axis": "Spacing", "Location": -100}]
assert font.customParameters[2].value == [{"Axis": "Spacing", "Location": 100}]
instance = font.instances[0]
assert len(instance.customParameters) == 2
assert all("Replace Feature" == c.name for c in instance.customParameters)
assert instance.customParameters[0].value == "ccmp;sub space by space;"
assert instance.customParameters[1].value == "liga;sub space space by space;"
|
f42c32a17f474f025c0e22f3db84f63d9ffcdc13
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/ios/chrome/browser/ui/default_promo/DEPS
|
5b41351ad9ca6dc2337311bb6d1565215fff1fa2
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 154
|
DEPS
|
include_rules = [
"+ios/chrome/browser/ui/infobars",
"+ios/chrome/browser/ui/policy/user_policy_util.h",
"+ios/chrome/browser/ui/promos_manager",
]
|
|
7d81e6302418aa07d6175d9a25e253d773b9e63d
|
2d0bada349646b801a69c542407279cc7bc25013
|
/src/vai_quantizer/vai_q_pytorch/nndct_shared/utils/parameters.py
|
d9b3e486b8420aa0887d226825e6975cea12af1b
|
[
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-3-Clause-Open-MPI",
"LicenseRef-scancode-free-unknown",
"Libtool-exception",
"GCC-exception-3.1",
"LicenseRef-scancode-mit-old-style",
"OFL-1.1",
"JSON",
"LGPL-2.1-only",
"LGPL-2.0-or-later",
"ICU",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-or-later",
"GPL-3.0-only",
"LicenseRef-scancode-issl-2018",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-3.0-or-later",
"Zlib",
"BSD-Source-Code",
"ClArtistic",
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"NCSA",
"LicenseRef-scancode-proprietary-license",
"GPL-2.0-only",
"CC-BY-4.0",
"FSFULLR",
"Minpack",
"Unlicense",
"BSL-1.0",
"NAIST-2003",
"LicenseRef-scancode-protobuf",
"LicenseRef-scancode-public-domain",
"Libpng",
"Spencer-94",
"Intel",
"GPL-1.0-or-later",
"MPL-2.0"
] |
permissive
|
Xilinx/Vitis-AI
|
31e664f7adff0958bb7d149883ab9c231efb3541
|
f74ddc6ed086ba949b791626638717e21505dba2
|
refs/heads/master
| 2023-08-31T02:44:51.029166
| 2023-07-27T06:50:28
| 2023-07-27T06:50:28
| 215,649,623
| 1,283
| 683
|
Apache-2.0
| 2023-08-17T09:24:55
| 2019-10-16T21:41:54
|
Python
|
UTF-8
|
Python
| false
| false
| 6,512
|
py
|
parameters.py
|
#
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from nndct_shared.base import NNDCT_OP
def get_batchnorm_params(param_list, param_getter, center=True, scale=True):
#order: gamma,beta,mean,var
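# When scale is False the missing gamma is filled with ones, and when center is False the
# missing beta is filled with zeros, so the caller always gets [gamma, beta, mean, var] back.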
if all(param_getter(p) is not None for p in param_list):
param_shape = param_getter(param_list[-1]).shape
bn_params = []
if center and scale:
bn_params = [param_getter(p) for p in param_list]
elif center:
bn_params = [np.ones(param_shape),
param_getter(param_list[0])
] + [param_getter(p) for p in param_list[-2:]]
elif scale:
bn_params = [param_getter(param_list[0]),
np.zeros(param_shape)
] + [param_getter(p) for p in param_list[-2:]]
if len(bn_params) == 2:
#no mean and var
bn_params.extend([np.zeros(param_shape), np.ones(param_shape)])
else:
bn_params = [None] * 4
assert len(
bn_params
) == 4, "batch norm should have 4 variables: gamma, beta, mean, var, please check!"
return bn_params
def get_batchnorm_param_names(param_list, center=True, scale=True):
if center and scale:
assert len(
param_list) == 4, "expect 4 parameters names, got " + str(param_list)
return {
'gamma': param_list[0],
'beta': param_list[1],
'mean': param_list[2],
'var': param_list[3]
}
elif center:
assert len(
param_list) == 3, "expect 3 parameters names, got " + str(param_list)
return {'beta': param_list[0], 'mean': param_list[1], 'var': param_list[2]}
elif scale:
assert len(
param_list) == 3, "expect 3 parameters names, got " + str(param_list)
return {'gamma': param_list[0], 'mean': param_list[1], 'var': param_list[2]}
def get_in_out_channel_idx(ndim, optype, data_formats):
#TODO: same shape with different format, is this possible?
if ndim == 1:
return 0, 0
if optype == NNDCT_OP.CONV2D:
if data_formats[optype] == 'HWIO':
in_idx, out_idx = 2, 3
elif data_formats[optype] == 'OIHW':
in_idx, out_idx = 1, 0
else:
raise Exception("data format of conv2d kernel {} is not supported".format(
data_formats[NNDCT_OP.CONV2D]))
elif optype == NNDCT_OP.DEPTHWISE_CONV2D:
if data_formats[optype] == 'HWIO':
in_idx, out_idx = 2, 2
elif data_formats[optype] == 'OIHW':
in_idx, out_idx = 1, 1
else:
raise Exception(
"data format of depthwise_conv2d kernel {} is not supported".format(
data_formats[optype]))
elif optype in [NNDCT_OP.DENSE, NNDCT_OP.BASIC_LSTM]:
if data_formats[optype] == 'IO':
in_idx, out_idx = 0, 1
elif data_formats[optype] == 'OI':
in_idx, out_idx = 1, 0
else:
raise Exception("data format of 2 dim mat {} is not supported".format(
data_formats[optype]))
else:
raise Exception("unexpected optype: " + str(optype))
return in_idx, out_idx
def get_tensor_out_dim(tensor, optype, data_formats):
_, out_idx = get_in_out_channel_idx(tensor.ndim, optype, data_formats)
return tensor.shape[out_idx]
def get_tensor_in_dim(tensor, optype, data_formats):
in_idx, _ = get_in_out_channel_idx(tensor.ndim, optype, data_formats)
return tensor.shape[in_idx]
def delete_in_out_channel_indexs(data,
in_idx=None,
out_idx=None,
in_channel_array=None,
out_channel_array=None):
if in_idx is not None and in_channel_array is not None and not (
in_idx == out_idx and out_channel_array is not None):
data = np.delete(data, in_channel_array, axis=in_idx)
if out_idx is not None and out_channel_array is not None:
data = np.delete(data, out_channel_array, axis=out_idx)
return data
def insert_in_out_channel_indexs(data,
in_idx=None,
out_idx=None,
in_channel_array=None,
out_channel_array=None):
if in_idx is not None and in_channel_array is not None and not (
in_idx == out_idx and out_channel_array is not None):
for pos in sorted(in_channel_array.tolist()):
data = np.insert(data, pos, 0, axis=in_idx)
if out_idx is not None and out_channel_array is not None:
for pos in sorted(out_channel_array.tolist()):
data = np.insert(data, pos, 0, axis=out_idx)
return data
def expand_in_out_channel_indexs(data,
in_idx=None,
out_idx=None,
in_channel_array=None,
out_channel_array=None):
# assert len(data.shape) in [1,2,4], 'unexpected param data shape'
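# Re-inserts previously pruned channels as zero slices along the input/output channel axes,
# so the returned array regains the pre-pruning shape while keeping the surviving data in place.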
in_dim = None
out_dim = None
if in_channel_array is not None and in_idx is not None and not (
in_idx == out_idx and out_channel_array is not None):
in_dim = data.shape[in_idx] + len(in_channel_array)
if out_idx is not None and out_channel_array is not None:
out_dim = data.shape[out_idx] + len(out_channel_array)
assert in_dim is not None or out_dim is not None
expand_shape = [0] * len(data.shape)
expand_idxs = [0] * len(data.shape)
for idx, dim in enumerate(data.shape):
if in_dim is not None and idx == in_idx:
expand_shape[idx] = in_dim
idx_in_channel = sorted(
np.array(list(set(range(in_dim)) - set(in_channel_array))))
expand_idxs[idx] = idx_in_channel
elif out_dim is not None and idx == out_idx:
expand_shape[idx] = out_dim
idx_out_channel = sorted(
np.array(list(set(range(out_dim)) - set(out_channel_array))))
expand_idxs[idx] = idx_out_channel
else:
expand_shape[idx] = dim
expand_idxs[idx] = np.array(range(dim))
expand_data = np.zeros(expand_shape, dtype=data.dtype)
expand_data[np.ix_(*expand_idxs)] = data
return expand_data
|
d944e449ce39567cb55446337d76f39698be3e7f
|
eb7afa613940f5a3f202352a94dd996edcb6bed5
|
/boto3_type_annotations/boto3_type_annotations/lex_models/client.py
|
908801b11d3cf7e1085f9253bad024cd404ed882
|
[
"MIT"
] |
permissive
|
alliefitter/boto3_type_annotations
|
e4da614e27a1d2ad3c9c653c50b8e30108180da5
|
2a88aa562b1aee6e8a6cc30402980884b3707fbb
|
refs/heads/master
| 2020-04-05T22:05:12.689913
| 2019-11-28T03:32:13
| 2019-11-28T03:32:13
| 157,244,330
| 131
| 11
|
MIT
| 2023-04-21T17:17:03
| 2018-11-12T16:38:57
|
Python
|
UTF-8
|
Python
| false
| false
| 4,817
|
py
|
client.py
|
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from typing import Union
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import List
class Client(BaseClient):
def can_paginate(self, operation_name: str = None):
pass
def create_bot_version(self, name: str, checksum: str = None) -> Dict:
pass
def create_intent_version(self, name: str, checksum: str = None) -> Dict:
pass
def create_slot_type_version(self, name: str, checksum: str = None) -> Dict:
pass
def delete_bot(self, name: str):
pass
def delete_bot_alias(self, name: str, botName: str):
pass
def delete_bot_channel_association(self, name: str, botName: str, botAlias: str):
pass
def delete_bot_version(self, name: str, version: str):
pass
def delete_intent(self, name: str):
pass
def delete_intent_version(self, name: str, version: str):
pass
def delete_slot_type(self, name: str):
pass
def delete_slot_type_version(self, name: str, version: str):
pass
def delete_utterances(self, botName: str, userId: str):
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
pass
def get_bot(self, name: str, versionOrAlias: str) -> Dict:
pass
def get_bot_alias(self, name: str, botName: str) -> Dict:
pass
def get_bot_aliases(self, botName: str, nextToken: str = None, maxResults: int = None, nameContains: str = None) -> Dict:
pass
def get_bot_channel_association(self, name: str, botName: str, botAlias: str) -> Dict:
pass
def get_bot_channel_associations(self, botName: str, botAlias: str, nextToken: str = None, maxResults: int = None, nameContains: str = None) -> Dict:
pass
def get_bot_versions(self, name: str, nextToken: str = None, maxResults: int = None) -> Dict:
pass
def get_bots(self, nextToken: str = None, maxResults: int = None, nameContains: str = None) -> Dict:
pass
def get_builtin_intent(self, signature: str) -> Dict:
pass
def get_builtin_intents(self, locale: str = None, signatureContains: str = None, nextToken: str = None, maxResults: int = None) -> Dict:
pass
def get_builtin_slot_types(self, locale: str = None, signatureContains: str = None, nextToken: str = None, maxResults: int = None) -> Dict:
pass
def get_export(self, name: str, version: str, resourceType: str, exportType: str) -> Dict:
pass
def get_import(self, importId: str) -> Dict:
pass
def get_intent(self, name: str, version: str) -> Dict:
pass
def get_intent_versions(self, name: str, nextToken: str = None, maxResults: int = None) -> Dict:
pass
def get_intents(self, nextToken: str = None, maxResults: int = None, nameContains: str = None) -> Dict:
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
pass
def get_slot_type(self, name: str, version: str) -> Dict:
pass
def get_slot_type_versions(self, name: str, nextToken: str = None, maxResults: int = None) -> Dict:
pass
def get_slot_types(self, nextToken: str = None, maxResults: int = None, nameContains: str = None) -> Dict:
pass
def get_utterances_view(self, botName: str, botVersions: List, statusType: str) -> Dict:
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
pass
def put_bot(self, name: str, locale: str, childDirected: bool, description: str = None, intents: List = None, clarificationPrompt: Dict = None, abortStatement: Dict = None, idleSessionTTLInSeconds: int = None, voiceId: str = None, checksum: str = None, processBehavior: str = None, createVersion: bool = None) -> Dict:
pass
def put_bot_alias(self, name: str, botVersion: str, botName: str, description: str = None, checksum: str = None) -> Dict:
pass
def put_intent(self, name: str, description: str = None, slots: List = None, sampleUtterances: List = None, confirmationPrompt: Dict = None, rejectionStatement: Dict = None, followUpPrompt: Dict = None, conclusionStatement: Dict = None, dialogCodeHook: Dict = None, fulfillmentActivity: Dict = None, parentIntentSignature: str = None, checksum: str = None, createVersion: bool = None) -> Dict:
pass
def put_slot_type(self, name: str, description: str = None, enumerationValues: List = None, checksum: str = None, valueSelectionStrategy: str = None, createVersion: bool = None) -> Dict:
pass
def start_import(self, payload: bytes, resourceType: str, mergeStrategy: str) -> Dict:
pass
|
45a4de47a9b3cde94f040d3f6a9e9de502fcc26f
|
0b134572e3ac3903ebb44df6d4138cbab9d3327c
|
/app/grandchallenge/retina_api/mixins.py
|
3e720390102348df0c442d1761a2f657ed1bfb27
|
[
"Apache-2.0"
] |
permissive
|
comic/grand-challenge.org
|
660de3bafaf8f4560317f1dfd9ae9585ec272896
|
dac25f93b395974b32ba2a8a5f9e19b84b49e09d
|
refs/heads/main
| 2023-09-01T15:57:14.790244
| 2023-08-31T14:23:04
| 2023-08-31T14:23:04
| 4,557,968
| 135
| 53
|
Apache-2.0
| 2023-09-14T13:41:03
| 2012-06-05T09:26:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,522
|
py
|
mixins.py
|
from django.conf import settings
from django.contrib.auth.mixins import AccessMixin
from rest_framework import permissions
def is_in_retina_graders_group(user):
"""
Checks if the user is in the retina graders group
:param user: Django User model
:return: true/false
"""
return user.groups.filter(name=settings.RETINA_GRADERS_GROUP_NAME).exists()
def is_in_retina_admins_group(user):
"""
Checks if the user is in the retina admins group
:param user: Django User model
:return: true/false
"""
return user.groups.filter(name=settings.RETINA_ADMINS_GROUP_NAME).exists()
def is_in_retina_group(user):
"""
Checks if the user is in the retina graders or retina admins group
:param user: Django User model
:return: true/false
"""
return is_in_retina_graders_group(user) or is_in_retina_admins_group(user)
class RetinaAPIPermission(permissions.BasePermission):
"""
Permission class for APIViews in retina app.
Checks if user is in retina graders or admins group
"""
def has_permission(self, request, view):
return is_in_retina_group(request.user)
class RetinaAPIPermissionMixin(AccessMixin):
"""
Mixin for non APIViews in retina app.
Verify that the current user is in the retina_graders group.
"""
def dispatch(self, request, *args, **kwargs):
if not is_in_retina_group(request.user):
return self.handle_no_permission()
return super().dispatch(request, *args, **kwargs)
|
fae7436de9869df0f9451c3ba19d168f49d4b459
|
c058f51b99f91faebf27183b2b579e9f96e0d8f5
|
/test/sampling/pathwise/features/test_maps.py
|
842d2164c9c65daed6ed5ad58853d820a7c05423
|
[
"MIT"
] |
permissive
|
pytorch/botorch
|
255d62f698cc615c750e9343c278a63c7e96a586
|
4cc5ed59b2e8a9c780f786830c548e05cc74d53c
|
refs/heads/main
| 2023-08-22T15:23:51.071048
| 2023-08-22T05:30:38
| 2023-08-22T05:30:38
| 142,940,093
| 2,891
| 373
|
MIT
| 2023-09-13T00:16:13
| 2018-07-30T23:59:57
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,783
|
py
|
test_maps.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from unittest.mock import MagicMock, patch
import torch
from botorch.sampling.pathwise.features import KernelEvaluationMap, KernelFeatureMap
from botorch.utils.testing import BotorchTestCase
from gpytorch.kernels import MaternKernel
from torch import Size
class TestFeatureMaps(BotorchTestCase):
def test_kernel_evaluation_map(self):
kernel = MaternKernel(nu=2.5, ard_num_dims=2, batch_shape=Size([2]))
kernel.to(device=self.device)
with torch.random.fork_rng():
torch.manual_seed(0)
kernel.lengthscale = 0.1 + 0.3 * torch.rand_like(kernel.lengthscale)
with self.assertRaisesRegex(RuntimeError, "Shape mismatch"):
KernelEvaluationMap(kernel=kernel, points=torch.rand(4, 3, 2))
for dtype in (torch.float32, torch.float64):
kernel.to(dtype=dtype)
X0, X1 = torch.rand(5, 2, dtype=dtype, device=self.device).split([2, 3])
kernel_map = KernelEvaluationMap(kernel=kernel, points=X1)
self.assertEqual(kernel_map.batch_shape, kernel.batch_shape)
self.assertEqual(kernel_map.num_outputs, X1.shape[-1])
self.assertTrue(kernel_map(X0).to_dense().equal(kernel(X0, X1).to_dense()))
with patch.object(
kernel_map, "output_transform", new=lambda z: torch.concat([z, z], dim=-1)
):
self.assertEqual(kernel_map.num_outputs, 2 * X1.shape[-1])
def test_kernel_feature_map(self):
d = 2
m = 3
weight = torch.rand(m, d, device=self.device)
bias = torch.rand(m, device=self.device)
kernel = MaternKernel(nu=2.5, batch_shape=Size([3])).to(self.device)
feature_map = KernelFeatureMap(
kernel=kernel,
weight=weight,
bias=bias,
input_transform=MagicMock(side_effect=lambda x: x),
output_transform=MagicMock(side_effect=lambda z: z.exp()),
)
X = torch.rand(2, d, device=self.device)
features = feature_map(X)
feature_map.input_transform.assert_called_once_with(X)
feature_map.output_transform.assert_called_once()
self.assertTrue((X @ weight.transpose(-2, -1) + bias).exp().equal(features))
# Test batch_shape and num_outputs
self.assertIs(feature_map.batch_shape, kernel.batch_shape)
self.assertEqual(feature_map.num_outputs, weight.shape[-2])
with patch.object(feature_map, "output_transform", new=None):
self.assertEqual(feature_map.num_outputs, weight.shape[-2])
|
7260a6ee524ff3143a05adea7c3cbecd9186e6dc
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/ai/modelscope/modelscope/models/nlp/mglm/arguments.py
|
4fa33c65aabd3f77791e65ef0e8bc2934c2e7a73
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 28,115
|
py
|
arguments.py
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""argparser configuration"""
import argparse
import os
import deepspeed
import json
import torch
from .utils import get_hostname
def add_model_config_args(parser):
"""Model arguments"""
group = parser.add_argument_group('model', 'model configuration')
group.add_argument(
'--transformer-xl',
action='store_true',
help='use transformer-xl for training')
group.add_argument(
'--pretrained-bert',
action='store_true',
help='use a pretrained bert-large-uncased model instead '
'of initializing from scratch. See '
'--tokenizer-model-type to specify which pretrained '
'BERT model to use')
group.add_argument(
'--encoder-decoder',
action='store_true',
help='use the encoder-decoder architecture for blocklm')
group.add_argument(
'--attention-dropout',
type=float,
default=0.1,
help='dropout probability for attention weights')
group.add_argument(
'--num-attention-heads',
type=int,
default=16,
help='num of transformer attention heads')
group.add_argument(
'--hidden-size', type=int, default=1024, help='transformer hidden size')
group.add_argument(
'--intermediate-size',
type=int,
default=None,
help='transformer embedding dimension for FFN '
'set to 4*`--hidden-size` if it is None')
group.add_argument(
'--num-layers', type=int, default=24, help='num decoder layers')
group.add_argument(
'--layernorm-epsilon',
type=float,
default=1e-5,
help='layer norm epsilon')
group.add_argument(
'--hidden-dropout',
type=float,
default=0.1,
help='dropout probability for hidden state transformer')
group.add_argument(
'--output-dropout',
type=float,
default=0.1,
help='dropout probability for pooled output')
group.add_argument(
'--max-position-embeddings',
type=int,
default=512,
help='maximum number of position embeddings to use')
group.add_argument(
'--vocab-size',
type=int,
default=250112,
help='vocab size to use for non-character-level '
'tokenization. This value will only be used when '
'creating a tokenizer')
group.add_argument(
'--deep-init',
action='store_true',
help='initialize bert model similar to gpt2 model. '
'scales initialization of projection layers by a '
'factor of 1/sqrt(2N). Necessary to train bert '
'models larger than BERT-Large.')
group.add_argument(
'--make-vocab-size-divisible-by',
type=int,
default=128,
help='Pad the vocab size to be divisible by this value. '
'This is added for computational efficiency reasons.')
group.add_argument(
'--cpu-optimizer', action='store_true', help='Run optimizer on CPU')
group.add_argument(
'--cpu_torch_adam',
action='store_true',
help='Use Torch Adam as optimizer on CPU.')
return parser
def add_fp16_config_args(parser):
"""Mixed precision arguments."""
group = parser.add_argument_group('fp16', 'fp16 configurations')
group.add_argument(
'--fp16', action='store_true', help='Run model in fp16 mode')
group.add_argument(
'--fp32-embedding', action='store_true', help='embedding in fp32')
group.add_argument(
'--fp32-layernorm', action='store_true', help='layer norm in fp32')
group.add_argument(
'--fp32-tokentypes',
action='store_true',
help='embedding token types in fp32')
group.add_argument(
'--fp32-allreduce', action='store_true', help='all-reduce in fp32')
group.add_argument(
'--hysteresis',
type=int,
default=2,
help='hysteresis for dynamic loss scaling')
group.add_argument(
'--loss-scale',
type=float,
default=None,
help='Static loss scaling, positive power of 2 '
'values can improve fp16 convergence. If None, dynamic '
'loss scaling is used.')
group.add_argument(
'--loss-scale-window',
type=float,
default=1000,
help='Window over which to raise/lower dynamic scale')
group.add_argument(
'--min-scale',
type=float,
default=1,
help='Minimum loss scale for dynamic loss scale')
group.add_argument('--attention-scale', type=float, default=1.0)
return parser
def add_training_args(parser):
"""Training arguments."""
group = parser.add_argument_group('train', 'training configurations')
group.add_argument(
'--experiment-name',
type=str,
default='gpt-345M',
help='The experiment name for summary and checkpoint')
group.add_argument(
'--batch-size', type=int, default=4, help='Data Loader batch size')
group.add_argument(
'--gradient-accumulation-steps',
type=int,
default=1,
help='Data Loader batch size')
group.add_argument(
'--weight-decay',
type=float,
default=0.01,
help='weight decay coefficient for L2 regularization')
group.add_argument(
'--checkpoint-activations',
action='store_true',
help='checkpoint activation to allow for training '
'with larger models and sequences')
group.add_argument(
'--checkpoint-num-layers',
type=int,
default=1,
help='chunk size (number of layers) for checkpointing')
group.add_argument(
'--deepspeed-activation-checkpointing',
action='store_true',
help='uses activation checkpointing from deepspeed')
group.add_argument(
'--epochs',
type=int,
default=None,
help='Number of finetuning epochs. Zero results in evaluation only.')
group.add_argument(
'--clip-grad', type=float, default=1.0, help='gradient clipping')
group.add_argument(
'--train-iters',
type=int,
default=0,
help='total number of iterations to train over all training runs')
group.add_argument('--label-smoothing', type=float, default=0.0)
group.add_argument(
'--log-interval', type=int, default=100, help='report interval')
group.add_argument(
'--summary-dir',
type=str,
default='',
help='The directory to store the summary')
group.add_argument('--seed', type=int, default=1234, help='random seed')
# Batch producer arguments
group.add_argument(
'--reset-position-ids',
action='store_true',
help='Reset position ids after end-of-document token.')
group.add_argument(
'--reset-attention-mask',
action='store_true',
help='Reset self attention mask after '
'end-of-document token.')
# Learning rate.
group.add_argument(
'--lr-decay-iters',
type=int,
default=None,
help='number of iterations to decay LR over,'
' If None defaults to `--train-iters`*`--epochs`')
group.add_argument(
'--lr-decay-style',
type=str,
default='linear',
choices=['constant', 'linear', 'cosine', 'exponential'],
help='learning rate decay function')
group.add_argument('--lr-decay-ratio', type=float, default=0.1)
group.add_argument(
'--lr', type=float, default=1.0e-4, help='initial learning rate')
group.add_argument(
'--warmup',
type=float,
default=0.01,
help='percentage of data to warmup on (.01 = 1% of all '
'training iters). Default 0.01')
group.add_argument(
'--switch-linear',
action='store_true',
help='Switch to linear decay for cosine decay')
# model checkpointing
group.add_argument(
'--save',
type=str,
default=None,
help='Output directory to save checkpoints to.')
group.add_argument('--new-save-directory', action='store_true')
group.add_argument(
'--save-epoch',
type=int,
default=1,
help='number of epochs between saves')
group.add_argument(
'--save-interval',
type=int,
default=5000,
help='number of iterations between saves')
group.add_argument(
'--no-save-optim',
action='store_true',
help='Do not save current optimizer.')
group.add_argument(
'--no-save-rng',
action='store_true',
help='Do not save current rng state.')
group.add_argument(
'--load',
type=str,
default=None,
help='Path to a directory containing a model checkpoint.')
group.add_argument(
'--no-load-optim',
action='store_true',
help='Do not load optimizer when loading checkpoint.')
group.add_argument(
'--no-load-rng',
action='store_true',
help='Do not load rng state when loading checkpoint.')
group.add_argument(
'--no-load-lr-scheduler',
action='store_true',
help='Do not load lr scheduler when loading checkpoint.')
group.add_argument(
'--no-deepspeed-load',
action='store_true',
help='Not use deepspeed when loading checkpoint')
group.add_argument(
'--finetune',
action='store_true',
help='Load model for finetuning. Do not load optimizer '
'or rng state from checkpoint and set iteration to 0. '
'Assumed when loading a release checkpoint.')
group.add_argument(
'--resume-dataloader',
action='store_true',
help='Resume the dataloader when resuming training. '
'Does not apply to tfrecords dataloader, try resuming '
'with a different seed in this case.')
# distributed training args
group.add_argument(
'--distributed-backend',
default='nccl',
help=
'which backend to use for distributed training. One of [gloo, nccl]',
choices=['nccl', 'gloo'])
group.add_argument(
'--DDP-impl',
default='torch',
choices=['local', 'torch', 'none'],
help='which DistributedDataParallel implementation to use.')
group.add_argument(
'--local_rank',
type=int,
default=None,
help='local rank passed from distributed launcher')
# BlockLM training args
group.add_argument(
'--block-lm',
action='store_true',
help='whether use the BlockLM pre-training')
group.add_argument(
'--masked-lm',
action='store_true',
help='whether to use the mlm objective')
group.add_argument('--bert-prob', type=float, default=0.5)
group.add_argument('--gpt-infill-prob', type=float, default=0.5)
group.add_argument('--gpt-min-ratio', type=float, default=0.5)
group.add_argument('--gap-sentence-prob', type=float, default=0.0)
group.add_argument('--gap-sentence-ratio', type=float, default=0.15)
group.add_argument('--avg-block-length', type=int, default=3)
group.add_argument('--short-seq-prob', type=float, default=0.0)
group.add_argument('--single-span-prob', type=float, default=0.0)
group.add_argument(
'--task-mask',
action='store_true',
help='Use different mask for generation and blank filling')
group.add_argument(
'--no-shuffle-block',
action='store_true',
help='not shuffle the blocks when filling the blank')
group.add_argument(
'--no-block-position',
action='store_true',
help='Use (rough) absolute positions instead of block positions')
group.add_argument(
'--sentinel-token',
action='store_true',
help='Use sentinel (mask) tokens to replace 2d position encoding')
group.add_argument('--block-mask-prob', type=float, default=0.0)
group.add_argument('--context-mask-ratio', type=float, default=0.0)
group.add_argument(
'--random-position',
action='store_true',
help='Use random start position to cover all the position embeddings')
return parser
def add_evaluation_args(parser):
"""Evaluation arguments."""
group = parser.add_argument_group('validation',
'validation configurations')
group.add_argument(
'--eval-batch-size',
type=int,
default=None,
help='Data Loader batch size for evaluation datasets. '
'Defaults to `--batch-size`')
group.add_argument(
'--eval-iters',
type=int,
default=100,
help='number of iterations to run for evaluation '
'validation/test for')
group.add_argument(
'--eval-interval',
type=int,
default=1000,
help='interval between running evaluation on validation set')
group.add_argument(
'--eval-epoch',
type=int,
default=1,
help='epoch between running evaluation on validation set')
group.add_argument(
'--eval-seq-length',
type=int,
default=None,
help='Maximum sequence length to process for '
'evaluation. Defaults to `--seq-length`')
group.add_argument(
'--eval-max-preds-per-seq',
type=int,
default=None,
help='Maximum number of predictions to use for '
'evaluation. Defaults to '
'math.ceil(`--eval-seq-length`*.15/10)*10')
group.add_argument('--overlapping-eval', type=int, default=32)
return parser
def add_text_generate_args(parser):
"""Text generate arguments."""
group = parser.add_argument_group('Text generation', 'configurations')
group.add_argument('--temperature', type=float, default=1.0)
group.add_argument('--top_p', type=float, default=0.0)
group.add_argument('--top_k', type=int, default=0)
group.add_argument('--out-seq-length', type=int, default=256)
group.add_argument('--num-beams', type=int, default=1)
group.add_argument('--length-penalty', type=float, default=0.0)
group.add_argument('--no-repeat-ngram-size', type=int, default=0)
group.add_argument('--min-tgt-length', type=int, default=0)
group.add_argument('--select-topk', action='store_true')
group.add_argument('--blank-maskratio', type=float, default=0.1)
return parser
def add_data_args(parser):
"""Train/valid/test data arguments."""
group = parser.add_argument_group('data', 'data configurations')
group.add_argument(
'--model-parallel-size',
type=int,
default=1,
help='size of the model parallel.')
group.add_argument(
'--shuffle',
action='store_true',
help='Shuffle data. Shuffling is deterministic '
'based on seed and current epoch.')
group.add_argument('--filter-english', action='store_true')
group.add_argument(
'--train-data',
nargs='+',
default=None,
help='Whitespace separated filenames or corpora names '
'for training.')
group.add_argument(
'--valid-data',
nargs='*',
default=None,
help="""Filename for validation data.""")
group.add_argument(
'--test-data',
nargs='*',
default=None,
help="""Filename for testing""")
group.add_argument(
'--data-dir',
type=str,
default=None,
help='The data path to all the data files')
group.add_argument(
'--input-data-sizes-file',
type=str,
default='sizes.txt',
help='the filename containing all the shards sizes')
group.add_argument(
'--delim', default=',', help='delimiter used to parse csv data files')
group.add_argument(
'--text-key',
default='sentence',
help='key to use to extract text from json/csv')
group.add_argument(
'--eval-text-key',
default=None,
help='key to use to extract text from '
'json/csv evaluation datasets')
group.add_argument(
'--split',
default='1000,1,1',
help='comma-separated list of proportions for training,'
' validation, and test split')
group.add_argument(
'--no-lazy-loader',
action='store_true',
help='whether to lazy read the data set')
group.add_argument('--half-lazy-loader', action='store_true')
group.add_argument(
'--loader-scatter',
type=int,
default=None,
help='Number of scatters to use for dataloaders')
group.add_argument(
'--loose-json',
action='store_true',
help='Use loose json (one json-formatted string per '
'newline), instead of tight json (data file is one '
'json string)')
group.add_argument(
'--presplit-sentences',
action='store_true',
help='Dataset content consists of documents where '
'each document consists of newline separated sentences')
group.add_argument(
'--num-workers',
type=int,
default=2,
help="""Number of workers to use for dataloading""")
group.add_argument(
'--tokenizer-model-type',
type=str,
default=None,
help="Model type to use for sentencepiece tokenization \
(one of ['bpe', 'char', 'unigram', 'word']) or \
bert vocab to use for BertWordPieceTokenizer (one of \
['bert-large-uncased', 'bert-large-cased', etc.])")
group.add_argument(
'--tokenizer-path',
type=str,
default='tokenizer.model',
help='path used to save/load sentencepiece tokenization '
'models')
group.add_argument(
'--tokenizer-type',
type=str,
default='BertWordPieceTokenizer',
choices=[
'CharacterLevelTokenizer', 'SentencePieceTokenizer',
'BertWordPieceTokenizer', 'GPT2BPETokenizer', 'ChineseSPTokenizer'
],
help='what type of tokenizer to use')
group.add_argument('--no-pre-tokenize', action='store_true')
group.add_argument(
'--cache-dir',
default=None,
type=str,
help='Where to store pre-trained BERT downloads')
group.add_argument(
'--use-tfrecords',
action='store_true',
help='load `--train-data`, `--valid-data`, '
'`--test-data` from BERT tf records instead of '
'normal data pipeline')
group.add_argument(
'--seq-length',
type=int,
default=512,
help='Maximum sequence length to process')
group.add_argument(
'--mem-length',
type=int,
default=0,
help='The memory length to preserve')
group.add_argument(
'--max-preds-per-seq',
type=int,
default=None,
help='Maximum number of predictions to use per sequence. '
'Defaults to math.ceil(`--seq-length`*.15/10)*10. '
'MUST BE SPECIFIED IF `--use-tfrecords` is True.')
group.add_argument('--non-sentence-start', type=float, default=0.0)
group.add_argument(
'--sample-one-document',
action='store_true',
help='only sample one document in one sample')
group.add_argument(
'--load-splits',
type=str,
default=None,
help='The path to load split indices from')
group.add_argument(
'--save-splits',
type=str,
default=None,
help='The path to save split indices to')
group.add_argument(
'--save-test-data',
type=str,
default=None,
help='The path to save the test data')
group.add_argument(
'--multi-task-data',
nargs='*',
default=None,
help='Downsteam task names for multi-task pre-training')
group.add_argument(
'--multi-task-ratio',
type=float,
default=0.0,
help='Ratio for multi-task pre-training')
group.add_argument('--multi-seq-length', type=int, default=None)
group.add_argument('--multi-batch-size', type=int, default=None)
return parser
def add_finetune_config_args(parser):
group = parser.add_argument_group('finetune', 'finetune configurations')
group.add_argument('--task', type=str, help='Task name.')
group.add_argument(
'--load-pretrained',
type=str,
help='Load pretrained model',
default=None)
group.add_argument(
'--pool-token',
type=str,
choices=['start', 'pad', 'cls'],
help='The token to pool the sequence representation',
default='cls')
group.add_argument(
'--cloze-eval',
action='store_true',
help='Evaluation dataset with cloze task')
group.add_argument(
'--multi-token',
action='store_true',
help='Use multi token for cloze evaluation')
group.add_argument(
'--segment-length',
type=int,
default=0,
help='The maximum segment length for cloze evaluation')
group.add_argument(
'--loss-func',
type=str,
choices=['cross_entropy', 'hinge', 'generative', 'mix'],
default='cross_entropy')
group.add_argument('--block-lm-ratio', type=float, default=0.0)
group.add_argument(
'--adapet',
action='store_true',
help='Use the decoupled cross entropy loss in AdaPET')
group.add_argument('--pattern-id', type=int, default=0)
group.add_argument(
'--fast-decode',
action='store_true',
help=
'Fast decode for multi-token cloze. Can only be used without checkpoint activation.'
)
group.add_argument('--few-superglue', action='store_true')
group.add_argument(
'--eval-valid',
action='store_true',
help='Whether evaluate on the valid set')
group.add_argument('--validation-metric', type=str, default=None)
group.add_argument(
'--unidirectional',
action='store_true',
help='Use the left to right language model')
group.add_argument('--src-seq-length', type=int, default=None)
group.add_argument('--tgt-seq-length', type=int, default=None)
group.add_argument('--adam-beta1', type=float, default=0.9)
group.add_argument('--adam-beta2', type=float, default=0.999)
group.add_argument('--adam-eps', type=float, default=1e-8)
group.add_argument(
'--optimizer', type=str, choices=['adam', 'adafactor'], default='adam')
group.add_argument('--wsc-negative', action='store_true')
group.add_argument('--overwrite', action='store_true')
group.add_argument('--no-validation', action='store_true')
# Continuous prompt arguments
group.add_argument(
'--continuous-prompt',
action='store_true',
help='Use continuous prompt for PET')
group.add_argument('--num-prompt-tokens', type=int, default=0)
group.add_argument(
'--prompt-func', default='lstm', choices=['lstm', 'mlp', 'none'])
group.add_argument(
'--freeze-transformer', action='store_true', default=False)
group.add_argument('--tune-prefix-layers', type=int, default=None)
group.add_argument('--prefix-prompt', type=int, default=0)
group.add_argument('--prompt-init', action='store_true', default=False)
return parser
def get_args():
"""Parse all the args."""
parser = argparse.ArgumentParser(description='PyTorch BERT Model')
parser = add_model_config_args(parser)
parser = add_fp16_config_args(parser)
parser = add_training_args(parser)
parser = add_evaluation_args(parser)
parser = add_text_generate_args(parser)
parser = add_data_args(parser)
parser = add_finetune_config_args(parser)
# Include DeepSpeed configuration arguments
parser = deepspeed.add_config_arguments(parser)
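# Note: args=[] below makes argparse ignore sys.argv, so only the defaults declared above apply.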
args = parser.parse_args(args=[])
if not args.train_data and not args.data_dir:
print('WARNING: No training data specified')
args.cuda = torch.cuda.is_available()
args.rank = int(os.getenv('RANK', '0'))
args.world_size = int(os.getenv('WORLD_SIZE', '1'))
if hasattr(args, 'deepspeed_mpi') and args.deepspeed_mpi:
mpi_define_env(args)
elif os.getenv('OMPI_COMM_WORLD_LOCAL_RANK'):
# We are using (OpenMPI) mpirun for launching distributed data parallel processes
local_rank = int(os.getenv('OMPI_COMM_WORLD_LOCAL_RANK'))
local_size = int(os.getenv('OMPI_COMM_WORLD_LOCAL_SIZE'))
# Possibly running with Slurm
num_nodes = int(os.getenv('SLURM_JOB_NUM_NODES', '1'))
nodeid = int(os.getenv('SLURM_NODEID', '0'))
args.local_rank = local_rank
args.rank = nodeid * local_size + local_rank
args.world_size = num_nodes * local_size
args.model_parallel_size = min(args.model_parallel_size, args.world_size)
if args.rank == 0:
print('using world size: {} and model-parallel size: {} '.format(
args.world_size, args.model_parallel_size))
args.dynamic_loss_scale = False
if args.loss_scale is None:
args.dynamic_loss_scale = True
if args.rank == 0:
print(' > using dynamic loss scaling')
# The args fp32_* or fp16_* meant to be active when the
# args fp16 is set. So the default behaviour should all
# be false.
if not args.fp16:
args.fp32_embedding = False
args.fp32_tokentypes = False
args.fp32_layernorm = False
if hasattr(args, 'deepspeed'
) and args.deepspeed and args.deepspeed_config is not None:
with open(args.deepspeed_config, encoding='utf-8') as file:
deepspeed_config = json.load(file)
if 'train_micro_batch_size_per_gpu' in deepspeed_config:
args.batch_size = deepspeed_config[
'train_micro_batch_size_per_gpu']
if 'gradient_accumulation_steps' in deepspeed_config:
args.gradient_accumulation_steps = deepspeed_config[
'gradient_accumulation_steps']
else:
args.gradient_accumulation_steps = 1
if 'optimizer' in deepspeed_config:
optimizer_params_config = deepspeed_config['optimizer'].get(
'params', {})
args.lr = optimizer_params_config.get('lr', args.lr)
args.weight_decay = optimizer_params_config.get(
'weight_decay', args.weight_decay)
return args
def mpi_define_env(args):
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
world_size = comm.Get_size()
master_addr = None
if rank == 0:
master_addr = get_hostname()
master_addr = comm.bcast(master_addr, root=0)
# Determine local rank by assuming hostnames are unique
proc_name = MPI.Get_processor_name()
all_procs = comm.allgather(proc_name)
local_rank = sum([i == proc_name for i in all_procs[:rank]])
os.environ['RANK'] = str(rank)
os.environ['WORLD_SIZE'] = str(world_size)
args.local_rank = local_rank
args.world_size = world_size
args.rank = rank
os.environ['MASTER_ADDR'] = master_addr
os.environ[
'MASTER_PORT'] = '29500' # TORCH_DISTRIBUTED_DEFAULT_PORT = 29500
print(
'Discovered MPI settings of world_rank={}, local_rank={}, world_size={}, master_addr={}, master_port={}'
.format(os.environ['RANK'], args.local_rank, os.environ['WORLD_SIZE'],
os.environ['MASTER_ADDR'], os.environ['MASTER_PORT']))
|
00377ecaf73aa02e217607b4b219b55c50c6562e
|
d6f6d1d1aac892f7555c8ae436430f8da415b455
|
/chempy/properties/tests/test_water_permittivity_bradley_pitzer_1979.py
|
2dd2e87ea116f006bd6f6889a31cf3f09676d768
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
bjodah/chempy
|
9884e1475cb62ec9c5ae3ecc0833efe7e36dbbab
|
1ef1bf6751884b57dc13dc420e1f5c634e954375
|
refs/heads/master
| 2023-08-10T07:47:32.227130
| 2023-07-16T09:59:09
| 2023-07-16T10:13:17
| 36,242,853
| 481
| 92
|
BSD-2-Clause
| 2023-07-16T09:57:03
| 2015-05-25T16:49:18
|
Python
|
UTF-8
|
Python
| false
| false
| 946
|
py
|
test_water_permittivity_bradley_pitzer_1979.py
|
import warnings
from chempy.units import allclose
from ..water_permittivity_bradley_pitzer_1979 import water_permittivity
from chempy.util.testing import requires
from chempy.units import linspace, units_library, default_units as u
def test_water_permittivity():
warnings.filterwarnings("error")
assert abs(water_permittivity(273.15 + 0) - 80) < 1.0
assert abs(water_permittivity(273.15 + 20) - 80.1) < 0.2
assert abs(water_permittivity(273.15 + 100) - 55.3) < 0.5
try:
water_permittivity(1)
except UserWarning:
pass # good: warning raised
else:
raise AssertionError("expected UserWarning")
warnings.resetwarnings()
@requires(units_library)
def test_water_permittivity__units():
assert allclose(
water_permittivity(298.15 * u.K, 1 * u.bar, units=u), 78.38436874203077
)
assert allclose(
water_permittivity(linspace(297.5, 298.65) * u.K, 1 * u.bar, units=u),
78,
rtol=1e-2,
atol=1e-2,
)
|
b499273b366149f19f161a7b9d6ab113f2b0d70f
|
93713f46f16f1e29b725f263da164fed24ebf8a8
|
/Library/lib/python3.7/site-packages/astropy-4.0-py3.7-macosx-10.9-x86_64.egg/astropy/io/misc/asdf/tags/time/timedelta.py
|
530faf046aa31e8f773debf67396bcf04236a562
|
[
"BSD-3-Clause"
] |
permissive
|
holzschu/Carnets
|
b83d15136d25db640cea023abb5c280b26a9620e
|
1ad7ec05fb1e3676ac879585296c513c3ee50ef9
|
refs/heads/master
| 2023-02-20T12:05:14.980685
| 2023-02-13T15:59:23
| 2023-02-13T15:59:23
| 167,671,526
| 541
| 36
|
BSD-3-Clause
| 2022-11-29T03:08:22
| 2019-01-26T09:26:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,147
|
py
|
timedelta.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import functools
from asdf.yamlutil import custom_tree_to_tagged_tree
import numpy as np
from astropy.time import TimeDelta
from ...types import AstropyType
__all__ = ['TimeDeltaType']
allclose_jd = functools.partial(np.allclose, rtol=2. ** -52, atol=0)
allclose_jd2 = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52) # 20 ps atol
allclose_sec = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52 * 24 * 3600) # 20 ps atol
class TimeDeltaType(AstropyType):
name = 'time/timedelta'
types = [TimeDelta]
version = '1.0.0'
@classmethod
def to_tree(cls, obj, ctx):
return custom_tree_to_tagged_tree(obj.info._represent_as_dict(), ctx)
@classmethod
def from_tree(cls, node, ctx):
return TimeDelta.info._construct_from_dict(node)
@classmethod
def assert_equal(cls, old, new):
assert allclose_jd(old.jd, new.jd)
assert allclose_jd2(old.jd2, new.jd2)
assert allclose_sec(old.sec, new.sec)
|
19afef6d37f64e7703d5a5ea87773b993cbf721e
|
dc387b1d0c247aca4d6227a2dcc34c30cd2558fd
|
/translation/gnmt/tensorflow/nmt/nmt.py
|
ce9e683e145e3e92c10d3b5f5a5065ec4930785a
|
[
"Apache-2.0"
] |
permissive
|
mlcommons/inference
|
6f0c725fb3a8d65c349bbd60be23e1db7f8dea74
|
c540fcc99eeacfb5c51de8daa0f8cca339f50799
|
refs/heads/master
| 2023-08-29T11:09:18.621119
| 2023-08-25T16:22:26
| 2023-08-25T16:22:26
| 148,566,613
| 575
| 253
|
Apache-2.0
| 2023-09-11T11:26:45
| 2018-09-13T01:53:57
|
Python
|
UTF-8
|
Python
| false
| false
| 29,871
|
py
|
nmt.py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow NMT model implementation."""
from __future__ import print_function
import argparse
import os
import random
import sys
# import matplotlib.image as mpimg
import numpy as np
import tensorflow as tf
from . import inference
from . import train
from .utils import evaluation_utils
from .utils import misc_utils as utils
from .utils import vocab_utils
utils.check_tensorflow_version()
FLAGS = None
INFERENCE_KEYS = ["src_max_len_infer", "tgt_max_len_infer", "subword_option",
"infer_batch_size", "beam_width",
"length_penalty_weight", "sampling_temperature",
"num_translations_per_input", "infer_mode"]
def add_arguments(parser):
"""Build ArgumentParser."""
parser.register("type", "bool", lambda v: v.lower() == "true")
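# The registered "bool" type lets flags declared with type="bool" parse the strings "true"/"false".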
# network
parser.add_argument("--num_units", type=int, default=32, help="Network size.")
parser.add_argument("--num_layers", type=int, default=2,
help="Network depth.")
parser.add_argument("--num_encoder_layers", type=int, default=None,
help="Encoder depth, equal to num_layers if None.")
parser.add_argument("--num_decoder_layers", type=int, default=None,
help="Decoder depth, equal to num_layers if None.")
parser.add_argument("--encoder_type", type=str, default="uni", help="""\
uni | bi | gnmt.
For bi, we build num_encoder_layers/2 bi-directional layers.
For gnmt, we build 1 bi-directional layer, and (num_encoder_layers - 1)
uni-directional layers.\
""")
parser.add_argument("--residual", type="bool", nargs="?", const=True,
default=False,
help="Whether to add residual connections.")
parser.add_argument("--time_major", type="bool", nargs="?", const=True,
default=True,
help="Whether to use time-major mode for dynamic RNN.")
parser.add_argument("--num_embeddings_partitions", type=int, default=0,
help="Number of partitions for embedding vars.")
# attention mechanisms
parser.add_argument("--attention", type=str, default="", help="""\
luong | scaled_luong | bahdanau | normed_bahdanau or set to "" for no
attention\
""")
parser.add_argument(
"--attention_architecture",
type=str,
default="standard",
help="""\
standard | gnmt | gnmt_v2.
standard: use top layer to compute attention.
gnmt: GNMT style of computing attention, use previous bottom layer to
compute attention.
gnmt_v2: similar to gnmt, but use current bottom layer to compute
attention.\
""")
parser.add_argument(
"--output_attention", type="bool", nargs="?", const=True,
default=True,
help="""\
      Only used in standard attention_architecture. Whether to use attention as
      the cell output at each timestep.\
""")
parser.add_argument(
"--pass_hidden_state", type="bool", nargs="?", const=True,
default=True,
help="""\
Whether to pass encoder's hidden state to decoder when using an attention
based model.\
""")
# optimizer
parser.add_argument("--optimizer", type=str, default="sgd", help="sgd | adam")
parser.add_argument("--learning_rate", type=float, default=1.0,
help="Learning rate. Adam: 0.001 | 0.0001")
parser.add_argument("--warmup_steps", type=int, default=0,
help="How many steps we inverse-decay learning.")
parser.add_argument("--warmup_scheme", type=str, default="t2t", help="""\
How to warmup learning rates. Options include:
t2t: Tensor2Tensor's way, start with lr 100 times smaller, then
exponentiate until the specified lr.\
""")
parser.add_argument(
"--decay_scheme", type=str, default="", help="""\
How we decay learning rate. Options include:
luong234: after 2/3 num train steps, we start halving the learning rate
for 4 times before finishing.
luong5: after 1/2 num train steps, we start halving the learning rate
for 5 times before finishing.\
luong10: after 1/2 num train steps, we start halving the learning rate
for 10 times before finishing.\
""")
parser.add_argument(
"--num_train_steps", type=int, default=12000, help="Num steps to train.")
parser.add_argument("--colocate_gradients_with_ops", type="bool", nargs="?",
const=True,
default=True,
help=("Whether try colocating gradients with "
"corresponding op"))
# initializer
parser.add_argument("--init_op", type=str, default="uniform",
help="uniform | glorot_normal | glorot_uniform")
parser.add_argument("--init_weight", type=float, default=0.1,
help=("for uniform init_op, initialize weights "
"between [-this, this]."))
# data
parser.add_argument("--src", type=str, default=None,
help="Source suffix, e.g., en.")
parser.add_argument("--tgt", type=str, default=None,
help="Target suffix, e.g., de.")
parser.add_argument("--train_prefix", type=str, default=None,
help="Train prefix, expect files with src/tgt suffixes.")
parser.add_argument("--dev_prefix", type=str, default=None,
help="Dev prefix, expect files with src/tgt suffixes.")
parser.add_argument("--test_prefix", type=str, default=None,
help="Test prefix, expect files with src/tgt suffixes.")
parser.add_argument("--out_dir", type=str, default=None,
help="Store log/model files.")
# Vocab
parser.add_argument("--vocab_prefix", type=str, default=None, help="""\
Vocab prefix, expect files with src/tgt suffixes.\
""")
parser.add_argument("--embed_prefix", type=str, default=None, help="""\
Pretrained embedding prefix, expect files with src/tgt suffixes.
      The embedding files should be Glove formatted txt files.\
""")
parser.add_argument("--sos", type=str, default="<s>",
help="Start-of-sentence symbol.")
parser.add_argument("--eos", type=str, default="</s>",
help="End-of-sentence symbol.")
parser.add_argument("--share_vocab", type="bool", nargs="?", const=True,
default=False,
help="""\
Whether to use the source vocab and embeddings for both source and
target.\
""")
parser.add_argument("--check_special_token", type="bool", default=True,
help="""\
                      Whether to check that special sos, eos, unk tokens exist in the
vocab files.\
""")
# Sequence lengths
parser.add_argument("--src_max_len", type=int, default=50,
help="Max length of src sequences during training.")
parser.add_argument("--tgt_max_len", type=int, default=50,
help="Max length of tgt sequences during training.")
parser.add_argument("--src_max_len_infer", type=int, default=None,
help="Max length of src sequences during inference.")
parser.add_argument("--tgt_max_len_infer", type=int, default=None,
help="""\
      Max length of tgt sequences during inference. Also used to restrict the
maximum decoding length.\
""")
# Default settings works well (rarely need to change)
parser.add_argument("--unit_type", type=str, default="lstm",
help="lstm | gru | layer_norm_lstm | nas")
parser.add_argument("--forget_bias", type=float, default=1.0,
help="Forget bias for BasicLSTMCell.")
parser.add_argument("--dropout", type=float, default=0.2,
help="Dropout rate (not keep_prob)")
parser.add_argument("--max_gradient_norm", type=float, default=5.0,
help="Clip gradients to this norm.")
parser.add_argument("--batch_size", type=int, default=128, help="Batch size.")
parser.add_argument("--steps_per_stats", type=int, default=100,
                      help=("How many training steps to do per stats logging. "
"Save checkpoint every 10x steps_per_stats"))
parser.add_argument("--max_train", type=int, default=0,
help="Limit on the size of training data (0: no limit).")
parser.add_argument("--num_buckets", type=int, default=5,
help="Put data into similar-length buckets.")
parser.add_argument("--num_sampled_softmax", type=int, default=0,
                      help=("Use sampled_softmax_loss if > 0. "
"Otherwise, use full softmax loss."))
# SPM
parser.add_argument("--subword_option", type=str, default="",
choices=["", "bpe", "spm"],
help="""\
Set to bpe or spm to activate subword desegmentation.\
""")
# Experimental encoding feature.
parser.add_argument("--use_char_encode", type="bool", default=False,
help="""\
                      Whether to split each word or bpe token into characters, and then
                      generate the word-level representation from the character
                      representation.
""")
# Misc
parser.add_argument("--num_gpus", type=int, default=1,
help="Number of gpus in each worker.")
parser.add_argument("--log_device_placement", type="bool", nargs="?",
const=True, default=False, help="Debug GPU allocation.")
parser.add_argument("--metrics", type=str, default="bleu",
                      help=("Comma-separated list of evaluation "
"metrics (bleu,rouge,accuracy)"))
parser.add_argument("--steps_per_external_eval", type=int, default=None,
help="""\
How many training steps to do per external evaluation. Automatically set
based on data if None.\
""")
parser.add_argument("--scope", type=str, default=None,
help="scope to put variables under")
parser.add_argument("--hparams_path", type=str, default=None,
                      help=("Path to standard hparams json file that overrides "
"hparams values from FLAGS."))
parser.add_argument("--random_seed", type=int, default=None,
help="Random seed (>0, set a specific seed).")
parser.add_argument("--override_loaded_hparams", type="bool", nargs="?",
const=True, default=False,
help="Override loaded hparams with values specified")
parser.add_argument("--num_keep_ckpts", type=int, default=5,
help="Max number of checkpoints to keep.")
parser.add_argument("--avg_ckpts", type="bool", nargs="?",
const=True, default=False, help=("""\
Average the last N checkpoints for external evaluation.
N can be controlled by setting --num_keep_ckpts.\
"""))
parser.add_argument("--language_model", type="bool", nargs="?",
const=True, default=False,
help="True to train a language model, ignoring encoder")
# Inference
parser.add_argument("--ckpt", type=str, default="",
help="Checkpoint file to load a model for inference.")
parser.add_argument("--inference_input_file", type=str, default=None,
help="Set to the text to decode.")
parser.add_argument("--inference_list", type=str, default=None,
help=("A comma-separated list of sentence indices "
"(0-based) to decode."))
parser.add_argument("--infer_batch_size", type=int, default=None,
help="Batch size for inference mode.")
parser.add_argument("--inference_output_file", type=str, default=None,
help="Output file to store decoding results.")
parser.add_argument("--inference_ref_file", type=str, default=None,
help=("""\
Reference file to compute evaluation scores (if provided).\
"""))
# Advanced inference arguments
parser.add_argument("--infer_mode", type=str, default="greedy",
choices=["greedy", "sample", "beam_search"],
help="Which type of decoder to use during inference.")
parser.add_argument("--beam_width", type=int, default=0,
help=("""\
beam width when using beam search decoder. If 0 (default), use standard
decoder with greedy helper.\
"""))
parser.add_argument("--length_penalty_weight", type=float, default=0.0,
help="Length penalty for beam search.")
parser.add_argument("--sampling_temperature", type=float,
default=0.0,
help=("""\
Softmax sampling temperature for inference decoding, 0.0 means greedy
decoding. This option is ignored when using beam search.\
"""))
parser.add_argument("--num_translations_per_input", type=int, default=1,
help=("""\
Number of translations generated for each sentence. This is only used for
inference.\
"""))
# Job info
parser.add_argument("--jobid", type=int, default=0,
help="Task id of the worker.")
parser.add_argument("--num_workers", type=int, default=1,
help="Number of workers (inference only).")
parser.add_argument("--num_inter_threads", type=int, default=0,
help="number of inter_op_parallelism_threads")
parser.add_argument("--num_intra_threads", type=int, default=0,
help="number of intra_op_parallelism_threads")
parser.add_argument("--iterations", type=int, default=1,
help="number of iterations")
parser.add_argument("--workloadName", type=str, default="",
help="name of workload")
parser.add_argument("--run", type=str, default='performance',
                      help="Criteria to run inference for: performance or accuracy.")
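# Example invocation (illustrative; the module path, data paths and flag values
# below are hypothetical, but every flag shown is defined above):
#   python -m nmt.nmt \
#       --src=vi --tgt=en \
#       --vocab_prefix=/tmp/nmt_data/vocab \
#       --train_prefix=/tmp/nmt_data/train \
#       --dev_prefix=/tmp/nmt_data/tst2012 \
#       --test_prefix=/tmp/nmt_data/tst2013 \
#       --out_dir=/tmp/nmt_model \
#       --num_train_steps=12000 --num_units=128 --attention=scaled_luong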
def create_hparams(flags):
"""Create training hparams."""
return tf.contrib.training.HParams(
# Data
src=flags.src,
tgt=flags.tgt,
train_prefix=flags.train_prefix,
dev_prefix=flags.dev_prefix,
test_prefix=flags.test_prefix,
vocab_prefix=flags.vocab_prefix,
embed_prefix=flags.embed_prefix,
out_dir=flags.out_dir,
# Networks
num_units=flags.num_units,
num_encoder_layers=(flags.num_encoder_layers or flags.num_layers),
num_decoder_layers=(flags.num_decoder_layers or flags.num_layers),
dropout=flags.dropout,
unit_type=flags.unit_type,
encoder_type=flags.encoder_type,
residual=flags.residual,
time_major=flags.time_major,
num_embeddings_partitions=flags.num_embeddings_partitions,
# Attention mechanisms
attention=flags.attention,
attention_architecture=flags.attention_architecture,
output_attention=flags.output_attention,
pass_hidden_state=flags.pass_hidden_state,
# Train
optimizer=flags.optimizer,
num_train_steps=flags.num_train_steps,
batch_size=flags.batch_size,
init_op=flags.init_op,
init_weight=flags.init_weight,
max_gradient_norm=flags.max_gradient_norm,
learning_rate=flags.learning_rate,
warmup_steps=flags.warmup_steps,
warmup_scheme=flags.warmup_scheme,
decay_scheme=flags.decay_scheme,
colocate_gradients_with_ops=flags.colocate_gradients_with_ops,
num_sampled_softmax=flags.num_sampled_softmax,
# Data constraints
num_buckets=flags.num_buckets,
max_train=flags.max_train,
src_max_len=flags.src_max_len,
tgt_max_len=flags.tgt_max_len,
# Inference
src_max_len_infer=flags.src_max_len_infer,
tgt_max_len_infer=flags.tgt_max_len_infer,
infer_batch_size=flags.infer_batch_size,
# Advanced inference arguments
infer_mode=flags.infer_mode,
beam_width=flags.beam_width,
length_penalty_weight=flags.length_penalty_weight,
sampling_temperature=flags.sampling_temperature,
num_translations_per_input=flags.num_translations_per_input,
# Vocab
sos=flags.sos if flags.sos else vocab_utils.SOS,
eos=flags.eos if flags.eos else vocab_utils.EOS,
subword_option=flags.subword_option,
check_special_token=flags.check_special_token,
use_char_encode=flags.use_char_encode,
# Misc
forget_bias=flags.forget_bias,
num_gpus=flags.num_gpus,
epoch_step=0, # record where we were within an epoch.
steps_per_stats=flags.steps_per_stats,
steps_per_external_eval=flags.steps_per_external_eval,
share_vocab=flags.share_vocab,
metrics=flags.metrics.split(","),
log_device_placement=flags.log_device_placement,
random_seed=flags.random_seed,
override_loaded_hparams=flags.override_loaded_hparams,
num_keep_ckpts=flags.num_keep_ckpts,
avg_ckpts=flags.avg_ckpts,
language_model=flags.language_model,
num_intra_threads=flags.num_intra_threads,
num_inter_threads=flags.num_inter_threads,
iterations=flags.iterations,
      run=flags.run,
)
def _add_argument(hparams, key, value, update=True):
"""Add an argument to hparams; if exists, change the value if update==True."""
if hasattr(hparams, key):
if update:
setattr(hparams, key, value)
else:
hparams.add_hparam(key, value)
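# Behaviour sketch for _add_argument (illustrative values):
#   given hparams that already contain beam_width=5,
#     _add_argument(hparams, "beam_width", 10)                # beam_width -> 10
#     _add_argument(hparams, "beam_width", 10, update=False)  # beam_width stays 5
#     _add_argument(hparams, "new_key", 1)                    # new_key is added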
def extend_hparams(hparams):
"""Add new arguments to hparams."""
# Sanity checks
if hparams.encoder_type == "bi" and hparams.num_encoder_layers % 2 != 0:
raise ValueError("For bi, num_encoder_layers %d should be even" %
hparams.num_encoder_layers)
if (hparams.attention_architecture in ["gnmt"] and
hparams.num_encoder_layers < 2):
raise ValueError("For gnmt attention architecture, "
"num_encoder_layers %d should be >= 2" %
hparams.num_encoder_layers)
if hparams.subword_option and hparams.subword_option not in ["spm", "bpe"]:
raise ValueError("subword option must be either spm, or bpe")
if hparams.infer_mode == "beam_search" and hparams.beam_width <= 0:
    raise ValueError("beam_width must be greater than 0 when using beam_search "
"decoder.")
if hparams.infer_mode == "sample" and hparams.sampling_temperature <= 0.0:
    raise ValueError("sampling_temperature must be greater than 0.0 when using "
"sample decoder.")
# Different number of encoder / decoder layers
assert hparams.num_encoder_layers and hparams.num_decoder_layers
if hparams.num_encoder_layers != hparams.num_decoder_layers:
hparams.pass_hidden_state = False
    utils.print_out("Num encoder layers %d is different from num decoder layers"
                    " %d, so setting pass_hidden_state to False" % (
hparams.num_encoder_layers,
hparams.num_decoder_layers))
# Set residual layers
num_encoder_residual_layers = 0
num_decoder_residual_layers = 0
if hparams.residual:
if hparams.num_encoder_layers > 1:
num_encoder_residual_layers = hparams.num_encoder_layers - 1
if hparams.num_decoder_layers > 1:
num_decoder_residual_layers = hparams.num_decoder_layers - 1
if hparams.encoder_type == "gnmt":
      # The first unidirectional layer (after the bi-directional layer) in
      # the GNMT encoder can't have a residual connection because its input is
      # the concatenation of the fw_cell and bw_cell outputs.
num_encoder_residual_layers = hparams.num_encoder_layers - 2
# Compatible for GNMT models
if hparams.num_encoder_layers == hparams.num_decoder_layers:
num_decoder_residual_layers = num_encoder_residual_layers
_add_argument(hparams, "num_encoder_residual_layers",
num_encoder_residual_layers)
_add_argument(hparams, "num_decoder_residual_layers",
num_decoder_residual_layers)
# Language modeling
if getattr(hparams, "language_model", None):
hparams.attention = "normed_bahdanau"
hparams.attention_architecture = "gnmt_v2"
hparams.pass_hidden_state = False
hparams.share_vocab = True
hparams.src = hparams.tgt
utils.print_out("For language modeling, we turn off attention and "
"pass_hidden_state; turn on share_vocab; set src to tgt.")
## Vocab
# Get vocab file names first
if hparams.vocab_prefix:
src_vocab_file = hparams.vocab_prefix + "." + hparams.src
tgt_vocab_file = hparams.vocab_prefix + "." + hparams.tgt
else:
raise ValueError("hparams.vocab_prefix must be provided.")
# Source vocab
check_special_token = getattr(hparams, "check_special_token", True)
src_vocab_size, src_vocab_file = vocab_utils.check_vocab(
src_vocab_file,
hparams.out_dir,
check_special_token=check_special_token,
sos=hparams.sos,
eos=hparams.eos,
unk=vocab_utils.UNK)
# Target vocab
if hparams.share_vocab:
utils.print_out(" using source vocab for target")
tgt_vocab_file = src_vocab_file
tgt_vocab_size = src_vocab_size
else:
tgt_vocab_size, tgt_vocab_file = vocab_utils.check_vocab(
tgt_vocab_file,
hparams.out_dir,
check_special_token=check_special_token,
sos=hparams.sos,
eos=hparams.eos,
unk=vocab_utils.UNK)
_add_argument(hparams, "src_vocab_size", src_vocab_size)
_add_argument(hparams, "tgt_vocab_size", tgt_vocab_size)
_add_argument(hparams, "src_vocab_file", src_vocab_file)
_add_argument(hparams, "tgt_vocab_file", tgt_vocab_file)
# Num embedding partitions
num_embeddings_partitions = getattr(hparams, "num_embeddings_partitions", 0)
_add_argument(hparams, "num_enc_emb_partitions", num_embeddings_partitions)
_add_argument(hparams, "num_dec_emb_partitions", num_embeddings_partitions)
# Pretrained Embeddings
_add_argument(hparams, "src_embed_file", "")
_add_argument(hparams, "tgt_embed_file", "")
if getattr(hparams, "embed_prefix", None):
src_embed_file = hparams.embed_prefix + "." + hparams.src
tgt_embed_file = hparams.embed_prefix + "." + hparams.tgt
if tf.gfile.Exists(src_embed_file):
      utils.print_out("  src_embed_file %s exists" % src_embed_file)
hparams.src_embed_file = src_embed_file
utils.print_out(
"For pretrained embeddings, set num_enc_emb_partitions to 1")
hparams.num_enc_emb_partitions = 1
else:
utils.print_out(" src_embed_file %s doesn't exist" % src_embed_file)
if tf.gfile.Exists(tgt_embed_file):
      utils.print_out("  tgt_embed_file %s exists" % tgt_embed_file)
hparams.tgt_embed_file = tgt_embed_file
utils.print_out(
"For pretrained embeddings, set num_dec_emb_partitions to 1")
hparams.num_dec_emb_partitions = 1
else:
utils.print_out(" tgt_embed_file %s doesn't exist" % tgt_embed_file)
# Evaluation
for metric in hparams.metrics:
best_metric_dir = os.path.join(hparams.out_dir, "best_" + metric)
tf.gfile.MakeDirs(best_metric_dir)
_add_argument(hparams, "best_" + metric, 0, update=False)
_add_argument(hparams, "best_" + metric + "_dir", best_metric_dir)
if getattr(hparams, "avg_ckpts", None):
best_metric_dir = os.path.join(hparams.out_dir, "avg_best_" + metric)
tf.gfile.MakeDirs(best_metric_dir)
_add_argument(hparams, "avg_best_" + metric, 0, update=False)
_add_argument(hparams, "avg_best_" + metric + "_dir", best_metric_dir)
return hparams
def ensure_compatible_hparams(hparams, default_hparams, hparams_path=""):
"""Make sure the loaded hparams is compatible with new changes."""
default_hparams = utils.maybe_parse_standard_hparams(
default_hparams, hparams_path)
# Set num encoder/decoder layers (for old checkpoints)
if hasattr(hparams, "num_layers"):
if not hasattr(hparams, "num_encoder_layers"):
hparams.add_hparam("num_encoder_layers", hparams.num_layers)
if not hasattr(hparams, "num_decoder_layers"):
hparams.add_hparam("num_decoder_layers", hparams.num_layers)
  # For compatibility reasons, if there are new fields in default_hparams,
# we add them to the current hparams
default_config = default_hparams.values()
config = hparams.values()
for key in default_config:
if key not in config:
hparams.add_hparam(key, default_config[key])
# Update all hparams' keys if override_loaded_hparams=True
if getattr(default_hparams, "override_loaded_hparams", None):
overwritten_keys = default_config.keys()
else:
# For inference
overwritten_keys = INFERENCE_KEYS
for key in overwritten_keys:
if getattr(hparams, key) != default_config[key]:
utils.print_out("# Updating hparams.%s: %s -> %s" %
(key, str(getattr(hparams, key)),
str(default_config[key])))
setattr(hparams, key, default_config[key])
return hparams
def create_or_load_hparams(
out_dir, default_hparams, hparams_path, save_hparams=True):
"""Create hparams or load hparams from out_dir."""
hparams = utils.load_hparams(out_dir)
if not hparams:
hparams = default_hparams
hparams = utils.maybe_parse_standard_hparams(
hparams, hparams_path)
else:
hparams = ensure_compatible_hparams(hparams, default_hparams, hparams_path)
hparams = extend_hparams(hparams)
# Save HParams
if save_hparams:
utils.save_hparams(out_dir, hparams)
for metric in hparams.metrics:
utils.save_hparams(getattr(hparams, "best_" + metric + "_dir"), hparams)
# Print HParams
utils.print_hparams(hparams)
return hparams
def run_main(flags, default_hparams, train_fn, inference_fn, target_session=""):
"""Run main."""
# Job
jobid = flags.jobid
num_workers = flags.num_workers
utils.print_out("# Job id %d" % jobid)
# Random
random_seed = flags.random_seed
if random_seed is not None and random_seed > 0:
utils.print_out("# Set random seed to %d" % random_seed)
random.seed(random_seed + jobid)
np.random.seed(random_seed + jobid)
# Model output directory
out_dir = flags.out_dir
if out_dir and not tf.gfile.Exists(out_dir):
utils.print_out("# Creating output directory %s ..." % out_dir)
tf.gfile.MakeDirs(out_dir)
# Load hparams.
loaded_hparams = False
if flags.ckpt: # Try to load hparams from the same directory as ckpt
ckpt_dir = os.path.dirname(flags.ckpt)
ckpt_hparams_file = os.path.join(ckpt_dir, "hparams")
if tf.gfile.Exists(ckpt_hparams_file) or flags.hparams_path:
hparams = create_or_load_hparams(
ckpt_dir, default_hparams, flags.hparams_path,
save_hparams=False)
loaded_hparams = True
if not loaded_hparams: # Try to load from out_dir
assert out_dir
hparams = create_or_load_hparams(
out_dir, default_hparams, flags.hparams_path,
save_hparams=(jobid == 0))
# GPU device
config_proto = utils.get_config_proto(
allow_soft_placement=True,
num_intra_threads=hparams.num_intra_threads,
num_inter_threads=hparams.num_inter_threads)
utils.print_out(
"# Devices visible to TensorFlow: %s"
% repr(tf.Session(config=config_proto).list_devices()))
## Train / Decode
if flags.inference_input_file:
# Inference output directory
trans_file = flags.inference_output_file
assert trans_file
trans_dir = os.path.dirname(trans_file)
if not tf.gfile.Exists(trans_dir): tf.gfile.MakeDirs(trans_dir)
# Inference indices
hparams.inference_indices = None
if flags.inference_list:
      hparams.inference_indices = [
          int(token) for token in flags.inference_list.split(",")]
# Inference
ckpt = flags.ckpt
if not ckpt:
ckpt = tf.train.latest_checkpoint(out_dir)
    inference_fn(flags.run, flags.iterations, ckpt, flags.inference_input_file,
trans_file, hparams, num_workers, jobid)
# Evaluation
if flags.run == 'accuracy':
ref_file = flags.inference_ref_file
if ref_file and tf.gfile.Exists(trans_file):
for metric in hparams.metrics:
score = evaluation_utils.evaluate(
ref_file,
trans_file,
metric,
hparams.subword_option)
utils.print_out(" %s: %.1f" % (metric, score))
else:
# Train
train_fn(hparams, target_session=target_session)
def main(unused_argv):
default_hparams = create_hparams(FLAGS)
train_fn = train.train
inference_fn = inference.inference
run_main(FLAGS, default_hparams, train_fn, inference_fn)
if __name__ == "__main__":
nmt_parser = argparse.ArgumentParser()
add_arguments(nmt_parser)
FLAGS, unparsed = nmt_parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
cd5e0182a7151757bed1182a4e470e61ea9f67bc
|
48cd6a93fe538693fec65aaa81306e6b69b642ad
|
/dask/widgets/tests/test_widgets.py
|
3eac0eed5648272bd499f1103a793deae152c7f1
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
dask/dask
|
0138cc2fb9aad27287643fe5ee240b8b09f2300d
|
18098d35298bad21c878c339d73de784612566c7
|
refs/heads/main
| 2023-09-04T02:39:37.886054
| 2023-09-01T19:02:00
| 2023-09-01T19:02:00
| 28,782,747
| 11,423
| 2,116
|
BSD-3-Clause
| 2023-09-14T17:36:04
| 2015-01-04T18:50:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,401
|
py
|
test_widgets.py
|
from __future__ import annotations
import os.path
import pytest
jinja2 = pytest.importorskip("jinja2")
from dask.utils import format_bytes
from dask.widgets import FILTERS, TEMPLATE_PATHS, get_environment, get_template
@pytest.fixture(autouse=True)
def setup_testing():
TEMPLATE_PATHS.append(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "templates")
)
FILTERS["custom_filter"] = lambda x: "baz"
def test_widgets():
template = get_template("example.html.j2")
assert isinstance(template, jinja2.Template)
rendered = template.render(foo="bar")
assert "Hello bar" in rendered
def test_environment():
environment = get_environment()
assert isinstance(environment, jinja2.Environment)
def test_unknown_template():
with pytest.raises(jinja2.TemplateNotFound) as e:
get_template("does_not_exist.html.j2")
    # The error should contain all the registered template directories, including the
    # one we registered in the fixture, to help the user understand where jinja2 is looking.
assert os.path.dirname(os.path.abspath(__file__)) in str(e)
def test_filters():
template = get_template("bytes.html.j2")
assert format_bytes in FILTERS.values()
assert format_bytes(2e9) in template.render(foo=2e9)
template = get_template("custom_filter.html.j2")
assert "baz" in template.render(foo=None)
|
f802611d8e4aa4a5ae7910bac29a4d116b3aa43c
|
b26c41926fa3a7c2c061132d80e91a2750f2f468
|
/tensorflow_probability/python/experimental/auto_batching/test_programs.py
|
8557603cf3da65a018847a39dd9c878ee16d3a37
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/probability
|
22e679a4a883e408f8ef237cda56e3e3dfa42b17
|
42a64ba0d9e0973b1707fcd9b8bd8d14b2d4e3e5
|
refs/heads/main
| 2023-09-04T02:06:08.174935
| 2023-08-31T20:30:00
| 2023-08-31T20:31:33
| 108,053,674
| 4,055
| 1,269
|
Apache-2.0
| 2023-09-13T21:49:49
| 2017-10-23T23:50:54
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 22,302
|
py
|
test_programs.py
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests of the instruction language (and definitional interpreter)."""
# Dependency imports
import numpy as np
from tensorflow_probability.python.experimental.auto_batching import instructions
def constant_program():
"""Constant program: 'ans=1; ans=2; return ans;'.
Returns:
program: `instructions.Program` which returns a constant value.
"""
constant_block = instructions.Block(
[
instructions.prim_op([], "answer", lambda: 1),
instructions.prim_op([], "answer", lambda: 2),
],
instructions.halt_op())
constant_vars = {
"answer": instructions.single_type(np.int64, ()),
}
return instructions.Program(
instructions.ControlFlowGraph([constant_block]), [],
constant_vars, ["answer"], "answer")
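# Note: the single block above assigns answer = 1 and then answer = 2 before
# halting, so interpreting this program yields 2.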
def _strip_types(the_vars):
for k in the_vars:
if k != instructions.pc_var:
the_vars[k] = instructions.Type(None)
def single_if_program():
"""Single if program: 'if (input > 1) ans = 2; else ans = 0; return ans;'.
Returns:
program: `instructions.Program` with a simple conditional.
"""
entry = instructions.Block()
then_ = instructions.Block()
else_ = instructions.Block()
entry.assign_instructions([
instructions.prim_op(["input"], "cond", lambda n: n > 1),
instructions.BranchOp("cond", then_, else_),
])
then_.assign_instructions([
instructions.prim_op([], "answer", lambda: 2),
instructions.halt_op(),
])
else_.assign_instructions([
instructions.prim_op([], "answer", lambda: 0),
instructions.halt_op(),
])
single_if_blocks = [entry, then_, else_]
# pylint: disable=bad-whitespace
single_if_vars = {
"input" : instructions.single_type(np.int64, ()),
"cond" : instructions.single_type(np.bool_, ()),
"answer" : instructions.single_type(np.int64, ()),
}
return instructions.Program(
instructions.ControlFlowGraph(single_if_blocks), [],
single_if_vars, ["input"], "answer")
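# Note: for an input n, the program above returns 2 when n > 1 and 0 otherwise,
# mirroring the docstring.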
def synthetic_pattern_program():
"""A program that tests pattern matching of `PrimOp` outputs.
Returns:
program: `instructions.Program`.
"""
block = instructions.Block(
[
instructions.prim_op(
[], ("one", ("five", "three")), lambda: (1, (2, 3))),
instructions.prim_op(
[], (("four", "five"), "six"), lambda: ((4, 5), 6)),
],
instructions.halt_op())
the_vars = {
"one": instructions.single_type(np.int64, ()),
"three": instructions.single_type(np.int64, ()),
"four": instructions.single_type(np.int64, ()),
"five": instructions.single_type(np.int64, ()),
"six": instructions.single_type(np.int64, ()),
}
return instructions.Program(
instructions.ControlFlowGraph([block]), [],
the_vars, [], (("one", "three"), "four", ("five", "six")))
def synthetic_pattern_variable_program(include_types=True):
"""A program that tests product types.
Args:
include_types: If False, we omit types on the variables, requiring a type
inference pass.
Returns:
program: `instructions.Program`.
"""
block = instructions.Block(
[
instructions.prim_op(
["inp"], "many", lambda x: (x + 1, (x + 2, x + 3))),
instructions.prim_op(["many"], ["one", "two"], lambda x: x),
],
instructions.halt_op())
leaf = instructions.TensorType(np.int64, ())
the_vars = {
"inp": instructions.Type(leaf),
"many": instructions.Type((leaf, (leaf, leaf))),
"one": instructions.Type(leaf),
"two": instructions.Type((leaf, leaf)),
}
if not include_types:
_strip_types(the_vars)
return instructions.Program(
instructions.ControlFlowGraph([block]), [],
the_vars, ["inp"], "two")
def fibonacci_program():
  """More complicated Fibonacci program: computes fib(n), with fib(0) = fib(1) = 1.
Returns:
program: Full-powered `instructions.Program` that computes fib(n).
"""
entry = instructions.Block(name="entry")
enter_fib = instructions.Block(name="enter_fib")
recur1 = instructions.Block(name="recur1")
recur2 = instructions.Block(name="recur2")
recur3 = instructions.Block(name="recur3")
finish = instructions.Block(name="finish")
# pylint: disable=bad-whitespace
entry.assign_instructions([
instructions.PushGotoOp(instructions.halt(), enter_fib),
])
# Definition of fibonacci function starts here
enter_fib.assign_instructions([
instructions.prim_op(
["n"], "cond",
lambda n: n > 1), # cond = n > 1
instructions.BranchOp(
"cond", recur1, finish), # if cond
])
recur1.assign_instructions([
instructions.PopOp(["cond"]), # done with cond now
instructions.prim_op(
["n"], "nm1",
lambda n: n - 1), # nm1 = n - 1
instructions.push_op(["nm1"], ["n"]), # fibm1 = fibonacci(nm1)
instructions.PopOp(["nm1"]), # done with nm1
instructions.PushGotoOp(recur2, enter_fib),
])
recur2.assign_instructions([
instructions.push_op(["ans"], ["fibm1"]), # ...
instructions.PopOp(["ans"]), # pop callee's "ans"
instructions.prim_op(
["n"], "nm2",
lambda n: n - 2), # nm2 = n - 2
instructions.PopOp(["n"]), # done with n
instructions.push_op(["nm2"], ["n"]), # fibm2 = fibonacci(nm2)
instructions.PopOp(["nm2"]), # done with nm2
instructions.PushGotoOp(recur3, enter_fib),
])
recur3.assign_instructions([
instructions.push_op(["ans"], ["fibm2"]), # ...
instructions.PopOp(["ans"]), # pop callee's "ans"
instructions.prim_op(
["fibm1", "fibm2"], "ans",
lambda x, y: x + y), # ans = fibm1 + fibm2
instructions.PopOp(["fibm1", "fibm2"]), # done with fibm1, fibm2
instructions.IndirectGotoOp(), # return ans
])
finish.assign_instructions([ # else:
instructions.PopOp(["n", "cond"]), # done with n, cond
instructions.prim_op(
[], "ans",
lambda : 1), # ans = 1
instructions.IndirectGotoOp(), # return ans
])
fibonacci_blocks = [
entry,
enter_fib,
recur1, recur2, recur3,
finish
]
# pylint: disable=bad-whitespace
fibonacci_vars = {
"n" : instructions.single_type(np.int64, ()),
"cond" : instructions.single_type(np.bool_, ()),
"nm1" : instructions.single_type(np.int64, ()),
"fibm1" : instructions.single_type(np.int64, ()),
"nm2" : instructions.single_type(np.int64, ()),
"fibm2" : instructions.single_type(np.int64, ()),
"ans" : instructions.single_type(np.int64, ()),
}
return instructions.Program(
instructions.ControlFlowGraph(fibonacci_blocks), [],
fibonacci_vars, ["n"], "ans")
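# Note: with fib(0) = fib(1) = 1 as defined above, this program computes
# fib(2) = 2, fib(3) = 3, fib(4) = 5, fib(5) = 8, and so on.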
def is_even_function_calls(include_types=True, dtype=np.int64):
"""The is-even program, via "even-odd" recursion.
Computes True if the input is even, False if the input is odd, by a pair of
mutually recursive functions is_even and is_odd, which return True and False
respectively for <1-valued inputs.
Tests out mutual recursion.
Args:
include_types: If False, we omit types on the variables, requiring a type
inference pass.
dtype: The dtype to use for `n`-like internal state variables.
Returns:
program: Full-powered `instructions.Program` that computes is_even(n).
"""
def pred_type(t):
return instructions.TensorType(np.bool_, t[0].shape)
# Forward declaration of is_odd.
is_odd_func = instructions.Function(None, ["n"], "ans", pred_type)
enter_is_even = instructions.Block()
finish_is_even = instructions.Block()
recur_is_even = instructions.Block()
is_even_func = instructions.Function(None, ["n"], "ans", pred_type)
# pylint: disable=bad-whitespace
# Definition of is_even function
enter_is_even.assign_instructions([
instructions.prim_op(
["n"], "cond", lambda n: n < 1), # cond = n < 1
instructions.BranchOp(
"cond", finish_is_even, recur_is_even), # if cond
])
finish_is_even.assign_instructions([
instructions.PopOp(["n", "cond"]), # done with n, cond
instructions.prim_op(
[], "ans", lambda : True), # ans = True
instructions.halt_op(), # return ans
])
recur_is_even.assign_instructions([ # else
instructions.PopOp(["cond"]), # done with cond now
instructions.prim_op(
["n"], "nm1", lambda n: n - 1), # nm1 = n - 1
instructions.PopOp(["n"]), # done with n
instructions.FunctionCallOp(
is_odd_func, ["nm1"], "ans"), # ans = is_odd(nm1)
instructions.PopOp(["nm1"]), # done with nm1
instructions.halt_op(), # return ans
])
is_even_blocks = [enter_is_even, finish_is_even, recur_is_even]
is_even_func.graph = instructions.ControlFlowGraph(is_even_blocks)
enter_is_odd = instructions.Block()
finish_is_odd = instructions.Block()
recur_is_odd = instructions.Block()
# pylint: disable=bad-whitespace
# Definition of is_odd function
enter_is_odd.assign_instructions([
instructions.prim_op(
["n"], "cond", lambda n: n < 1), # cond = n < 1
instructions.BranchOp(
"cond", finish_is_odd, recur_is_odd), # if cond
])
finish_is_odd.assign_instructions([
instructions.PopOp(["n", "cond"]), # done with n, cond
instructions.prim_op(
[], "ans", lambda : False), # ans = False
instructions.halt_op(), # return ans
])
recur_is_odd.assign_instructions([ # else
instructions.PopOp(["cond"]), # done with cond now
instructions.prim_op(
["n"], "nm1", lambda n: n - 1), # nm1 = n - 1
instructions.PopOp(["n"]), # done with n
instructions.FunctionCallOp(
is_even_func, ["nm1"], "ans"), # ans = is_even(nm1)
instructions.PopOp(["nm1"]), # done with nm1
instructions.halt_op(), # return ans
])
is_odd_blocks = [enter_is_odd, finish_is_odd, recur_is_odd]
is_odd_func.graph = instructions.ControlFlowGraph(is_odd_blocks)
is_even_main_blocks = [
instructions.Block(
[
instructions.FunctionCallOp(is_even_func, ["n1"], "ans"),
],
instructions.halt_op()),
]
# pylint: disable=bad-whitespace
is_even_vars = {
"n" : instructions.single_type(dtype, ()),
"n1" : instructions.single_type(dtype, ()),
"cond" : instructions.single_type(np.bool_, ()),
"nm1" : instructions.single_type(dtype, ()),
"ans" : instructions.single_type(np.bool_, ()),
}
if not include_types:
_strip_types(is_even_vars)
return instructions.Program(
instructions.ControlFlowGraph(is_even_main_blocks),
[is_even_func, is_odd_func],
is_even_vars, ["n1"], "ans")
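# Note: tracing the blocks above, is_even(0) is True, is_even(1) is False
# (via is_odd(0)), and is_even(4) is True.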
def fibonacci_function_calls(include_types=True, dtype=np.int64):
"""The Fibonacci program again, but with `instructions.FunctionCallOp`.
Computes fib(n): fib(0) = fib(1) = 1.
Args:
include_types: If False, we omit types on the variables, requiring a type
inference pass.
dtype: The dtype to use for `n`-like internal state variables.
Returns:
program: Full-powered `instructions.Program` that computes fib(n).
"""
enter_fib = instructions.Block(name="enter_fib")
recur = instructions.Block(name="recur")
finish = instructions.Block(name="finish")
fibonacci_type = lambda types: types[0]
fibonacci_func = instructions.Function(
None, ["n"], "ans", fibonacci_type, name="fibonacci")
# pylint: disable=bad-whitespace
# Definition of fibonacci function
enter_fib.assign_instructions([
instructions.prim_op(
["n"], "cond",
lambda n: n > 1), # cond = n > 1
instructions.BranchOp(
"cond", recur, finish), # if cond
])
recur.assign_instructions([
instructions.prim_op(
["n"], "nm1",
lambda n: n - 1), # nm1 = n - 1
instructions.FunctionCallOp(
fibonacci_func, ["nm1"], "fibm1"), # fibm1 = fibonacci(nm1)
instructions.prim_op(
["n"], "nm2",
lambda n: n - 2), # nm2 = n - 2
instructions.FunctionCallOp(
fibonacci_func, ["nm2"], "fibm2"), # fibm2 = fibonacci(nm2)
instructions.prim_op(
["fibm1", "fibm2"], "ans",
lambda x, y: x + y), # ans = fibm1 + fibm2
instructions.halt_op(), # return ans
])
finish.assign_instructions([ # else:
instructions.prim_op(
[], "ans",
lambda : 1), # ans = 1
instructions.halt_op(), # return ans
])
fibonacci_blocks = [enter_fib, recur, finish]
fibonacci_func.graph = instructions.ControlFlowGraph(fibonacci_blocks)
fibonacci_main_blocks = [
instructions.Block(
[
instructions.FunctionCallOp(fibonacci_func, ["n1"], "ans"),
],
instructions.halt_op(),
name="main_entry"),
]
# pylint: disable=bad-whitespace
fibonacci_vars = {
"n" : instructions.single_type(dtype, ()),
"n1" : instructions.single_type(dtype, ()),
"cond" : instructions.single_type(np.bool_, ()),
"nm1" : instructions.single_type(dtype, ()),
"fibm1" : instructions.single_type(dtype, ()),
"nm2" : instructions.single_type(dtype, ()),
"fibm2" : instructions.single_type(dtype, ()),
"ans" : instructions.single_type(dtype, ()),
}
if not include_types:
_strip_types(fibonacci_vars)
return instructions.Program(
instructions.ControlFlowGraph(fibonacci_main_blocks),
[fibonacci_func], fibonacci_vars, ["n1"], "ans")
def pea_nuts_program(latent_shape, choose_depth, step_state):
"""Synthetic program usable for benchmarking VM performance.
This program is intended to resemble the control flow and scaling
parameters of the NUTS algorithm, without any of the complexity.
Hence the name.
Each batch member looks like:
state = ... # shape latent_shape
def recur(depth, state):
if depth > 1:
state1 = recur(depth - 1, state)
state2 = state1 + 1
state3 = recur(depth - 1, state2)
ans = state3 + 1
else:
ans = step_state(state) # To simulate NUTS, something heavy
return ans
while count > 0:
count = count - 1
depth = choose_depth(count)
state = recur(depth, state)
Args:
latent_shape: Python `tuple` of `int` giving the event shape of the
latent state.
choose_depth: Python `Tensor -> Tensor` callable. The input
`Tensor` will have shape `[batch_size]` (i.e., scalar event
shape), and give the iteration of the outer while loop the
thread is in. The `choose_depth` function must return a `Tensor`
of shape `[batch_size]` giving the depth, for each thread,
to which to call `recur` in this iteration.
step_state: Python `Tensor -> Tensor` callable. The input and
output `Tensor`s will have shape `[batch_size] + latent_shape`.
This function is expected to update the state, and represents
the "real work" versus which the VM overhead is being measured.
Returns:
program: `instructions.Program` that runs the above benchmark.
"""
entry = instructions.Block()
top_body = instructions.Block()
finish_body = instructions.Block()
enter_recur = instructions.Block()
recur_body_1 = instructions.Block()
recur_body_2 = instructions.Block()
recur_body_3 = instructions.Block()
recur_base_case = instructions.Block()
# pylint: disable=bad-whitespace
entry.assign_instructions([
instructions.prim_op(
["count"], "cond",
lambda count: count > 0), # cond = count > 0
instructions.BranchOp(
"cond", top_body,
instructions.halt()), # if cond
])
top_body.assign_instructions([
instructions.PopOp(["cond"]), # done with cond now
instructions.prim_op(
["count"], "ctm1",
lambda count: count - 1), # ctm1 = count - 1
instructions.PopOp(["count"]), # done with count now
instructions.push_op(
["ctm1"], ["count"]), # count = ctm1
instructions.PopOp(["ctm1"]), # done with ctm1
instructions.prim_op(
["count"], "depth",
choose_depth), # depth = choose_depth(count)
instructions.push_op(
["depth", "state"],
["depth", "state"]), # state = recur(depth, state)
instructions.PopOp(
["depth", "state"]), # done with depth, state
instructions.PushGotoOp(
finish_body, enter_recur),
])
finish_body.assign_instructions([
instructions.push_op(
["ans"], ["state"]), # ...
instructions.PopOp(["ans"]), # pop callee's "ans"
instructions.GotoOp(entry), # end of while body
])
# Definition of recur begins here
enter_recur.assign_instructions([
instructions.prim_op(
["depth"], "cond1",
lambda depth: depth > 0), # cond1 = depth > 0
instructions.BranchOp(
"cond1", recur_body_1,
recur_base_case), # if cond1
])
recur_body_1.assign_instructions([
instructions.PopOp(["cond1"]), # done with cond1 now
instructions.prim_op(
["depth"], "dm1",
lambda depth: depth - 1), # dm1 = depth - 1
instructions.PopOp(["depth"]), # done with depth
instructions.push_op(
["dm1", "state"],
["depth", "state"]), # state1 = recur(dm1, state)
instructions.PopOp(["state"]), # done with state
instructions.PushGotoOp(
recur_body_2, enter_recur),
])
recur_body_2.assign_instructions([
instructions.push_op(
["ans"], ["state1"]), # ...
instructions.PopOp(["ans"]), # pop callee's "ans"
instructions.prim_op(
["state1"], "state2",
lambda state: state + 1), # state2 = state1 + 1
instructions.PopOp(["state1"]), # done with state1
instructions.push_op(
["dm1", "state2"],
["depth", "state"]), # state3 = recur(dm1, state2)
instructions.PopOp(
["dm1", "state2"]), # done with dm1, state2
instructions.PushGotoOp(
recur_body_3, enter_recur),
])
recur_body_3.assign_instructions([
instructions.push_op(
["ans"], ["state3"]), # ...
instructions.PopOp(["ans"]), # pop callee's "ans"
instructions.prim_op(
["state3"], "ans",
lambda state: state + 1), # ans = state3 + 1
instructions.PopOp(["state3"]), # done with state3
instructions.IndirectGotoOp(), # return ans
])
recur_base_case.assign_instructions([
instructions.PopOp(
["cond1", "depth"]), # done with cond1, depth
instructions.prim_op(
["state"], "ans", step_state), # ans = step_state(state)
instructions.PopOp(["state"]), # done with state
instructions.IndirectGotoOp(), # return ans
])
pea_nuts_graph = instructions.ControlFlowGraph([
entry,
top_body,
finish_body,
enter_recur,
recur_body_1,
recur_body_2,
recur_body_3,
recur_base_case,
])
# pylint: disable=bad-whitespace
pea_nuts_vars = {
"count" : instructions.single_type(np.int64, ()),
"cond" : instructions.single_type(np.bool_, ()),
"cond1" : instructions.single_type(np.bool_, ()),
"ctm1" : instructions.single_type(np.int64, ()),
"depth" : instructions.single_type(np.int64, ()),
"dm1" : instructions.single_type(np.int64, ()),
"state" : instructions.single_type(np.float32, latent_shape),
"state1" : instructions.single_type(np.float32, latent_shape),
"state2" : instructions.single_type(np.float32, latent_shape),
"state3" : instructions.single_type(np.float32, latent_shape),
"ans" : instructions.single_type(np.float32, latent_shape),
}
return instructions.Program(
pea_nuts_graph, [], pea_nuts_vars, ["count", "state"], "state")
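# Illustrative construction (the shapes and callables below are hypothetical):
#   program = pea_nuts_program(
#       latent_shape=(3,),
#       choose_depth=lambda count: count % 4,
#       step_state=lambda state: state * 0.5 + 1.0)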
|
a4eb76f8b2fa264de03022356955f46332a62f73
|
25c5d243ffac4b4f4f9efcd6a28cb41d51b23c90
|
/src/main/python/tests/manual_tests/multi_log_reg_mnist.py
|
79cda6d8181cb9db6f029f30996190a615f24068
|
[
"Apache-2.0"
] |
permissive
|
apache/systemds
|
5351e8dd9aa842b693e8c148cf3be151697f07a7
|
73555e932a516063c860f5d05c84e6523cc7619b
|
refs/heads/main
| 2023-08-31T03:46:03.010474
| 2023-08-30T18:25:59
| 2023-08-30T18:34:41
| 45,896,813
| 194
| 167
|
Apache-2.0
| 2023-09-13T08:43:37
| 2015-11-10T08:00:06
|
Java
|
UTF-8
|
Python
| false
| false
| 1,565
|
py
|
multi_log_reg_mnist.py
|
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
import logging
from systemds.context import SystemDSContext
from systemds.examples.tutorials.mnist import DataManager
from systemds.operator.algorithm import multiLogReg, multiLogRegPredict
d = DataManager()
with SystemDSContext() as sds:
# Train Data
X = sds.from_numpy(d.get_train_data().reshape((60000, 28*28)))
Y = sds.from_numpy(d.get_train_labels()) + 1.0
bias = multiLogReg(X, Y, tol=0.0001, verbose=False)
# Test data
Xt = sds.from_numpy(d.get_test_data().reshape((10000, 28*28)))
Yt = sds.from_numpy(d.get_test_labels()) + 1.0
[_, _, acc] = multiLogRegPredict(Xt, bias, Yt).compute()
logging.info(acc)
|
e93c678f059c67d515be0b0595861bf79caadae2
|
2d5f297ec3274ce93f1f5592d5b80c2605f8edc5
|
/pydiffvg_tensorflow/shape.py
|
432a3b5dc2fd1b8eb03c306a8123c76e6b9302ff
|
[
"Apache-2.0"
] |
permissive
|
BachiLi/diffvg
|
9ec3e3e7b3674c82ca42b18fe49c69991c076370
|
6f60468bfdef5b9fec8cc3fa47b441dc2720eefc
|
refs/heads/master
| 2023-06-21T18:49:09.604301
| 2023-06-13T17:16:46
| 2023-06-13T17:16:46
| 292,727,955
| 747
| 134
|
Apache-2.0
| 2023-06-13T17:16:47
| 2020-09-04T02:23:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,773
|
py
|
shape.py
|
import tensorflow as tf
import math
class Circle:
def __init__(self, radius, center, stroke_width = tf.constant(1.0), id = ''):
self.radius = radius
self.center = center
self.stroke_width = stroke_width
self.id = id
class Ellipse:
def __init__(self, radius, center, stroke_width = tf.constant(1.0), id = ''):
self.radius = radius
self.center = center
self.stroke_width = stroke_width
self.id = id
class Path:
def __init__(self, num_control_points, points, is_closed, stroke_width = tf.constant(1.0), id = '', use_distance_approx = False):
self.num_control_points = num_control_points
self.points = points
self.is_closed = is_closed
self.stroke_width = stroke_width
self.id = id
self.use_distance_approx = use_distance_approx
class Polygon:
def __init__(self, points, is_closed, stroke_width = tf.constant(1.0), id = ''):
self.points = points
self.is_closed = is_closed
self.stroke_width = stroke_width
self.id = id
class Rect:
def __init__(self, p_min, p_max, stroke_width = tf.constant(1.0), id = ''):
self.p_min = p_min
self.p_max = p_max
self.stroke_width = stroke_width
self.id = id
class ShapeGroup:
def __init__(self,
shape_ids,
fill_color,
use_even_odd_rule = True,
stroke_color = None,
shape_to_canvas = tf.eye(3),
id = ''):
self.shape_ids = shape_ids
self.fill_color = fill_color
self.use_even_odd_rule = use_even_odd_rule
self.stroke_color = stroke_color
self.shape_to_canvas = shape_to_canvas
self.id = id
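# Illustrative usage (hypothetical values), assuming eager TensorFlow:
#   circle = Circle(radius=tf.constant(10.0), center=tf.constant([128.0, 128.0]))
#   rect = Rect(p_min=tf.constant([10.0, 10.0]), p_max=tf.constant([50.0, 80.0]))
#   group = ShapeGroup(shape_ids=tf.constant([0], dtype=tf.int32),
#                      fill_color=tf.constant([0.3, 0.6, 0.9, 1.0]))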
|
ca3cdf1a8655334b28d44b1d680b47172e5bc5f9
|
b32df2ffae14c3ca8083f36f93165c220aef5e44
|
/blueoil/configs/core/keypoint_detection/lm_single_pose_v1_quantize_mscoco.py
|
7ff58d5786eb338b869282803c65b88f58232034
|
[
"Apache-2.0"
] |
permissive
|
blue-oil/blueoil
|
213659909b6eac26dd249f878a03ed732b639539
|
0c9160b524b17482d59ae48a0c11384f1d26dccc
|
refs/heads/master
| 2023-01-24T05:10:54.825811
| 2021-04-22T08:46:56
| 2021-04-22T08:46:56
| 153,597,157
| 252
| 111
|
Apache-2.0
| 2021-05-10T05:02:45
| 2018-10-18T09:19:36
|
Python
|
UTF-8
|
Python
| false
| false
| 3,359
|
py
|
lm_single_pose_v1_quantize_mscoco.py
|
# -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from blueoil.utils.smartdict import SmartDict
import tensorflow as tf
from blueoil.common import Tasks
from blueoil.networks.keypoint_detection.lm_single_pose_v1 import LmSinglePoseV1Quantize
from blueoil.datasets.mscoco_2017 import MscocoSinglePersonKeypoints
from blueoil.data_processor import Sequence
from blueoil.pre_processor import (
DivideBy255,
ResizeWithJoints,
JointsToGaussianHeatmap
)
from blueoil.post_processor import (
GaussianHeatmapToJoints
)
from blueoil.data_augmentor import (
Brightness,
Color,
Contrast
)
from blueoil.quantizations import (
binary_channel_wise_mean_scaling_quantizer,
linear_mid_tread_half_quantizer,
)
IS_DEBUG = False
NETWORK_CLASS = LmSinglePoseV1Quantize
DATASET_CLASS = MscocoSinglePersonKeypoints
IMAGE_SIZE = [256, 320]
BATCH_SIZE = 8
DATA_FORMAT = "NHWC"
TASK = Tasks.KEYPOINT_DETECTION
CLASSES = DATASET_CLASS.classes
MAX_STEPS = 2000000
SAVE_CHECKPOINT_STEPS = 3000
KEEP_CHECKPOINT_MAX = 5
TEST_STEPS = 10000
SUMMARISE_STEPS = 200
# distributed training
IS_DISTRIBUTION = False
# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""
# for debug
# BATCH_SIZE = 2
# SUMMARISE_STEPS = 1
# IS_DEBUG = True
# Stride of the output heatmap. The smaller, the slower.
STRIDE = 8
PRE_PROCESSOR = Sequence([
ResizeWithJoints(image_size=IMAGE_SIZE),
JointsToGaussianHeatmap(image_size=IMAGE_SIZE,
stride=STRIDE, sigma=2),
DivideBy255()
])
POST_PROCESSOR = Sequence([
GaussianHeatmapToJoints(num_dimensions=2, stride=STRIDE, confidence_threshold=0.1)
])
step_per_epoch = 149813 // BATCH_SIZE
NETWORK = SmartDict()
NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.AdamOptimizer
NETWORK.OPTIMIZER_KWARGS = {}
NETWORK.LEARNING_RATE_FUNC = tf.compat.v1.train.piecewise_constant
NETWORK.LEARNING_RATE_KWARGS = {
"values": [1e-4, 1e-3, 1e-4, 1e-5],
"boundaries": [5000, step_per_epoch * 5, step_per_epoch * 10],
}
NETWORK.STRIDE = STRIDE
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
NETWORK.ACTIVATION_QUANTIZER_KWARGS = {
'bit': 2,
'max_value': 2
}
NETWORK.WEIGHT_QUANTIZER = binary_channel_wise_mean_scaling_quantizer
NETWORK.WEIGHT_QUANTIZER_KWARGS = {}
DATASET = SmartDict()
DATASET.IMAGE_SIZE = IMAGE_SIZE
DATASET.BATCH_SIZE = BATCH_SIZE
DATASET.DATA_FORMAT = DATA_FORMAT
DATASET.PRE_PROCESSOR = PRE_PROCESSOR
DATASET.AUGMENTOR = Sequence([
Brightness((0.75, 1.25)),
Color((0.75, 1.25)),
Contrast((0.75, 1.25))
])
DATASET.ENABLE_PREFETCH = True
|
6883e3c83fa60abf8cc296b706b5647dd80e5a08
|
2bbc2628e5b4aaf1e67e04b5485ffc621e088a4d
|
/qa/L0_cmdline_trace/trace_client.py
|
4d59579d7cb55f3e44439c757f4e342369bda54a
|
[
"BSD-3-Clause"
] |
permissive
|
triton-inference-server/server
|
9dbce65aba73ef36a0d2399ed9d63eccb9f84e52
|
0f478f32fe74f591400c3a073e253f7dae8a383e
|
refs/heads/main
| 2023-08-16T16:46:50.059935
| 2023-08-15T22:58:44
| 2023-08-15T22:58:44
| 151,636,194
| 4,711
| 1,085
|
BSD-3-Clause
| 2023-09-14T11:14:08
| 2018-10-04T21:10:30
|
Python
|
UTF-8
|
Python
| false
| false
| 3,064
|
py
|
trace_client.py
|
#!/usr/bin/env python
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import sys
import numpy as np
import tritonclient.grpc as grpcclient
import tritonclient.http as httpclient
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-u",
"--url",
type=str,
required=False,
default="localhost:8001",
help="Inference server URL. Default is localhost:8001.",
)
parser.add_argument("-i", "--protocol", type=str, required=True)
FLAGS = parser.parse_args()
if FLAGS.protocol == "grpc":
client_type = grpcclient
else:
client_type = httpclient
try:
triton_client = client_type.InferenceServerClient(url=FLAGS.url)
except Exception as e:
print("channel creation failed: " + str(e))
sys.exit()
model_name = "simple"
# Infer
inputs = []
outputs = []
inputs.append(client_type.InferInput("INPUT0", [1, 16], "INT32"))
inputs.append(client_type.InferInput("INPUT1", [1, 16], "INT32"))
input0_data = np.arange(start=0, stop=16, dtype=np.int32)
input0_data = np.expand_dims(input0_data, axis=0)
input1_data = np.ones(shape=(1, 16), dtype=np.int32)
inputs[0].set_data_from_numpy(input0_data)
inputs[1].set_data_from_numpy(input1_data)
outputs.append(client_type.InferRequestedOutput("OUTPUT0"))
outputs.append(client_type.InferRequestedOutput("OUTPUT1"))
triton_client.infer(
model_name=model_name, inputs=inputs, outputs=outputs, request_id="1"
)
|
70e39013176d52be9bbff47aaff5dbd409506237
|
0bcd128368e2de959ca648960ffd7944067fcf27
|
/tools/parse_llvm_coverage.py
|
5569fadac98754e0c30100b6ac3506f37f077791
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
google/skia
|
ac6e39179cd33cf0c8a46d29c1a70bf78b4d74ee
|
bf6b239838d3eb56562fffd0856f4047867ae771
|
refs/heads/main
| 2023-08-31T21:03:04.620734
| 2023-08-31T18:24:15
| 2023-08-31T20:20:26
| 15,773,229
| 8,064
| 1,487
|
BSD-3-Clause
| 2023-09-11T13:42:07
| 2014-01-09T17:09:57
|
C++
|
UTF-8
|
Python
| false
| false
| 6,521
|
py
|
parse_llvm_coverage.py
|
#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Parse an LLVM coverage report to generate useable results."""
import argparse
import json
import os
import re
import subprocess
import sys
def _fix_filename(filename):
"""Return a filename which we can use to identify the file.
The file paths printed by llvm-cov take the form:
/path/to/repo/out/dir/../../src/filename.cpp
And then they're truncated to 22 characters with leading ellipses:
...../../src/filename.cpp
This makes it really tough to determine whether the file actually belongs in
the Skia repo. This function strips out the leading junk so that, if the file
exists in the repo, the returned string matches the end of some relative path
in the repo. This doesn't guarantee correctness, but it's about as close as
we can get.
"""
return filename.split('..')[-1].lstrip('./')
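# For example:
#   _fix_filename('/path/to/repo/out/dir/../../src/filename.cpp')  -> 'src/filename.cpp'
#   _fix_filename('...../../src/filename.cpp')                     -> 'src/filename.cpp'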
def _file_in_repo(filename, all_files):
"""Return the name of the checked-in file matching the given filename.
Use suffix matching to determine which checked-in files the given filename
matches. If there are no matches or multiple matches, return None.
"""
new_file = _fix_filename(filename)
matched = []
for f in all_files:
if f.endswith(new_file):
matched.append(f)
if len(matched) == 1:
return matched[0]
elif len(matched) > 1:
print >> sys.stderr, ('WARNING: multiple matches for %s; skipping:\n\t%s'
% (new_file, '\n\t'.join(matched)))
return None
def _get_per_file_per_line_coverage(report):
"""Return a dict whose keys are file names and values are coverage data.
Values are lists which take the form (lineno, coverage, code).
"""
all_files = []
for root, dirs, files in os.walk(os.getcwd()):
if 'third_party/externals' in root:
continue
files = [f for f in files if not (f[0] == '.' or f.endswith('.pyc'))]
dirs[:] = [d for d in dirs if not d[0] == '.']
for name in files:
all_files.append(os.path.join(root[(len(os.getcwd()) + 1):], name))
all_files.sort()
lines = report.splitlines()
current_file = None
file_lines = []
files = {}
not_checked_in = '%' # Use this as the file name for not-checked-in files.
for line in lines:
m = re.match('([a-zA-Z0-9\./_-]+):', line)
if m:
if current_file and current_file != not_checked_in:
files[current_file] = file_lines
match_filename = _file_in_repo(m.groups()[0], all_files)
current_file = match_filename or not_checked_in
file_lines = []
else:
if current_file != not_checked_in:
skip = re.match('^\s{2}-+$|^\s{2}\|.+$', line)
if line and not skip:
cov, linenum, code = line.split('|', 2)
cov = cov.strip()
if cov:
cov = int(cov)
else:
cov = None # We don't care about coverage for this line.
linenum = int(linenum.strip())
assert linenum == len(file_lines) + 1
file_lines.append((linenum, cov, code.decode('utf-8', 'replace')))
return files
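# Note: the dict returned above maps each matched repo-relative path to a list of
# (lineno, coverage_count_or_None, source_line) tuples; files that cannot be matched
# to a checked-in file are skipped entirely.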
def _testname(filename):
"""Transform the file name into an ingestible test name."""
return re.sub(r'[^a-zA-Z0-9]', '_', filename)
def _nanobench_json(results, properties, key):
"""Return the results in JSON format like that produced by nanobench."""
rv = {}
  # Copy over the properties first, then set the 'key' and 'results' keys, so
  # that any 'key' or 'results' entries in the user-supplied properties dict
  # are overwritten rather than clobbering the real values.
rv.update(properties)
rv['key'] = key
rv['results'] = {
_testname(f): {
'coverage': {
'percent': percent,
'lines_not_covered': not_covered_lines,
'options': {
'fullname': f,
'dir': os.path.dirname(f),
'source_type': 'coverage',
},
},
} for percent, not_covered_lines, f in results
}
return rv
def _parse_key_value(kv_list):
"""Return a dict whose key/value pairs are derived from the given list.
For example:
['k1', 'v1', 'k2', 'v2']
becomes:
{'k1': 'v1',
'k2': 'v2'}
"""
if len(kv_list) % 2 != 0:
raise Exception('Invalid key/value pairs: %s' % kv_list)
rv = {}
for i in xrange(len(kv_list) / 2):
rv[kv_list[i*2]] = kv_list[i*2+1]
return rv
def _get_per_file_summaries(line_by_line):
"""Summarize the full line-by-line coverage report by file."""
per_file = []
for filepath, lines in line_by_line.iteritems():
total_lines = 0
covered_lines = 0
for _, cov, _ in lines:
if cov is not None:
total_lines += 1
if cov > 0:
covered_lines += 1
if total_lines > 0:
per_file.append((float(covered_lines)/float(total_lines)*100.0,
total_lines - covered_lines,
filepath))
return per_file
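# Note: each summary entry above is a (percent_covered, lines_not_covered, filepath)
# tuple, which is exactly the shape consumed by _nanobench_json.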
def main():
"""Generate useful data from a coverage report."""
# Parse args.
parser = argparse.ArgumentParser()
parser.add_argument('--report', help='input file; an llvm coverage report.',
required=True)
parser.add_argument('--nanobench', help='output file for nanobench data.')
parser.add_argument(
'--key', metavar='key_or_value', nargs='+',
help='key/value pairs identifying this bot.')
parser.add_argument(
'--properties', metavar='key_or_value', nargs='+',
help='key/value pairs representing properties of this build.')
parser.add_argument('--linebyline',
help='output file for line-by-line JSON data.')
args = parser.parse_args()
if args.nanobench and not (args.key and args.properties):
raise Exception('--key and --properties are required with --nanobench')
with open(args.report) as f:
report = f.read()
line_by_line = _get_per_file_per_line_coverage(report)
if args.linebyline:
with open(args.linebyline, 'w') as f:
json.dump(line_by_line, f)
if args.nanobench:
# Parse the key and properties for use in the nanobench JSON output.
key = _parse_key_value(args.key)
properties = _parse_key_value(args.properties)
# Get per-file summaries.
per_file = _get_per_file_summaries(line_by_line)
# Write results.
format_results = _nanobench_json(per_file, properties, key)
with open(args.nanobench, 'w') as f:
json.dump(format_results, f)
if __name__ == '__main__':
main()
|
1b626af67ece2a296b1aade3bac221bdc4a98a8c
|
51f0733d85502f3fbb6805b785c620390ca07d0a
|
/underactuated/meshcat_cpp_utils.py
|
67acb89d99b2302dd7f0d58cee12fa8515d85f96
|
[
"BSD-3-Clause"
] |
permissive
|
RussTedrake/underactuated
|
17d53574ee4d11fb33768174773ed59adbf67840
|
a37b43e36057f2008d5db3c488ddbc541bdb0975
|
refs/heads/master
| 2023-09-01T13:55:11.063722
| 2023-08-17T10:33:11
| 2023-08-17T10:33:11
| 23,526,294
| 565
| 212
|
NOASSERTION
| 2023-08-19T00:49:08
| 2014-09-01T00:43:31
|
HTML
|
UTF-8
|
Python
| false
| false
| 259
|
py
|
meshcat_cpp_utils.py
|
import warnings
from underactuated.meshcat_utils import * # noqa
warnings.warn(
"underactuated.meshcat_cpp_utils has been renamed to underactuated.meshcat_utils. This shim will be removed after 2023-05-31.",
DeprecationWarning,
stacklevel=2,
)
|
1a3b88f356fa391abf9399b48b918fd109b5f661
|
b889e24f6f68d407cebfa8404d15ec980f596cf2
|
/train_stylegan_cgd.py
|
3f4078d14e45486b44e4551d9d5afbe676066287
|
[
"Apache-2.0"
] |
permissive
|
devzhk/Implicit-Competitive-Regularization
|
e7071c79dd83f28d18191fecaf38b0aa10e0604c
|
71bda29f2db18d1d7ae9860e4a761ff61cbec756
|
refs/heads/master
| 2022-02-02T15:32:29.148198
| 2021-04-01T09:06:10
| 2021-04-01T09:06:10
| 213,550,763
| 115
| 27
|
Apache-2.0
| 2020-12-05T06:08:40
| 2019-10-08T04:45:27
|
Python
|
UTF-8
|
Python
| false
| false
| 10,085
|
py
|
train_stylegan_cgd.py
|
'''
Adapted from https://github.com/rosinality/stylegan2-pytorch/blob/master/train.py
'''
import os
try:
import wandb
except ImportError:
wandb = None
import torch
from torch import nn
from torch.utils import data
from torchvision import transforms, utils
from tqdm import tqdm
from GANs.styleganv2 import Generator, Discriminator
from datas.dataset_utils import MultiResolutionDataset, data_sampler, sample_data
from non_leaking import augment
from losses import d_logistic_loss, d_r1_loss, g_nonsaturating_loss, g_path_regularize
from train_utils import requires_grad, accumulate, mixing_noise
from utils import stylegan_parser
from optims import ACGD, BCGD
def train(args, loader, generator, discriminator, optimizer, g_ema, device):
ckpt_dir = 'checkpoints/stylegan-acgd'
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
fig_dir = 'figs/stylegan-acgd'
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
loader = sample_data(loader)
pbar = range(args.iter)
pbar = tqdm(pbar, initial=args.start_iter, dynamic_ncols=True, smoothing=0.01)
mean_path_length = 0
r1_loss = torch.tensor(0.0, device=device)
path_loss = torch.tensor(0.0, device=device)
path_lengths = torch.tensor(0.0, device=device)
mean_path_length_avg = 0
loss_dict = {}
if args.gpu_num > 1:
g_module = generator.module
d_module = discriminator.module
else:
g_module = generator
d_module = discriminator
accum = 0.5 ** (32 / (10 * 1000))
ada_augment = torch.tensor([0.0, 0.0], device=device)
ada_aug_p = args.augment_p if args.augment_p > 0 else 0.0
ada_aug_step = args.ada_target / args.ada_length
r_t_stat = 0
sample_z = torch.randn(args.n_sample, args.latent, device=device)
for idx in pbar:
i = idx + args.start_iter
if i > args.iter:
print("Done!")
break
real_img = next(loader)
real_img = real_img.to(device)
noise = mixing_noise(args.batch, args.latent, args.mixing, device)
fake_img, _ = generator(noise)
if args.augment:
real_img_aug, _ = augment(real_img, ada_aug_p)
fake_img, _ = augment(fake_img, ada_aug_p)
else:
real_img_aug = real_img
fake_pred = discriminator(fake_img)
real_pred = discriminator(real_img_aug)
d_loss = d_logistic_loss(real_pred, fake_pred)
# d_loss = fake_pred.mean() - real_pred.mean()
loss_dict["loss"] = d_loss.item()
loss_dict["real_score"] = real_pred.mean().item()
loss_dict["fake_score"] = fake_pred.mean().item()
# d_regularize = i % args.d_reg_every == 0
d_regularize = False
if d_regularize:
real_img_cp = real_img.clone().detach()
real_img_cp.requires_grad = True
real_pred_cp = discriminator(real_img_cp)
r1_loss = d_r1_loss(real_pred_cp, real_img_cp)
d_loss += args.r1 / 2 * r1_loss * args.d_reg_every
loss_dict["r1"] = r1_loss.item()
# g_regularize = i % args.g_reg_every == 0
g_regularize = False
if g_regularize: # TODO adapt code for nn.DataParallel
path_batch_size = max(1, args.batch // args.path_batch_shrink)
noise = mixing_noise(path_batch_size, args.latent, args.mixing, device)
fake_img, latents = generator(noise, return_latents=True)
path_loss, mean_path_length, path_lengths = g_path_regularize(
fake_img, latents, mean_path_length
)
generator.zero_grad()
weighted_path_loss = args.path_regularize * args.g_reg_every * path_loss
if args.path_batch_shrink:
weighted_path_loss += 0 * fake_img[0, 0, 0, 0]
d_loss += weighted_path_loss
mean_path_length_avg = mean_path_length.item()
loss_dict["path"] = path_loss.mean().item()
loss_dict["path_length"] = path_lengths.mean().item()
optimizer.step(d_loss)
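        # ACGD is a competitive optimizer: a single step(d_loss) call updates both the
        # generator (max player) and the discriminator (min player), so no separate
        # generator loss or optimizer step is needed in this loop.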
# update ada_aug_p
if args.augment and args.augment_p == 0:
ada_augment_data = torch.tensor(
(torch.sign(real_pred).sum().item(), real_pred.shape[0]), device=device
)
ada_augment += ada_augment_data
if ada_augment[1] > 255:
pred_signs, n_pred = ada_augment.tolist()
r_t_stat = pred_signs / n_pred
if r_t_stat > args.ada_target:
sign = 1
else:
sign = -1
ada_aug_p += sign * ada_aug_step * n_pred
ada_aug_p = min(1, max(0, ada_aug_p))
ada_augment.mul_(0)
accumulate(g_ema, g_module, accum)
d_loss_val = loss_dict["loss"]
r1_val = loss_dict['r1']
path_loss_val = loss_dict["path"]
real_score_val = loss_dict["real_score"]
fake_score_val = loss_dict["fake_score"]
path_length_val = loss_dict["path_length"]
pbar.set_description(
(
f"d: {d_loss_val:.4f}; g: {d_loss_val:.4f}; r1: {r1_val:.4f}; "
f"path: {path_loss_val:.4f}; mean path: {mean_path_length_avg:.4f}; "
f"augment: {ada_aug_p:.4f}"
)
)
if wandb and args.wandb:
wandb.log(
{
"Generator": d_loss_val,
"Discriminator": d_loss_val,
"Augment": ada_aug_p,
"Rt": r_t_stat,
"R1": r1_val,
"Path Length Regularization": path_loss_val,
"Mean Path Length": mean_path_length,
"Real Score": real_score_val,
"Fake Score": fake_score_val,
"Path Length": path_length_val,
}
)
if i % 100 == 0:
with torch.no_grad():
g_ema.eval()
sample, _ = g_ema([sample_z])
utils.save_image(
sample,
f"figs/stylegan-acgd/{str(i).zfill(6)}.png",
nrow=int(args.n_sample ** 0.5),
normalize=True,
range=(-1, 1),
)
if i % 1000 == 0:
torch.save(
{
"g": g_module.state_dict(),
"d": d_module.state_dict(),
"g_ema": g_ema.state_dict(),
"d_optim": optimizer.state_dict(),
"args": args,
"ada_aug_p": ada_aug_p,
},
f"checkpoints/stylegan-acgd/{str(i).zfill(6)}.pt",
)
if __name__ == '__main__':
torch.backends.cudnn.benchmark = True
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(device)
parser = stylegan_parser()
parser.add_argument('--optimizer', type=str, default='ACGD')
parser.add_argument('--lr_d', type=float, default=1e-4)
parser.add_argument('--lr_g', type=float, default=1e-4)
parser.add_argument('--gpu_num', type=int, default=1)
parser.add_argument('--tol', type=float, default=1e-10)
parser.add_argument('--atol', type=float, default=1e-16)
args = parser.parse_args()
args.latent = 512
args.n_mlp = 8
args.start_iter = 0
    args.distributed = False
generator = Generator(
args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier
).to(device)
discriminator = Discriminator(
args.size, channel_multiplier=args.channel_multiplier
).to(device)
g_ema = Generator(
args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier
).to(device)
g_ema.eval()
accumulate(g_ema, generator, 0)
g_reg_ratio = args.g_reg_every / (args.g_reg_every + 1)
d_reg_ratio = args.d_reg_every / (args.d_reg_every + 1)
optimizer = ACGD(max_params=generator.parameters(),
min_params=discriminator.parameters(),
lr_max=args.lr_g, lr_min=args.lr_d,
tol=args.tol, atol=args.atol,
device=device,
beta=0.99 ** g_reg_ratio)
if args.ckpt is not None:
print("load model:", args.ckpt)
ckpt = torch.load(args.ckpt, map_location=lambda storage, loc: storage)
try:
ckpt_name = os.path.basename(args.ckpt)
args.start_iter = int(os.path.splitext(ckpt_name)[0])
except ValueError:
pass
generator.load_state_dict(ckpt["g"])
discriminator.load_state_dict(ckpt["d"])
g_ema.load_state_dict(ckpt["g_ema"])
optimizer.load_state_dict(ckpt["d_optim"])
# TODO: check the following two lines
del ckpt
torch.cuda.empty_cache()
optimizer.set_lr(lr_max=args.lr_g, lr_min=args.lr_d)
if args.gpu_num > 1:
generator = nn.DataParallel(generator, list(range(args.gpu_num)))
discriminator = nn.DataParallel(discriminator, list(range(args.gpu_num)))
transform = transforms.Compose(
[
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True),
]
)
dataset = MultiResolutionDataset(args.path, transform, args.size)
loader = data.DataLoader(
dataset,
batch_size=args.batch,
sampler=data_sampler(dataset, shuffle=True, distributed=args.distributed),
drop_last=True,
)
if wandb is not None and args.wandb:
wandb.init(project="styleganv2-acgd",
config={'lr_d': args.lr_d,
'lr_g': args.lr_g,
'Image size': args.size,
'Batchsize': args.batch,
'CG tolerance': args.tol}
)
train(args, loader, generator, discriminator, optimizer, g_ema, device)
|
4c3ded9c0a2afe4b9e83923036760dc4872f9246
|
3e51fa3888cedcdaee15513a17175b294ec29b56
|
/adminlteui/admin.py
|
238e697d764ac7e3683f9aa917eb8fba12a2980a
|
[
"MIT"
] |
permissive
|
wuyue92tree/django-adminlte-ui
|
4e5273f84e82bfd86cd380c04f6e6685b40ec9e2
|
ece16c646a7925568c3cc9a48dc43dd18baef2ef
|
refs/heads/master
| 2023-07-06T18:19:07.816675
| 2023-07-05T06:00:05
| 2023-07-05T06:00:05
| 193,415,177
| 287
| 77
|
MIT
| 2020-05-06T01:45:37
| 2019-06-24T01:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,399
|
py
|
admin.py
|
from django.contrib import admin
class ModelAdmin(admin.ModelAdmin):
select2_list_filter = ()
search_field_placeholder = ''
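    # select2_list_filter names the list-filter field paths that should render with a
    # select2 widget; search_field_placeholder is attached to the changelist in
    # changelist_view below so the template can use it (e.g. as the search box placeholder).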
class Media:
css = {
"all": (
"admin/components/select2/dist/css/select2.min.css",
# for daterangefilter
"admin/components/bootstrap-daterangepicker/daterangepicker.css"
)
}
js = (
"admin/components/select2/dist/js/select2.min.js",
# for daterangefilter
"admin/components/moment/moment-with-locales.min.js",
"admin/components/bootstrap-daterangepicker/daterangepicker.js",
)
def changelist_view(self, request, extra_context=None):
view = super().changelist_view(request, extra_context)
if hasattr(view, 'context_data'):
cl = view.context_data.get('cl', None)
if cl:
cl.search_field_placeholder = self.search_field_placeholder
filter_specs = cl.filter_specs
for index, filter_spec in enumerate(filter_specs):
if filter_spec.field_path in self.select2_list_filter:
# flag to use select2
filter_spec.display_select2 = True
cl.filter_specs[index] = filter_spec
view.context_data['cl'] = cl
return view
|
00ef6751e16fc8b8c6f510c8d75577a3e15fefd9
|
83544ef94ce2c1a05b6028ae2ce58ef8acfb6fa8
|
/pmca/platform/backup.py
|
f12555d3420a823e917492961e63b958b42a7e2e
|
[
"MIT"
] |
permissive
|
ma1co/Sony-PMCA-RE
|
9ae44c5b09580d62e860c3acff24bd1fac28a31e
|
a82f5baaa8e9c3d9f28f94699e860fb2e48cc8e0
|
refs/heads/master
| 2023-08-07T07:54:13.763912
| 2022-08-18T12:46:04
| 2022-08-18T12:46:04
| 35,510,548
| 1,788
| 228
|
MIT
| 2022-11-05T06:45:01
| 2015-05-12T20:18:25
|
Python
|
UTF-8
|
Python
| false
| false
| 8,053
|
py
|
backup.py
|
import abc
from collections import OrderedDict
import io
from ..backup import *
from ..util import *
class BaseBackupProp(abc.ABC):
def __init__(self, dataInterface, size):
self.dataInterface = dataInterface
self.size = size
@abc.abstractmethod
def read(self):
pass
@abc.abstractmethod
def write(self, data):
pass
class BackupProp(BaseBackupProp):
def __init__(self, dataInterface, id, size):
super(BackupProp, self).__init__(dataInterface, size)
self.id = id
def read(self):
data = self.dataInterface.readProp(self.id)
if len(data) != self.size:
raise Exception('Wrong size')
return data
def write(self, data):
if len(data) != self.size:
raise Exception('Wrong size')
self.dataInterface.writeProp(self.id, data)
class CompoundBackupProp(BaseBackupProp):
def __init__(self, dataInterface, props):
super(CompoundBackupProp, self).__init__(dataInterface, sum(size for id, size in props))
self._props = [BackupProp(dataInterface, id, size) for id, size in props]
def read(self):
return b''.join(prop.read() for prop in self._props)
def write(self, data):
if len(data) != self.size:
raise Exception('Wrong size')
for prop in self._props:
prop.write(data[:prop.size])
data = data[prop.size:]
class BackupDataInterface(abc.ABC):
@abc.abstractmethod
def getRegion(self):
pass
@abc.abstractmethod
def readProp(self, id):
pass
@abc.abstractmethod
def writeProp(self, id, data):
pass
class BackupPlatformDataInterface(BackupDataInterface):
def __init__(self, backend):
self.backend = backend
def getRegion(self):
return self.backend.getBackupStatus()[0x14:].decode('latin1').rstrip('\0')
def readProp(self, id):
return self.backend.readBackup(id)
def writeProp(self, id, data):
self.backend.writeBackup(id, data)
class BackupFileDataInterface(BackupDataInterface):
def __init__(self, file):
self.backup = BackupFile(file)
def getRegion(self):
return self.backup.getRegion()
def readProp(self, id):
return self.backup.getProperty(id).data
def writeProp(self, id, data):
self.backup.setProperty(id, data)
def setProtection(self, enabled):
self.backup.setId1(enabled)
def getSize(self):
return self.backup.size
def updateChecksum(self):
self.backup.updateChecksum()
class BackupPlatformFileDataInterface(BackupFileDataInterface):
def __init__(self, backend):
self.backend = backend
self.file = io.BytesIO(self.backend.getBackupData())
super(BackupPlatformFileDataInterface, self).__init__(self.file)
def apply(self):
self.updateChecksum()
data = self.file.getvalue()
self.backend.setBackupData(data)
if self.backend.getBackupData()[0x100:self.getSize()] != data[0x100:self.getSize()]:
raise Exception('Cannot overwrite backup')
class BackupPatchDataInterface(BackupPlatformFileDataInterface):
def __init__(self, backend):
super(BackupPatchDataInterface, self).__init__(backend)
self.patch = {}
def readProp(self, id):
if id in self.patch:
return self.patch[id]
return super(BackupPatchDataInterface, self).readProp(id)
def writeProp(self, id, data):
if len(data) != len(self.backup.getProperty(id).data):
raise Exception('Wrong data size')
self.patch[id] = data
def getPatch(self):
return self.patch
def setPatch(self, patch):
self.patch = patch
def apply(self):
if not self.patch:
return
patchAttr = {}
for id, data in self.patch.items():
p = self.backup.getProperty(id)
if p.data != data and p.attr & 1:
patchAttr[id] = p.attr
self.backup.setPropertyAttr(id, p.attr & ~1)
self.backup.setProperty(id, data)
try:
super(BackupPatchDataInterface, self).apply()
finally:
if patchAttr:
for id, attr in patchAttr.items():
self.backup.setPropertyAttr(id, attr)
super(BackupPatchDataInterface, self).apply()
class BackupInterface:
def __init__(self, dataInterface):
self.dataInterface = dataInterface
self._props = OrderedDict()
self.addProp('androidPlatformVersion', BackupProp(dataInterface, 0x01660024, 8))
self.addProp('modelCode', BackupProp(dataInterface, 0x00e70000, 5))
self.addProp('modelName', BackupProp(dataInterface, 0x003e0005, 16))
self.addProp('serialNumber', BackupProp(dataInterface, 0x00e70003, 4))
self.addProp('recLimit', CompoundBackupProp(dataInterface, [(0x003c0373 + i, 1) for i in range(3)]))
self.addProp('recLimit4k', BackupProp(dataInterface, 0x003c04b6, 2))
self.addProp('palNtscSelector', BackupProp(dataInterface, 0x01070148, 1))
self.addProp('language', CompoundBackupProp(dataInterface, [(0x010d008f + i, 1) for i in range(35)]))
self.addProp('usbAppInstaller', BackupProp(dataInterface, 0x01640001, 1))
def addProp(self, name, prop):
self._props[name] = prop
def readProp(self, name):
return self._props[name].read()
def writeProp(self, name, data):
return self._props[name].write(data)
def getRegion(self):
return self.dataInterface.getRegion()
def getDefaultLanguages(self, region):
return {
'ALLLANG': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'AP2': [1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'AU2': [1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0],
'CA2': [1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'CE': [1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0],
'CE3': [1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
'CE7': [1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0],
'CEC': [1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0],
'CEH': [1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
'CN1': [1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0],
'CN2': [1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
'E32': [1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0],
'E33': [1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0],
'E37': [1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0],
'E38': [1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
'EA8': [1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0],
'HK1': [1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0],
'IN5': [1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'J1': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'JE3': [1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'KR2': [1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0],
'RU2': [1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0],
'RU3': [1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0],
'TW6': [1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0],
'U2': [1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'UC2': [1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
}[region]
|
360063f5db193f876497bf8a51aa100d528496ff
|
41eb0837713f297134529591b66f3d4d82bcf98e
|
/update/usr/lib/python2.7/site-packages/configgen/generators/mupen/mupenControllers.py
|
072b13a3c78ee73ae1c2acede63c8add7fa6164e
|
[] |
no_license
|
AlexxandreFS/Batocera.PLUS
|
27b196b3cbb781b6fc99e62cad855396d1d5f8f2
|
997ee763ae7135fdf0c34a081e789918bd2eb169
|
refs/heads/master
| 2023-08-17T21:52:39.083687
| 2023-08-17T15:03:44
| 2023-08-17T15:03:44
| 215,869,486
| 135
| 57
| null | 2023-08-14T14:46:14
| 2019-10-17T19:23:42
|
C
|
UTF-8
|
Python
| false
| false
| 8,652
|
py
|
mupenControllers.py
|
#!/usr/bin/env python
import os
import ConfigParser
from controllersConfig import Input
from xml.dom import minidom
import batoceraFiles
# Must read :
# http://mupen64plus.org/wiki/index.php?title=Mupen64Plus_Plugin_Parameters
# Mupen doesn't like having 2 buttons mapped to a single N64 pad entry. That's why r2 is commented out for now; 1 axis plus 1 button is OK.
mupenHatToAxis = {'1': 'Up', '2': 'Right', '4': 'Down', '8': 'Left'}
mupenHatToReverseAxis = {'1': 'Down', '2': 'Left', '4': 'Up', '8': 'Right'}
mupenDoubleAxis = {0:'X Axis', 1:'Y Axis'}
def getMupenMapping():
# load system values and override by user values in case some user values are missing
map = dict()
for file in [batoceraFiles.mupenMappingSystem, batoceraFiles.mupenMappingUser]:
if os.path.exists(file):
dom = minidom.parse(file)
for inputs in dom.getElementsByTagName('inputList'):
for input in inputs.childNodes:
if input.attributes:
if input.attributes['name']:
if input.attributes['value']:
map[input.attributes['name'].value] = input.attributes['value'].value
return map
def setControllersConfig(iniConfig, controllers, systemconfig):
nplayer = 1
for playercontroller, pad in sorted(controllers.items()):
# Dynamic controller bindings
config = defineControllerKeys(pad, systemconfig)
fillIniPlayer(nplayer, iniConfig, pad, config)
nplayer += 1
# remove section with no player
for x in range(nplayer, 4):
section = "Input-SDL-Control"+str(x)
if iniConfig.has_section(section):
cleanPlayer(nplayer, iniConfig)
def defineControllerKeys(controller, systemconfig):
mupenmapping = getMupenMapping()
# config holds the final pad configuration in the mupen style
# ex: config['DPad U'] = "button(1)"
config = dict()
# deadzone and peak from config files
config['AnalogDeadzone'] = mupenmapping['AnalogDeadzone']
config['AnalogPeak'] = mupenmapping['AnalogPeak']
if 'analogdeadzone' in systemconfig:
config['AnalogDeadzone'] = systemconfig['analogdeadzone']
if 'analogpeak' in systemconfig:
config['AnalogPeak'] = systemconfig['analogpeak']
# z is important, in case l2 is not available for this pad, use l1
# assume that l2 is for "Z Trig" in the mapping
if 'l2' not in controller.inputs:
mupenmapping['pageup'] = mupenmapping['l2']
    # if joystick1up is not available, reuse the joystick1* mappings for up/down/left/right, since the dpad keys are more commonly available
if 'joystick1up' not in controller.inputs:
mupenmapping['up'] = mupenmapping['joystick1up']
mupenmapping['down'] = mupenmapping['joystick1down']
mupenmapping['left'] = mupenmapping['joystick1left']
mupenmapping['right'] = mupenmapping['joystick1right']
# the input.xml adds 2 directions per joystick, ES handles just 1
fakeSticks = { 'joystick2up' : 'joystick2down', 'joystick2left' : 'joystick2right'}
# Cheat on the controller
for realStick, fakeStick in fakeSticks.iteritems():
if realStick in controller.inputs:
if controller.inputs[realStick].type == "axis":
print fakeStick + "-> " + realStick
inputVar = Input(fakeStick
, controller.inputs[realStick].type
, controller.inputs[realStick].id
, str(-int(controller.inputs[realStick].value))
, controller.inputs[realStick].code)
controller.inputs[fakeStick] = inputVar
for inputIdx in controller.inputs:
input = controller.inputs[inputIdx]
if input.name in mupenmapping and mupenmapping[input.name] != "":
value=setControllerLine(mupenmapping, input, mupenmapping[input.name])
# Handle multiple inputs for a single N64 Pad input
if value != "":
if mupenmapping[input.name] not in config :
config[mupenmapping[input.name]] = value
else:
config[mupenmapping[input.name]] += " " + value
return config
def setControllerLine(mupenmapping, input, mupenSettingName):
value = ''
inputType = input.type
if inputType == 'button':
value = "button({})".format(input.id)
elif inputType == 'hat':
if mupenSettingName in ["X Axis", "Y Axis"]: # special case for these 2 axis...
if input.value == "1" or input.value == "8": # only for the lower value to avoid duplicate
value = "hat({} {} {})".format(input.id, mupenHatToAxis[input.value], mupenHatToReverseAxis[input.value])
else:
value = "hat({} {})".format(input.id, mupenHatToAxis[input.value])
elif inputType == 'axis':
# Generic case for joystick1up and joystick1left
if mupenSettingName in mupenDoubleAxis.values():
# X axis : value = -1 for left, +1 for right
# Y axis : value = -1 for up, +1 for down
# we configure only left and down to not configure 2 times each axis
if input.name in [ "left", "up", "joystick1left", "joystick1up", "joystick2left", "joystick2up" ]:
if input.value == "-1":
value = "axis({}-,{}+)".format(input.id, input.id)
else:
value = "axis({}+,{}-)".format(input.id, input.id)
else:
if input.value == "1":
value = "axis({}+)".format(input.id)
else:
value = "axis({}-)".format(input.id)
return value
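# Example: assuming "joystick1left" is mapped to "X Axis" in the mapping file, an axis
# input with id 0 and value "-1" yields "axis(0-,0+)", while a plain button input with
# id 3 simply yields "button(3)".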
def fillIniPlayer(nplayer, iniConfig, controller, config):
section = "Input-SDL-Control"+str(nplayer)
# set static config
if not iniConfig.has_section(section):
iniConfig.add_section(section)
iniConfig.set(section, 'Version', '2')
iniConfig.set(section, 'mode', 0)
iniConfig.set(section, 'device', controller.index)
    # TODO (python 3): remove this hack, which works around a ConfigParser limitation with utf8 in python 2.7
name_encode = controller.realName.encode("ascii", "ignore")
iniConfig.set(section, 'name', name_encode)
iniConfig.set(section, 'plugged', True)
iniConfig.set(section, 'plugin', 2)
iniConfig.set(section, 'AnalogDeadzone', config['AnalogDeadzone'])
iniConfig.set(section, 'AnalogPeak', config['AnalogPeak'])
iniConfig.set(section, 'mouse', "False")
# set dynamic config - clear all keys then fill
iniConfig.set(section, "Mempak switch", "")
iniConfig.set(section, "Rumblepak switch", "")
iniConfig.set(section, "C Button R", "")
iniConfig.set(section, "A Button", "")
iniConfig.set(section, "C Button U", "")
iniConfig.set(section, "B Button", "")
iniConfig.set(section, "Start", "")
iniConfig.set(section, "L Trig", "")
iniConfig.set(section, "R Trig", "")
iniConfig.set(section, "Z Trig", "")
iniConfig.set(section, "DPad U", "")
iniConfig.set(section, "DPad D", "")
iniConfig.set(section, "DPad R", "")
iniConfig.set(section, "DPad L", "")
iniConfig.set(section, "Y Axis", "")
iniConfig.set(section, "Y Axis", "")
iniConfig.set(section, "X Axis", "")
iniConfig.set(section, "X Axis", "")
iniConfig.set(section, "C Button U", "")
iniConfig.set(section, "C Button D", "")
iniConfig.set(section, "C Button L", "")
iniConfig.set(section, "C Button R", "")
for inputName in sorted(config):
iniConfig.set(section, inputName, config[inputName])
def cleanPlayer(nplayer, iniConfig):
section = "Input-SDL-Control"+str(nplayer)
# set static config
if not iniConfig.has_section(section):
iniConfig.add_section(section)
iniConfig.set(section, 'Version', '2')
iniConfig.set(section, 'plugged', False)
|
14f4164310d5a9a15c0f9a6e251c11557406a84f
|
2bc18a13c4a65b4005741b979f2cb0193c1e1a01
|
/test/suite/out/E71.py
|
d9c3c51bce3d3091177bd06c114a7fc574603ddd
|
[
"MIT"
] |
permissive
|
hhatto/autopep8
|
b0b9daf78050d981c4355f096418b9283fc20a0f
|
4e869ad63a11575267450bfefdf022bb6128ab93
|
refs/heads/main
| 2023-09-01T05:14:18.553939
| 2023-08-27T14:12:45
| 2023-08-27T14:12:45
| 1,206,729
| 3,966
| 329
|
MIT
| 2023-08-27T14:12:46
| 2010-12-29T20:08:51
|
Python
|
UTF-8
|
Python
| false
| false
| 437
|
py
|
E71.py
|
#: E711
if res is None:
pass
#: E712
if res:
pass
#: E712
if res:
pass
#
#: E713
if X not in Y:
pass
#: E713
if X.B not in Y:
pass
#: E713
if X not in Y and Z == "zero":
pass
#: E713
if X == "zero" or Y not in Z:
pass
#
#: E714
if X is not Y:
pass
#: E714
if X.B is not Y:
pass
#: Okay
if x not in y:
pass
if not (X in Y or X is Z):
pass
if not (X in Y):
pass
if x is not y:
pass
#:
|
77895d6c3493743ae20b9f4e5f4f3a73834b5ba1
|
2871a5c3d1e885ee72332dbd8ff2c015dbcb1200
|
/NLP/FreeTransfer-X/third_party/utils/run_trans_m2m100.py
|
e886ffbcf94677eab45c5df02d36a7ca86804ec9
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
huawei-noah/noah-research
|
297476299ad040552e44656541858145de72d141
|
82c49c36b76987a46dec8479793f7cf0150839c6
|
refs/heads/master
| 2023-08-16T19:29:25.439701
| 2023-08-14T03:11:49
| 2023-08-14T03:11:49
| 272,853,727
| 816
| 171
| null | 2023-09-12T01:28:36
| 2020-06-17T01:53:20
|
Python
|
UTF-8
|
Python
| false
| false
| 5,434
|
py
|
run_trans_m2m100.py
|
#!/usr/bin/env python3
# Copyright (C) 2022. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE
import os
import sys
import argparse
import torch
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
class M2M100(object):
def __init__(self, model_path):
self._model = M2M100ForConditionalGeneration.from_pretrained(model_path)
self._tokenizer = M2M100Tokenizer.from_pretrained(model_path)
self._fids = []
self._dim = ""
self._device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self._model.to(self._device)
def set_src_lang(self, src_lang):
self._tokenizer.src_lang = src_lang
def translate(self, text, tgt_lang, do_sample=False, num_return_sequences=1, temperature=1.0):
# TODO: to improve efficiency, batch translation?
encoded = self._tokenizer(text, return_tensors="pt").to(self._device)
generated_tokens = self._model.generate(**encoded, forced_bos_token_id=self._tokenizer.get_lang_id(tgt_lang),
do_sample=do_sample, num_return_sequences=num_return_sequences, temperature=temperature,
) # prompt-based generation, `transformers.generation_utils`
return self._tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
def set_fields(self, fids, dim):
self._fids = [int(fid) for fid in fids]
self._dim = dim
def translate_fields(self, text, tgt_lang, do_sample=False, num_return_sequences=1, temperature=1.0):
'''return:
trans = [
"{trans_field[0]\ttrans_field[1]\t...}",
"{trans_field[0]\ttrans_field[1]\t...}",
...
]
'''
fields = text.split(self._dim)
trans = []
for i, field in enumerate(fields):
trans_field = []
if i in self._fids:
trans_field = self.translate(field.strip(), tgt_lang,
do_sample=do_sample, num_return_sequences=num_return_sequences, temperature=temperature)
else:
trans_field = [field.strip()] * num_return_sequences
for j in range(num_return_sequences): # 211213
if not trans_field[j].strip():
print(f" [translate_fields] WARNING: empty elements {j}, trans_field = [{trans_field}]")
sys.stdout.flush()
break
if trans:
for j in range(num_return_sequences):
trans[j] = self._dim.join([trans[j], trans_field[j]])
else: # the 1st field
trans.extend(trans_field)
# return self._dim.join(trans)
return trans
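  # Illustrative example: with set_fields(['1'], '\t') and the input line "id\tbonjour",
  # only field 1 is sent to the model, so the result is ["id\t<translation>"].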
def test():
'''test the translation'''
chkpt = "."
model = M2M100(chkpt)
text = "जीवन एक चॉकलेट बॉक्स की तरह है।"
model.set_src_lang("hi")
print(model.translate(text, "en"))
text = "生活就像一盒巧克力。"
model.set_src_lang("zh")
print(model.translate(text, "en"))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--src", type=str, help="")
parser.add_argument("--tgt", type=str, default="en", help="")
parser.add_argument("--text", type=str, help="")
parser.add_argument("--in_files", type=str, default="", help="")
parser.add_argument("--out_dir", type=str, default="", help="")
parser.add_argument("--model_dir", type=str, help="")
parser.add_argument("--test", action="store_true", help="")
parser.add_argument("--fields", type=str, default="", help="field ids, comma-split & 0-start")
parser.add_argument("--fields_dim", type=str, default="\t", help="")
args = parser.parse_args()
if args.test: test()
model = M2M100(args.model_dir)
model.set_src_lang(args.src)
if args.fields:
model.set_fields(args.fields.split(','), args.fields_dim)
if args.in_files:
os.makedirs(args.out_dir, exist_ok=True)
for path in args.in_files.split(','):
filename = os.path.basename(path)
out_path = os.path.join(args.out_dir, filename + f'.{args.tgt}')
with open(path, 'r') as fin, open(out_path, 'w') as fout:
        if args.fields:
          # translate_fields() returns a list of candidate translations; write the first one
          for line in fin: fout.write(model.translate_fields(line.strip(), args.tgt)[0] + '\n')
        else:
          # translate() also returns a list (from batch_decode); write the first candidate
          for line in fin: fout.write(model.translate(line.strip(), args.tgt)[0] + '\n')
elif args.text:
if args.fields: print(model.translate_fields(args.text, args.tgt))
else: print(model.translate(args.text, args.tgt))
else:
raise TypeError(f"{args.text}, {args.in_files}")
|
458c5c1c73328207ada469f6ecb1ceaf57208bb2
|
ec85250addb7357dfe7bb3e0680d53fc7b0fd8fb
|
/examples/docs_snippets/docs_snippets_tests/concepts_tests/types_tests/test_types.py
|
a0a16b09558e569f3bc6a7465c40eb4aefa4d68d
|
[
"Apache-2.0"
] |
permissive
|
dagster-io/dagster
|
6adb5deee8bcf3ea1866a6a64f2ed81e1db5e73a
|
fe21995e0402878437a828c6a4244025eac8c43b
|
refs/heads/master
| 2023-09-05T20:46:08.203794
| 2023-09-05T19:54:52
| 2023-09-05T19:54:52
| 131,619,646
| 8,565
| 1,154
|
Apache-2.0
| 2023-09-14T21:57:37
| 2018-04-30T16:30:04
|
Python
|
UTF-8
|
Python
| false
| false
| 878
|
py
|
test_types.py
|
import pytest
from dagster import DagsterTypeCheckDidNotPass
from docs_snippets.concepts.types.types import test_dagster_type
def test_basic_even_type():
from docs_snippets.concepts.types.types import double_even
double_even(num=2)
with pytest.raises(DagsterTypeCheckDidNotPass):
double_even(num=3)
def test_basic_even_type_with_annotations():
from docs_snippets.concepts.types.types import double_even_with_annotations
double_even_with_annotations(num=2)
with pytest.raises(DagsterTypeCheckDidNotPass):
double_even_with_annotations(num=3)
def test_python_object_dagster_type():
from docs_snippets.concepts.types.object_type import EvenType, double_even
double_even(even_num=EvenType(2))
with pytest.raises(AssertionError):
double_even(even_num=EvenType(3))
def test_unit_test():
test_dagster_type()
|
f55f007bd4e8e3d18886a33044b3d5d30563d809
|
11cd362cdd78c2fc48042ed203614b201ac94aa6
|
/desktop/core/ext-py3/boto-2.49.0/tests/unit/vpc/test_routetable.py
|
4948fa125f0eb03ab5281493ecdf3faa1ffb4054
|
[
"CC-BY-3.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0",
"Unlicense",
"LGPL-3.0-only",
"CC0-1.0",
"LicenseRef-scancode-other-permissive",
"CNRI-Python",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"Python-2.0",
"GPL-3.0-only",
"CC-BY-4.0",
"LicenseRef-scancode-jpython-1.1",
"AFL-2.1",
"JSON",
"WTFPL",
"MIT",
"LicenseRef-scancode-generic-exception",
"LicenseRef-scancode-jython",
"GPL-3.0-or-later",
"LicenseRef-scancode-python-cwi",
"BSD-3-Clause",
"LGPL-3.0-or-later",
"Zlib",
"LicenseRef-scancode-free-unknown",
"Classpath-exception-2.0",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"MPL-2.0",
"ISC",
"GPL-2.0-only",
"ZPL-2.1",
"BSL-1.0",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"Xnet",
"BSD-2-Clause"
] |
permissive
|
cloudera/hue
|
b42343d0e03d2936b5a9a32f8ddb3e9c5c80c908
|
dccb9467675c67b9c3399fc76c5de6d31bfb8255
|
refs/heads/master
| 2023-08-31T06:49:25.724501
| 2023-08-28T20:45:00
| 2023-08-28T20:45:00
| 732,593
| 5,655
| 2,244
|
Apache-2.0
| 2023-09-14T03:05:41
| 2010-06-21T19:46:51
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 20,511
|
py
|
test_routetable.py
|
from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
from boto.vpc import VPCConnection, RouteTable
class TestDescribeRouteTables(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DescribeRouteTablesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>6f570b0b-9c18-4b07-bdec-73740dcf861a</requestId>
<routeTableSet>
<item>
<routeTableId>rtb-13ad487a</routeTableId>
<vpcId>vpc-11ad4878</vpcId>
<routeSet>
<item>
<destinationCidrBlock>10.0.0.0/22</destinationCidrBlock>
<gatewayId>local</gatewayId>
<state>active</state>
<origin>CreateRouteTable</origin>
</item>
</routeSet>
<associationSet>
<item>
<routeTableAssociationId>rtbassoc-12ad487b</routeTableAssociationId>
<routeTableId>rtb-13ad487a</routeTableId>
<main>true</main>
</item>
</associationSet>
<tagSet/>
</item>
<item>
<routeTableId>rtb-f9ad4890</routeTableId>
<vpcId>vpc-11ad4878</vpcId>
<routeSet>
<item>
<destinationCidrBlock>10.0.0.0/22</destinationCidrBlock>
<gatewayId>local</gatewayId>
<state>active</state>
<origin>CreateRouteTable</origin>
</item>
<item>
<destinationCidrBlock>0.0.0.0/0</destinationCidrBlock>
<gatewayId>igw-eaad4883</gatewayId>
<state>active</state>
<origin>CreateRoute</origin>
</item>
<item>
<destinationCidrBlock>10.0.0.0/21</destinationCidrBlock>
<networkInterfaceId>eni-884ec1d1</networkInterfaceId>
<state>blackhole</state>
<origin>CreateRoute</origin>
</item>
<item>
<destinationCidrBlock>11.0.0.0/22</destinationCidrBlock>
<vpcPeeringConnectionId>pcx-efc52b86</vpcPeeringConnectionId>
<state>blackhole</state>
<origin>CreateRoute</origin>
</item>
</routeSet>
<associationSet>
<item>
<routeTableAssociationId>rtbassoc-faad4893</routeTableAssociationId>
<routeTableId>rtb-f9ad4890</routeTableId>
<subnetId>subnet-15ad487c</subnetId>
</item>
</associationSet>
<tagSet/>
</item>
</routeTableSet>
</DescribeRouteTablesResponse>
"""
def test_get_all_route_tables(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.get_all_route_tables(
['rtb-13ad487a', 'rtb-f9ad4890'], filters=[('route.state', 'active')])
self.assert_request_parameters({
'Action': 'DescribeRouteTables',
'RouteTableId.1': 'rtb-13ad487a',
'RouteTableId.2': 'rtb-f9ad4890',
'Filter.1.Name': 'route.state',
'Filter.1.Value.1': 'active'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(len(api_response), 2)
self.assertIsInstance(api_response[0], RouteTable)
self.assertEquals(api_response[0].id, 'rtb-13ad487a')
self.assertEquals(len(api_response[0].routes), 1)
self.assertEquals(api_response[0].routes[0].destination_cidr_block, '10.0.0.0/22')
self.assertEquals(api_response[0].routes[0].gateway_id, 'local')
self.assertEquals(api_response[0].routes[0].state, 'active')
self.assertEquals(api_response[0].routes[0].origin, 'CreateRouteTable')
self.assertEquals(len(api_response[0].associations), 1)
self.assertEquals(api_response[0].associations[0].id, 'rtbassoc-12ad487b')
self.assertEquals(api_response[0].associations[0].route_table_id, 'rtb-13ad487a')
self.assertIsNone(api_response[0].associations[0].subnet_id)
self.assertEquals(api_response[0].associations[0].main, True)
self.assertEquals(api_response[1].id, 'rtb-f9ad4890')
self.assertEquals(len(api_response[1].routes), 4)
self.assertEquals(api_response[1].routes[0].destination_cidr_block, '10.0.0.0/22')
self.assertEquals(api_response[1].routes[0].gateway_id, 'local')
self.assertEquals(api_response[1].routes[0].state, 'active')
self.assertEquals(api_response[1].routes[0].origin, 'CreateRouteTable')
self.assertEquals(api_response[1].routes[1].destination_cidr_block, '0.0.0.0/0')
self.assertEquals(api_response[1].routes[1].gateway_id, 'igw-eaad4883')
self.assertEquals(api_response[1].routes[1].state, 'active')
self.assertEquals(api_response[1].routes[1].origin, 'CreateRoute')
self.assertEquals(api_response[1].routes[2].destination_cidr_block, '10.0.0.0/21')
self.assertEquals(api_response[1].routes[2].interface_id, 'eni-884ec1d1')
self.assertEquals(api_response[1].routes[2].state, 'blackhole')
self.assertEquals(api_response[1].routes[2].origin, 'CreateRoute')
self.assertEquals(api_response[1].routes[3].destination_cidr_block, '11.0.0.0/22')
self.assertEquals(api_response[1].routes[3].vpc_peering_connection_id, 'pcx-efc52b86')
self.assertEquals(api_response[1].routes[3].state, 'blackhole')
self.assertEquals(api_response[1].routes[3].origin, 'CreateRoute')
self.assertEquals(len(api_response[1].associations), 1)
self.assertEquals(api_response[1].associations[0].id, 'rtbassoc-faad4893')
self.assertEquals(api_response[1].associations[0].route_table_id, 'rtb-f9ad4890')
self.assertEquals(api_response[1].associations[0].subnet_id, 'subnet-15ad487c')
self.assertEquals(api_response[1].associations[0].main, False)
class TestAssociateRouteTable(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<AssociateRouteTableResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<associationId>rtbassoc-f8ad4891</associationId>
</AssociateRouteTableResponse>
"""
def test_associate_route_table(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.associate_route_table(
'rtb-e4ad488d', 'subnet-15ad487c')
self.assert_request_parameters({
'Action': 'AssociateRouteTable',
'RouteTableId': 'rtb-e4ad488d',
'SubnetId': 'subnet-15ad487c'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, 'rtbassoc-f8ad4891')
class TestDisassociateRouteTable(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DisassociateRouteTableResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DisassociateRouteTableResponse>
"""
def test_disassociate_route_table(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.disassociate_route_table('rtbassoc-fdad4894')
self.assert_request_parameters({
'Action': 'DisassociateRouteTable',
'AssociationId': 'rtbassoc-fdad4894'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
class TestCreateRouteTable(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<CreateRouteTableResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<routeTable>
<routeTableId>rtb-f9ad4890</routeTableId>
<vpcId>vpc-11ad4878</vpcId>
<routeSet>
<item>
<destinationCidrBlock>10.0.0.0/22</destinationCidrBlock>
<gatewayId>local</gatewayId>
<state>active</state>
</item>
</routeSet>
<associationSet/>
<tagSet/>
</routeTable>
</CreateRouteTableResponse>
"""
def test_create_route_table(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_route_table('vpc-11ad4878')
self.assert_request_parameters({
'Action': 'CreateRouteTable',
'VpcId': 'vpc-11ad4878'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertIsInstance(api_response, RouteTable)
self.assertEquals(api_response.id, 'rtb-f9ad4890')
self.assertEquals(len(api_response.routes), 1)
self.assertEquals(api_response.routes[0].destination_cidr_block, '10.0.0.0/22')
self.assertEquals(api_response.routes[0].gateway_id, 'local')
self.assertEquals(api_response.routes[0].state, 'active')
class TestDeleteRouteTable(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DeleteRouteTableResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DeleteRouteTableResponse>
"""
def test_delete_route_table(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.delete_route_table('rtb-e4ad488d')
self.assert_request_parameters({
'Action': 'DeleteRouteTable',
'RouteTableId': 'rtb-e4ad488d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
class TestReplaceRouteTableAssociation(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<ReplaceRouteTableAssociationResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<newAssociationId>rtbassoc-faad4893</newAssociationId>
</ReplaceRouteTableAssociationResponse>
"""
def test_replace_route_table_assocation(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.replace_route_table_assocation(
'rtbassoc-faad4893', 'rtb-f9ad4890')
self.assert_request_parameters({
'Action': 'ReplaceRouteTableAssociation',
'AssociationId': 'rtbassoc-faad4893',
'RouteTableId': 'rtb-f9ad4890'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
def test_replace_route_table_association_with_assoc(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.replace_route_table_association_with_assoc(
'rtbassoc-faad4893', 'rtb-f9ad4890')
self.assert_request_parameters({
'Action': 'ReplaceRouteTableAssociation',
'AssociationId': 'rtbassoc-faad4893',
'RouteTableId': 'rtb-f9ad4890'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, 'rtbassoc-faad4893')
class TestCreateRoute(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<CreateRouteResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</CreateRouteResponse>
"""
def test_create_route_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_route(
'rtb-e4ad488d', '0.0.0.0/0', gateway_id='igw-eaad4883')
self.assert_request_parameters({
'Action': 'CreateRoute',
'RouteTableId': 'rtb-e4ad488d',
'DestinationCidrBlock': '0.0.0.0/0',
'GatewayId': 'igw-eaad4883'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
def test_create_route_instance(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_route(
'rtb-g8ff4ea2', '0.0.0.0/0', instance_id='i-1a2b3c4d')
self.assert_request_parameters({
'Action': 'CreateRoute',
'RouteTableId': 'rtb-g8ff4ea2',
'DestinationCidrBlock': '0.0.0.0/0',
'InstanceId': 'i-1a2b3c4d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
def test_create_route_interface(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_route(
'rtb-g8ff4ea2', '0.0.0.0/0', interface_id='eni-1a2b3c4d')
self.assert_request_parameters({
'Action': 'CreateRoute',
'RouteTableId': 'rtb-g8ff4ea2',
'DestinationCidrBlock': '0.0.0.0/0',
'NetworkInterfaceId': 'eni-1a2b3c4d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
def test_create_route_vpc_peering_connection(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_route(
'rtb-g8ff4ea2', '0.0.0.0/0', vpc_peering_connection_id='pcx-1a2b3c4d')
self.assert_request_parameters({
'Action': 'CreateRoute',
'RouteTableId': 'rtb-g8ff4ea2',
'DestinationCidrBlock': '0.0.0.0/0',
'VpcPeeringConnectionId': 'pcx-1a2b3c4d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
class TestReplaceRoute(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<CreateRouteResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</CreateRouteResponse>
"""
def test_replace_route_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.replace_route(
'rtb-e4ad488d', '0.0.0.0/0', gateway_id='igw-eaad4883')
self.assert_request_parameters({
'Action': 'ReplaceRoute',
'RouteTableId': 'rtb-e4ad488d',
'DestinationCidrBlock': '0.0.0.0/0',
'GatewayId': 'igw-eaad4883'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
def test_replace_route_instance(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.replace_route(
'rtb-g8ff4ea2', '0.0.0.0/0', instance_id='i-1a2b3c4d')
self.assert_request_parameters({
'Action': 'ReplaceRoute',
'RouteTableId': 'rtb-g8ff4ea2',
'DestinationCidrBlock': '0.0.0.0/0',
'InstanceId': 'i-1a2b3c4d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
def test_replace_route_interface(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.replace_route(
'rtb-g8ff4ea2', '0.0.0.0/0', interface_id='eni-1a2b3c4d')
self.assert_request_parameters({
'Action': 'ReplaceRoute',
'RouteTableId': 'rtb-g8ff4ea2',
'DestinationCidrBlock': '0.0.0.0/0',
'NetworkInterfaceId': 'eni-1a2b3c4d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
def test_replace_route_vpc_peering_connection(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.replace_route(
'rtb-g8ff4ea2', '0.0.0.0/0', vpc_peering_connection_id='pcx-1a2b3c4d')
self.assert_request_parameters({
'Action': 'ReplaceRoute',
'RouteTableId': 'rtb-g8ff4ea2',
'DestinationCidrBlock': '0.0.0.0/0',
'VpcPeeringConnectionId': 'pcx-1a2b3c4d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
class TestDeleteRoute(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DeleteRouteTableResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DeleteRouteTableResponse>
"""
def test_delete_route(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.delete_route('rtb-e4ad488d', '172.16.1.0/24')
self.assert_request_parameters({
'Action': 'DeleteRoute',
'RouteTableId': 'rtb-e4ad488d',
'DestinationCidrBlock': '172.16.1.0/24'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
if __name__ == '__main__':
unittest.main()
|
ad61f45bbf19dcdd64945e8d627878507d6ea223
|
691b82da7c38a6c109088d8426ffa26175f4ff31
|
/apps/easytask/apps.py
|
fb128ecbfa841970fe263d109f3fcfb40a7249d2
|
[
"MIT"
] |
permissive
|
Hopetree/izone
|
4072957eb12c7703a235c6df87ab49c7a7fdd84e
|
46f90dbaa3968cb3261d60a74765fa462415f000
|
refs/heads/master
| 2023-08-19T00:21:48.632159
| 2023-08-09T07:36:20
| 2023-08-09T07:36:20
| 115,265,222
| 1,167
| 460
|
MIT
| 2023-06-25T07:27:10
| 2017-12-24T13:39:43
|
Python
|
UTF-8
|
Python
| false
| false
| 91
|
py
|
apps.py
|
from django.apps import AppConfig
class EasytaskConfig(AppConfig):
name = 'easytask'
|
09ad0636f2a215d6732819af1d1fb3b810678c60
|
5a6ccde5f37cc86b6fc0812b2bf40f42eab23906
|
/A-set/45A. Codecraft III.py
|
729d5c8ff67552a1359e531b3abe14fcee8a1813
|
[] |
no_license
|
Waqar-107/Codeforces
|
23f2b1edffb85f6f020107f03e09a455d3e6e792
|
f0d2f25aa6a09c06083b82c39cdf3288ec2eecba
|
refs/heads/master
| 2023-03-09T07:55:46.583363
| 2023-03-04T09:57:44
| 2023-03-04T09:57:44
| 82,915,896
| 196
| 138
| null | 2023-02-11T22:06:20
| 2017-02-23T10:29:34
|
C++
|
UTF-8
|
Python
| false
| false
| 322
|
py
|
45A. Codecraft III.py
|
# from dust i have come, dust i will be
month = input()
m = int(input())
s = ['January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November', 'December']
mp = {}
for i in range(12):
mp[s[i]] = i + 1
x = (mp[month] + m) % 12
if x == 0:
x = 12
print(s[x - 1])
|
ff697850d152ce5701447034eb23cdd1c29513a5
|
6c2dbc8d4e536220fb3b1cc72aa8104aea8b0698
|
/tests/test_api/test_methods/test_get_user_profile_photos.py
|
ed5329cc7678457b27834add48e31a3fb0930d75
|
[
"MIT"
] |
permissive
|
aiogram/aiogram
|
f8f98a0beb63bd4d93ea810638d5792569bf354b
|
04bd0c9e7c5421c060183b90d515050f41377bc1
|
refs/heads/dev-3.x
| 2023-08-30T21:20:13.018174
| 2023-08-28T23:01:54
| 2023-08-28T23:01:54
| 111,210,856
| 4,287
| 1,250
|
MIT
| 2023-09-10T21:34:03
| 2017-11-18T14:11:13
|
Python
|
UTF-8
|
Python
| false
| false
| 745
|
py
|
test_get_user_profile_photos.py
|
from aiogram.methods import GetUserProfilePhotos, Request
from aiogram.types import PhotoSize, UserProfilePhotos
from tests.mocked_bot import MockedBot
class TestGetUserProfilePhotos:
async def test_bot_method(self, bot: MockedBot):
prepare_result = bot.add_result_for(
GetUserProfilePhotos,
ok=True,
result=UserProfilePhotos(
total_count=1,
photos=[
[PhotoSize(file_id="file_id", width=42, height=42, file_unique_id="file id")]
],
),
)
response: UserProfilePhotos = await bot.get_user_profile_photos(user_id=42)
request = bot.get_request()
assert response == prepare_result.result
|
034a44fddf8fc6b17f8a354654032630e33bf834
|
cfb2a8652fe0afbcbbf2287f4f736ff85ce47d30
|
/tests/utils.py
|
4509e99718d9229209c209229db573491b786d93
|
[
"BSD-2-Clause"
] |
permissive
|
ionelmc/python-hunter
|
238ad366c9aae00cf1a249fd90993f31404e7087
|
cfae650dd2b7a89e5bf9eb81b109f268397c45e9
|
refs/heads/master
| 2023-08-14T23:47:51.328651
| 2023-04-26T09:11:54
| 2023-04-26T09:11:54
| 32,343,292
| 800
| 44
|
BSD-2-Clause
| 2022-09-09T19:31:05
| 2015-03-16T18:03:16
|
Python
|
UTF-8
|
Python
| false
| false
| 615
|
py
|
utils.py
|
import os
from hunter import CallPrinter
TIMEOUT = int(os.getenv('HUNTER_TEST_TIMEOUT', 60))
class DebugCallPrinter(CallPrinter):
def __init__(self, suffix='', **kwargs):
self.suffix = suffix
super(DebugCallPrinter, self).__init__(**kwargs)
def __call__(self, event):
self.output('depth={} calls={:<4}', event.depth, event.calls)
super(DebugCallPrinter, self).__call__(event)
def output(self, format_str, *args, **kwargs):
format_str = format_str.replace('\n', '%s\n' % self.suffix)
super(DebugCallPrinter, self).output(format_str, *args, **kwargs)
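# A minimal usage sketch, assuming the public hunter.trace API; the module name
# "mymodule" is a placeholder chosen only for illustration.
if __name__ == '__main__':
    import hunter
    # Trace calls inside the placeholder module, printing depth/call counters and
    # appending ' <-' to every line of output via the suffix argument.
    hunter.trace(module='mymodule', action=DebugCallPrinter(suffix=' <-'))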
|
46ce6d1ffde42224bc839d7c61eaa3cd7de05c3f
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/dts/airbyte/airbyte-integrations/connectors/source-youtube-analytics/unit_tests/test_streams.py
|
0726b5da5ca8ae490f5a4d68f403f6ade3723000
|
[
"MIT",
"Elastic-2.0",
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 9,456
|
py
|
test_streams.py
|
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import datetime
from collections import OrderedDict
from unittest.mock import MagicMock
from source_youtube_analytics.source import ChannelReports, CustomBackoffMixin, JobsResource, ReportResources
def test_jobs_resource_list(requests_mock):
json_result = {
"jobs": [
{
"id": "038777e7-dc6e-43c8-b86f-ed954c7acd95",
"name": "Airbyte reporting job",
"reportTypeId": "channel_playback_location_a2",
"createTime": "2021-10-30T20:32:58Z",
},
{
"id": "1c20da45-0604-4d60-85db-925989df1db6",
"name": "Airbyte reporting job",
"reportTypeId": "channel_basic_a2",
"createTime": "2021-10-25T19:48:36Z",
},
]
}
mock_jobs_call = requests_mock.get("https://youtubereporting.googleapis.com/v1/jobs", json=json_result)
jobs_resource = JobsResource()
jobs = jobs_resource.list()
assert jobs == json_result["jobs"]
assert mock_jobs_call.called_once
def test_jobs_resource_create(requests_mock):
name = "channel_basic_a2"
json_result = {
"createTime": "2021-10-30T20:32:58Z",
"id": "038777e7-dc6e-43c8-b86f-ed954c7acd95",
"name": "Airbyte reporting job",
"reportTypeId": name,
}
mock_jobs_call = requests_mock.post("https://youtubereporting.googleapis.com/v1/jobs", json=json_result)
jobs_resource = JobsResource()
result = jobs_resource.create(name)
assert result == json_result["id"]
assert mock_jobs_call.called_once
def test_report_resources_path(requests_mock):
mock_jobs_call = requests_mock.post("https://youtubereporting.googleapis.com/v1/jobs", json={"id": "job1"})
jobs_resource = JobsResource()
stream = ReportResources("stream_name", jobs_resource, "job1")
assert stream.path() == "jobs/job1/reports"
assert not mock_jobs_call.called_once
stream = ReportResources("stream_name", jobs_resource, job_id=None)
assert not mock_jobs_call.called_once
assert stream.path() == "jobs/job1/reports"
assert mock_jobs_call.called_once
assert stream.path() == "jobs/job1/reports"
assert mock_jobs_call.called_once
assert mock_jobs_call.last_request.json() == {"name": "Airbyte reporting job", "reportTypeId": "stream_name"}
def test_report_resources_parse_response():
jobs_resource = JobsResource()
stream = ReportResources("stream_name", jobs_resource, "job1")
response = MagicMock()
response.json = MagicMock(return_value={})
assert stream.parse_response(response, stream_state={}) == [None]
response.json = MagicMock(return_value={"reports": []})
assert stream.parse_response(response, stream_state={}) == [None]
reports = [
{
"id": "4317112913",
"jobId": "1c20da45-0604-4d60-85db-925989df1db6",
"startTime": "2021-10-25T07:00:00Z",
"endTime": "2021-10-26T07:00:00Z",
"createTime": "2021-10-27T04:59:46.114806Z",
"downloadUrl": "https://youtubereporting.googleapis.com/v1/media/CHANNEL/ybpwL6sPt6SSzazIV400WQ/jobs/1c20da45-0604-4d60-85db-925989df1db6/reports/4317112913?alt=media",
},
{
"id": "4315953856",
"jobId": "1c20da45-0604-4d60-85db-925989df1db6",
"startTime": "2021-10-18T07:00:00Z",
"endTime": "2021-10-19T07:00:00Z",
"createTime": "2021-10-26T07:43:27.680074Z",
"downloadUrl": "https://youtubereporting.googleapis.com/v1/media/CHANNEL/ybpwL6sPt6SSzazIV400WQ/jobs/1c20da45-0604-4d60-85db-925989df1db6/reports/4315953856?alt=media",
},
]
response.json = MagicMock(return_value={"reports": reports})
result = stream.parse_response(response, stream_state={})
assert result == [
{
"id": "4315953856",
"jobId": "1c20da45-0604-4d60-85db-925989df1db6",
"startTime": datetime.datetime(2021, 10, 18, 7, 0, tzinfo=datetime.timezone.utc),
"endTime": "2021-10-19T07:00:00Z",
"createTime": "2021-10-26T07:43:27.680074Z",
"downloadUrl": "https://youtubereporting.googleapis.com/v1/media/CHANNEL/ybpwL6sPt6SSzazIV400WQ/jobs/1c20da45-0604-4d60-85db-925989df1db6/reports/4315953856?alt=media",
},
{
"id": "4317112913",
"jobId": "1c20da45-0604-4d60-85db-925989df1db6",
"startTime": datetime.datetime(2021, 10, 25, 7, 0, tzinfo=datetime.timezone.utc),
"endTime": "2021-10-26T07:00:00Z",
"createTime": "2021-10-27T04:59:46.114806Z",
"downloadUrl": "https://youtubereporting.googleapis.com/v1/media/CHANNEL/ybpwL6sPt6SSzazIV400WQ/jobs/1c20da45-0604-4d60-85db-925989df1db6/reports/4317112913?alt=media",
},
]
def test_report_resources_next_page_token():
jobs_resource = JobsResource()
stream = ReportResources("stream_name", jobs_resource, "job1")
assert stream.next_page_token({}) is None
def test_channel_reports_path():
jobs_resource = JobsResource()
parent = ReportResources("stream_name", jobs_resource, "job1")
stream = ChannelReports("stream_name", [], parent=parent)
downloadUrl = "https://youtubereporting.googleapis.com/v1/media/CHANNEL/ybpwL6sPt6SSzazIV400WQ/jobs/1c20da45-0604-4d60-85db-925989df1db6/reports/4317112913?alt=media"
stream_slice = {
"parent": {
"id": "4317112913",
"jobId": "1c20da45-0604-4d60-85db-925989df1db6",
"startTime": datetime.datetime(2021, 10, 25, 7, 0, tzinfo=datetime.timezone.utc),
"endTime": datetime.datetime(2021, 10, 26, 7, 0, tzinfo=datetime.timezone.utc),
"createTime": datetime.datetime(2021, 10, 27, 4, 59, 46, 114806, tzinfo=datetime.timezone.utc),
"downloadUrl": downloadUrl,
}
}
path = stream.path(stream_state={}, stream_slice=stream_slice, next_page_token=None)
assert path == downloadUrl
def test_channel_reports_parse_response():
jobs_resource = JobsResource()
parent = ReportResources("stream_name", jobs_resource, "job1")
stream = ChannelReports("stream_name", ["date", "channel_id"], parent=parent)
response = MagicMock()
response.text = "date,channel_id,likes,dislikes\n20211026,UCybpwL6sPt6SSzazIV400WQ,210,21\n20211026,UCybpwL6sPt6SSzazIV400WQ,150,18\n"
result = stream.parse_response(response, stream_state={})
assert list(result) == [
OrderedDict([("date", "20211026"), ("channel_id", "UCybpwL6sPt6SSzazIV400WQ"), ("likes", "210"), ("dislikes", "21")]),
OrderedDict([("date", "20211026"), ("channel_id", "UCybpwL6sPt6SSzazIV400WQ"), ("likes", "150"), ("dislikes", "18")]),
]
def test_backoff_505():
response = MagicMock()
response.status_code = 505
assert CustomBackoffMixin().should_retry(response) is True
def test_backoff_429():
response = MagicMock()
response.status_code = 429
assert CustomBackoffMixin().should_retry(response) is True
def test_backoff_429_per_minute_limit():
response = MagicMock()
response.status_code = 429
response.json = MagicMock(
return_value={
"error": {
"code": 429,
"message": "Quota exceeded for quota metric 'Free requests' and limit 'Free requests per minute' of service 'youtubereporting.googleapis.com' for consumer 'project_number:863188056127'.",
"status": "RESOURCE_EXHAUSTED",
"details": [
{
"reason": "RATE_LIMIT_EXCEEDED",
"metadata": {
"consumer": "projects/863188056127",
"quota_limit": "FreeQuotaRequestsPerMinutePerProject",
"quota_limit_value": "60",
"quota_metric": "youtubereporting.googleapis.com/free_quota_requests",
"service": "youtubereporting.googleapis.com",
},
}
],
}
}
)
assert CustomBackoffMixin().should_retry(response) is True
def test_backoff_429_per_day_limit():
response = MagicMock()
response.status_code = 429
response.json = MagicMock(
return_value={
"error": {
"code": 429,
"message": "Quota exceeded for quota metric 'Free requests' and limit 'Free requests per day' of service 'youtubereporting.googleapis.com' for consumer 'project_number:863188056127",
"status": "RESOURCE_EXHAUSTED",
"details": [
{
"reason": "RATE_LIMIT_EXCEEDED",
"metadata": {
"consumer": "projects/863188056127",
"quota_limit": "FreeQuotaRequestsPerDayPerProject",
"quota_limit_value": "20000",
"quota_metric": "youtubereporting.googleapis.com/free_quota_requests",
"service": "youtubereporting.googleapis.com",
},
}
],
}
}
)
custom_mixin = CustomBackoffMixin()
custom_mixin.logger = MagicMock()
assert custom_mixin.should_retry(response) is False
|
f5de7d09143b1bee06cdd5a1d21779d11113720c
|
5b9b8a526984a986b527cf300b059bb2f220c386
|
/external-import/socprime/src/socprime/__init__.py
|
aa39afb395f3b90f381afd1ae37dd581937db229
|
[
"Apache-2.0",
"AGPL-3.0-only"
] |
permissive
|
OpenCTI-Platform/connectors
|
59f2260bff6a89d64c7ba08c15c905bd83d238a3
|
d00a0243946ded25b5d06bdefd9b40015dea9b80
|
refs/heads/master
| 2023-08-31T09:26:19.551741
| 2023-08-26T20:02:51
| 2023-08-26T20:02:51
| 192,614,260
| 254
| 368
|
Apache-2.0
| 2023-09-12T20:03:10
| 2019-06-18T21:22:05
|
Python
|
UTF-8
|
Python
| false
| false
| 77
|
py
|
__init__.py
|
from socprime.core import SocprimeConnector
__all__ = ["SocprimeConnector"]
|
c94bdcf88988505a180edaca920aeb5b69954eb0
|
47ef6997d03f4d5c921c83cc09aef1dfc6828e2c
|
/zeus/datasets/transforms/RandomVerticallFlip_pair.py
|
d527ae6d6cc214acd759c29d0096fc98b72e833c
|
[
"MIT"
] |
permissive
|
huawei-noah/xingtian
|
620c9f245183d636e0a65659fd99a984397ecbd4
|
e4ef3a1c92d19d1d08c3ef0e2156b6fecefdbe04
|
refs/heads/master
| 2023-09-03T01:10:21.768245
| 2022-03-21T03:39:39
| 2022-03-21T03:39:39
| 287,759,621
| 308
| 91
|
MIT
| 2023-09-12T11:33:22
| 2020-08-15T14:13:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,479
|
py
|
RandomVerticallFlip_pair.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""This is a class for RandomVerticallFlip_pair."""
import random
import numpy as np
from zeus.common import ClassFactory, ClassType
@ClassFactory.register(ClassType.TRANSFORM)
class RandomVerticallFlip_pair(object):
"""Random vertical flip two related image."""
def __call__(self, image, label):
"""Call function of RandomVerticallFlip_pair.
        :param image: usually the feature image, for example, the LR image for a super resolution dataset,
        the initial image for a segmentation dataset, etc.
        :type image: PIL image
        :param label: usually the label image, for example, the HR image for a super resolution dataset,
        the mask image for a segmentation dataset, etc.
        :type label: PIL image
        :return: the image pair after the transform
        :rtype: list, every item is a PIL image, the first one is the feature image, the second is the label image
"""
if random.random() < 0.5:
image, label = np.flip(image, 0), np.flip(label, 0)
return image, label
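# A minimal usage sketch, assuming numpy arrays as stand-ins for the PIL images
# described in the docstring; the shapes and values below are illustrative only.
if __name__ == '__main__':
    feature = np.arange(12, dtype=np.float32).reshape(3, 4)  # stand-in for the LR/feature image
    label = feature * 10                                      # stand-in for the HR/label image
    transform = RandomVerticallFlip_pair()
    # Either both arrays are flipped vertically (probability 0.5) or both are left unchanged.
    out_feature, out_label = transform(feature, label)
    print(out_feature.shape, out_label.shape)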
|
9c9960b1ce4b149839124a9043da2dba74ccce1a
|
bea2e5924a62b76a767b3eb915abb3f95a225926
|
/tensorflow_privacy/privacy/dp_query/discrete_gaussian_utils.py
|
ea0a6639d21eaba522e95d35215173e86710d46f
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
tensorflow/privacy
|
741ddc106e9b73384a1356bf915dc8f7f97ce768
|
c92610e37aa340932ed2d963813e0890035a22bc
|
refs/heads/master
| 2023-09-03T20:42:21.040653
| 2023-08-30T19:53:38
| 2023-08-30T19:54:08
| 162,747,292
| 1,881
| 493
|
Apache-2.0
| 2023-09-14T19:55:15
| 2018-12-21T18:46:46
|
Python
|
UTF-8
|
Python
| false
| false
| 5,552
|
py
|
discrete_gaussian_utils.py
|
# Copyright 2021, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Util functions for drawing discrete Gaussian samples.
The following functions implement a vectorized TF version of the sampling
algorithm described in the paper:
The Discrete Gaussian for Differential Privacy
https://arxiv.org/pdf/2004.00010.pdf
Note that the exact sampling implementation should use integer and fractional
parameters only. Here, we relax this constraint a bit and use vectorized
implementations of Bernoulli and discrete Laplace sampling that can take float
parameters.
"""
import tensorflow as tf
import tensorflow_probability as tf_prob
def _sample_discrete_laplace(t, shape):
"""Sample from discrete Laplace with scale t.
This method is based on the observation that sampling from Z ~ Lap(t) is
  equivalent to sampling X, Y independently from Geo(1 - exp(-1/t)) and taking
  Z = X - Y.
  Note also that tensorflow_probability's geometric sampler is based on
  floating-point operations and may be inexact.
Args:
t: The scale of the discrete Laplace distribution.
shape: The tensor shape of the tensors drawn.
Returns:
A tensor of the specified shape filled with random values.
"""
geometric_probs = 1.0 - tf.exp(-1.0 / tf.cast(t, tf.float64))
sampler = tf_prob.distributions.Geometric(probs=geometric_probs)
return tf.cast(sampler.sample(shape) - sampler.sample(shape), tf.int64)
def _sample_bernoulli(p):
"""Sample from Bernoulli(p)."""
return tf_prob.distributions.Bernoulli(probs=p, dtype=tf.int64).sample()
def _check_input_args(scale, shape, dtype):
"""Checks the input args to the discrete Gaussian sampler."""
if tf.as_dtype(dtype) not in (tf.int32, tf.int64):
raise ValueError(
f'Only tf.int32 and tf.int64 are supported. Found dtype `{dtype}`.')
checks = [
tf.compat.v1.assert_non_negative(scale),
tf.compat.v1.assert_integer(scale)
]
with tf.control_dependencies(checks):
return tf.identity(scale), shape, dtype
def _int_square(value):
"""Avoids the TF op `Square(T=...)` for ints as sampling can happen on clients."""
return (value - 1) * (value + 1) + 1
@tf.function
def _sample_discrete_gaussian_helper(scale, shape, dtype):
"""Draw samples from discrete Gaussian, assuming scale >= 0."""
scale = tf.cast(scale, tf.int64)
sq_scale = _int_square(scale)
# Scale for discrete Laplace. The sampling algorithm should be correct
# for any discrete Laplace scale, and the original paper uses
# `dlap_scale = floor(scale) + 1`. Here we use `dlap_scale = scale` (where
# input `scale` is restricted to integers >= 1) to simplify the fraction
# below. It turns out that for integer scales >= 1, `dlap_scale = scale` gives
# a good minimum success rate of ~70%, allowing a small oversampling factor.
dlap_scale = scale
oversample_factor = 1.5
# Draw at least some samples in case we got unlucky with small input shape.
min_n = 1000
target_n = tf.reduce_prod(tf.cast(shape, tf.int64))
oversample_n = oversample_factor * tf.cast(target_n, tf.float32)
draw_n = tf.maximum(min_n, tf.cast(oversample_n, tf.int32))
accepted_n = tf.constant(0, dtype=target_n.dtype)
result = tf.zeros((0,), dtype=tf.int64)
while accepted_n < target_n:
# Since the number of samples could be different in every retry, we need to
# manually specify the shape info for TF.
tf.autograph.experimental.set_loop_options(
shape_invariants=[(result, tf.TensorShape([None]))])
# Draw samples.
samples = _sample_discrete_laplace(dlap_scale, shape=(draw_n,))
z_numer = _int_square(tf.abs(samples) - scale)
z_denom = 2 * sq_scale
bern_probs = tf.exp(-1.0 * tf.divide(z_numer, z_denom))
accept = _sample_bernoulli(bern_probs)
# Keep successful samples and increment counter.
accepted_samples = samples[tf.equal(accept, 1)]
accepted_n += tf.cast(tf.size(accepted_samples), accepted_n.dtype)
result = tf.concat([result, accepted_samples], axis=0)
# Reduce the number of draws for any retries.
draw_n = tf.cast(target_n - accepted_n, tf.float32) * oversample_factor
draw_n = tf.maximum(min_n, tf.cast(draw_n, tf.int32))
return tf.cast(tf.reshape(result[:target_n], shape), dtype)
def sample_discrete_gaussian(scale, shape, dtype=tf.int32):
"""Draws (possibly inexact) samples from the discrete Gaussian distribution.
We relax some integer constraints to use vectorized implementations of
Bernoulli and discrete Laplace sampling. Integer operations are done in
tf.int64 as TF does not have direct support for fractions.
Args:
scale: The scale of the discrete Gaussian distribution.
shape: The shape of the output tensor.
dtype: The type of the output.
Returns:
A tensor of the specified shape filled with random values.
"""
scale, shape, dtype = _check_input_args(scale, shape, dtype)
return tf.cond(
tf.equal(scale, 0), lambda: tf.zeros(shape, dtype),
lambda: _sample_discrete_gaussian_helper(scale, shape, dtype))
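# A minimal usage sketch; the scale and shape values below are illustrative only.
if __name__ == '__main__':
  draws = sample_discrete_gaussian(scale=10, shape=(2, 3), dtype=tf.int64)
  # Integer-valued samples, roughly centered at 0 with standard deviation close to `scale`.
  print(draws)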
|
4460d4722981947787a84453312e2061a92baf80
|
e22eeb5256e17a96a98b3ff25433aec2d641cd2c
|
/openstack/clustering/v1/node.py
|
8229d24cb1ed1df9aaed7751847487f3a7a17e50
|
[
"Apache-2.0"
] |
permissive
|
openstack/openstacksdk
|
b4b95fd7869653feea5a3b783e9a5c588235c039
|
d474eb84c605c429bb9cccb166cabbdd1654d73c
|
refs/heads/master
| 2023-09-03T22:50:03.398512
| 2023-07-27T14:09:35
| 2023-08-29T16:28:46
| 16,223,378
| 124
| 130
|
Apache-2.0
| 2023-09-06T02:52:47
| 2014-01-25T02:48:00
|
Python
|
UTF-8
|
Python
| false
| false
| 6,569
|
py
|
node.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.clustering.v1 import _async_resource
from openstack import resource
from openstack import utils
class Node(_async_resource.AsyncResource):
resource_key = 'node'
resources_key = 'nodes'
base_path = '/nodes'
# capabilities
allow_create = True
allow_fetch = True
allow_commit = True
allow_delete = True
allow_list = True
commit_method = 'PATCH'
_query_mapping = resource.QueryParameters(
'show_details',
'name',
'sort',
'global_project',
'cluster_id',
'status',
)
# Properties
#: The name of the node.
name = resource.Body('name')
#: The ID of the physical object that backs the node.
physical_id = resource.Body('physical_id')
#: The ID of the cluster in which this node is a member.
#: A node is an orphan node if this field is empty.
cluster_id = resource.Body('cluster_id')
#: The ID of the profile used by this node.
profile_id = resource.Body('profile_id')
#: The domain ID of the node.
domain_id = resource.Body('domain')
#: The ID of the user who created this node.
user_id = resource.Body('user')
#: The ID of the project this node belongs to.
project_id = resource.Body('project')
#: The name of the profile used by this node.
profile_name = resource.Body('profile_name')
#: An integer that is unique inside the owning cluster.
#: A value of -1 means this node is an orphan node.
index = resource.Body('index', type=int)
#: A string indicating the role the node plays in a cluster.
role = resource.Body('role')
#: The timestamp of the node object's initialization.
#: *Type: datetime object parsed from ISO 8601 formatted string*
init_at = resource.Body('init_at')
#: The timestamp of the node's creation, i.e. the physical object
#: represented by this node is also created.
#: *Type: datetime object parsed from ISO 8601 formatted string*
created_at = resource.Body('created_at')
#: The timestamp the node was last updated.
#: *Type: datetime object parsed from ISO 8601 formatted string*
updated_at = resource.Body('updated_at')
#: A string indicating the node's status.
status = resource.Body('status')
#: A string describing why the node entered its current status.
status_reason = resource.Body('status_reason')
#: A map containing key-value pairs attached to the node.
metadata = resource.Body('metadata', type=dict)
#: A map containing some runtime data for this node.
data = resource.Body('data', type=dict)
#: A map containing the details of the physical object this node
#: represents
details = resource.Body('details', type=dict)
#: A map containing the dependency of nodes
dependents = resource.Body('dependents', type=dict)
#: Whether the node is tainted. *Type: bool*
tainted = resource.Body('tainted', type=bool)
def _action(self, session, body):
"""Procedure the invoke an action API.
:param session: A session object used for sending request.
:param body: The body of action to be sent.
"""
url = utils.urljoin(self.base_path, self.id, 'actions')
resp = session.post(url, json=body)
return resp.json()
def check(self, session, **params):
"""An action procedure for the node to check its health status.
:param session: A session object used for sending request.
:returns: A dictionary containing the action ID.
"""
body = {'check': params}
return self._action(session, body)
def recover(self, session, **params):
"""An action procedure for the node to recover.
:param session: A session object used for sending request.
:returns: A dictionary containing the action ID.
"""
body = {'recover': params}
return self._action(session, body)
def op(self, session, operation, **params):
"""Perform an operation on the specified node.
:param session: A session object used for sending request.
:param operation: A string representing the operation to be performed.
:param dict params: An optional dict providing the parameters for the
operation.
:returns: A dictionary containing the action ID.
"""
url = utils.urljoin(self.base_path, self.id, 'ops')
resp = session.post(url, json={operation: params})
return resp.json()
def adopt(self, session, preview=False, **params):
"""Adopt a node for management.
:param session: A session object used for sending request.
:param preview: A boolean indicating whether the adoption is a
preview. A "preview" does not create the node object.
:param dict params: A dict providing the details of a node to be
adopted.
"""
if preview:
path = 'adopt-preview'
attrs = {
'identity': params.get('identity'),
'overrides': params.get('overrides'),
'type': params.get('type'),
'snapshot': params.get('snapshot'),
}
else:
path = 'adopt'
attrs = params
url = utils.urljoin(self.base_path, path)
resp = session.post(url, json=attrs)
if preview:
return resp.json()
self._translate_response(resp)
return self
def force_delete(self, session):
"""Force delete a node."""
body = {'force': True}
url = utils.urljoin(self.base_path, self.id)
response = session.delete(url, json=body)
return self._delete_response(response)
class NodeDetail(Node):
base_path = '/nodes/%(node_id)s?show_details=True'
allow_create = False
allow_fetch = True
allow_commit = False
allow_delete = False
allow_list = False
node_id = resource.URI('node_id')
|
158823ba7f05a54b85e79176ba716e1e5c17ce66
|
19f76203bbd176fe5a5ff7a2470ada9fb9af7c39
|
/taskflow/examples/reverting_linear.py
|
76c6b8111c3266c3c7e20bc2fc52537c0d44a4e9
|
[
"Apache-2.0"
] |
permissive
|
openstack/taskflow
|
19f1c614f2fc175b6e57ac9280dc510e402c9f56
|
3b40c04594fb49ab17f8834f2f0df294a1f3e996
|
refs/heads/master
| 2023-09-04T23:38:07.154364
| 2023-08-16T13:57:37
| 2023-08-16T13:57:37
| 16,626,961
| 338
| 75
|
Apache-2.0
| 2018-10-29T14:30:27
| 2014-02-07T20:45:53
|
Python
|
UTF-8
|
Python
| false
| false
| 3,543
|
py
|
reverting_linear.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import sys
logging.basicConfig(level=logging.ERROR)
top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir,
os.pardir))
sys.path.insert(0, top_dir)
import taskflow.engines
from taskflow.patterns import linear_flow as lf
from taskflow import task
# INTRO: In this example we create three tasks, each of which ~calls~ a given
# number (provided as a function input); one of those tasks *fails* calling a
# given number (the suzzie call); this causes the workflow to enter the
# reverting process, which activates the revert methods of the previous two
# phone ~calls~.
#
# This simulated calling makes it appear like all three calls occur or all
# three don't occur (transaction-like capabilities). No persistence layer is
# used here so reverting and executing will *not* be tolerant of process
# failure.
class CallJim(task.Task):
def execute(self, jim_number, *args, **kwargs):
print("Calling jim %s." % jim_number)
def revert(self, jim_number, *args, **kwargs):
print("Calling %s and apologizing." % jim_number)
class CallJoe(task.Task):
def execute(self, joe_number, *args, **kwargs):
print("Calling joe %s." % joe_number)
def revert(self, joe_number, *args, **kwargs):
print("Calling %s and apologizing." % joe_number)
class CallSuzzie(task.Task):
def execute(self, suzzie_number, *args, **kwargs):
raise IOError("Suzzie not home right now.")
# Create your flow and associated tasks (the work to be done).
flow = lf.Flow('simple-linear').add(
CallJim(),
CallJoe(),
CallSuzzie()
)
try:
# Now run that flow using the provided initial data (store below).
taskflow.engines.run(flow, store=dict(joe_number=444,
jim_number=555,
suzzie_number=666))
except Exception as e:
# NOTE(harlowja): This exception will be the exception that came out of the
# 'CallSuzzie' task instead of a different exception, this is useful since
# typically surrounding code wants to handle the original exception and not
# a wrapped or altered one.
#
# *WARNING* If this flow was multi-threaded and multiple active tasks threw
# exceptions then the above exception would be wrapped into a combined
# exception (the object has methods to iterate over the contained
# exceptions). See: exceptions.py and the class 'WrappedFailure' to look at
# how to deal with multiple tasks failing while running.
#
# You will also note that this is not a problem in this case since no
# parallelism is involved; this is ensured by the usage of a linear flow
# and the default engine type which is 'serial' vs being 'parallel'.
print("Flow failed: %s" % e)
|
b1d663476eecbc136afe9cca59effa18fb581993
|
2212a32833776a5d5d2164d8efd11bd18bd3f768
|
/tf_agents/agents/ddpg/ddpg_agent.py
|
4517a71bc5816186311d3be5a724d7a47a10558b
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/agents
|
f39805fb98ef9af712dcaff3ba49e1ac6d42804b
|
eca1093d3a047e538f17f6ab92ab4d8144284f23
|
refs/heads/master
| 2023-08-14T04:56:30.774797
| 2023-08-02T17:43:44
| 2023-08-02T17:44:09
| 157,936,206
| 2,755
| 848
|
Apache-2.0
| 2023-07-26T02:35:32
| 2018-11-17T00:29:12
|
Python
|
UTF-8
|
Python
| false
| false
| 17,734
|
py
|
ddpg_agent.py
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A DDPG Agent.
Implements the Deep Deterministic Policy Gradient (DDPG) algorithm from
"Continuous control with deep reinforcement learning" - Lilicrap et al.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from typing import Optional, Text
import gin
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.agents import data_converter
from tf_agents.agents import tf_agent
from tf_agents.networks import network
from tf_agents.policies import actor_policy
from tf_agents.policies import ou_noise_policy
from tf_agents.trajectories import time_step as ts
from tf_agents.typing import types
from tf_agents.utils import common
from tf_agents.utils import eager_utils
from tf_agents.utils import nest_utils
class DdpgInfo(
collections.namedtuple('DdpgInfo', ('actor_loss', 'critic_loss'))
):
pass
@gin.configurable
class DdpgAgent(tf_agent.TFAgent):
"""A DDPG Agent."""
def __init__(
self,
time_step_spec: ts.TimeStep,
action_spec: types.NestedTensorSpec,
actor_network: network.Network,
critic_network: network.Network,
actor_optimizer: Optional[types.Optimizer] = None,
critic_optimizer: Optional[types.Optimizer] = None,
ou_stddev: types.Float = 1.0,
ou_damping: types.Float = 1.0,
target_actor_network: Optional[network.Network] = None,
target_critic_network: Optional[network.Network] = None,
target_update_tau: types.Float = 1.0,
target_update_period: types.Int = 1,
dqda_clipping: Optional[types.Float] = None,
td_errors_loss_fn: Optional[types.LossFn] = None,
gamma: types.Float = 1.0,
reward_scale_factor: types.Float = 1.0,
gradient_clipping: Optional[types.Float] = None,
debug_summaries: bool = False,
summarize_grads_and_vars: bool = False,
train_step_counter: Optional[tf.Variable] = None,
name: Optional[Text] = None,
):
"""Creates a DDPG Agent.
Args:
time_step_spec: A `TimeStep` spec of the expected time_steps.
action_spec: A nest of BoundedTensorSpec representing the actions.
actor_network: A tf_agents.network.Network to be used by the agent. The
network will be called with call(observation, step_type[, policy_state])
and should return (action, new_state).
critic_network: A tf_agents.network.Network to be used by the agent. The
network will be called with call((observation, action), step_type[,
policy_state]) and should return (q_value, new_state).
actor_optimizer: The optimizer to use for the actor network.
critic_optimizer: The optimizer to use for the critic network.
ou_stddev: Standard deviation for the Ornstein-Uhlenbeck (OU) noise added
in the default collect policy.
ou_damping: Damping factor for the OU noise added in the default collect
policy.
target_actor_network: (Optional.) A `tf_agents.network.Network` to be
used as the actor target network during Q learning. Every
`target_update_period` train steps, the weights from `actor_network` are
        copied (possibly with smoothing via `target_update_tau`) to
        `target_actor_network`. If `target_actor_network` is not provided, it is
created by making a copy of `actor_network`, which initializes a new
network with the same structure and its own layers and weights.
Performing a `Network.copy` does not work when the network instance
already has trainable parameters (e.g., has already been built, or when
the network is sharing layers with another). In these cases, it is up
to you to build a copy having weights that are not shared with the
original `actor_network`, so that this can be used as a target network.
If you provide a `target_actor_network` that shares any weights with
`actor_network`, a warning will be logged but no exception is thrown.
target_critic_network: (Optional.) Similar network as target_actor_network
but for the critic_network. See documentation for target_actor_network.
target_update_tau: Factor for soft update of the target networks.
target_update_period: Period for soft update of the target networks.
dqda_clipping: when computing the actor loss, clips the gradient dqda
element-wise between [-dqda_clipping, dqda_clipping]. Does not perform
clipping if dqda_clipping == 0.
td_errors_loss_fn: A function for computing the TD errors loss. If None,
a default value of elementwise huber_loss is used.
gamma: A discount factor for future rewards.
reward_scale_factor: Multiplicative scale for the reward.
gradient_clipping: Norm length to clip gradients.
debug_summaries: A bool to gather debug summaries.
summarize_grads_and_vars: If True, gradient and network variable summaries
will be written during training.
train_step_counter: An optional counter to increment every time the train
op is run. Defaults to the global_step.
name: The name of this agent. All variables in this module will fall under
that name. Defaults to the class name.
"""
tf.Module.__init__(self, name=name)
self._actor_network = actor_network
actor_network.create_variables(time_step_spec.observation)
if target_actor_network:
target_actor_network.create_variables(time_step_spec.observation)
self._target_actor_network = common.maybe_copy_target_network_with_checks(
self._actor_network,
target_actor_network,
'TargetActorNetwork',
input_spec=time_step_spec.observation,
)
self._critic_network = critic_network
critic_input_spec = (time_step_spec.observation, action_spec)
critic_network.create_variables(critic_input_spec)
if target_critic_network:
target_critic_network.create_variables(critic_input_spec)
self._target_critic_network = common.maybe_copy_target_network_with_checks(
self._critic_network,
target_critic_network,
'TargetCriticNetwork',
input_spec=critic_input_spec,
)
self._actor_optimizer = actor_optimizer
self._critic_optimizer = critic_optimizer
self._ou_stddev = ou_stddev
self._ou_damping = ou_damping
self._target_update_tau = target_update_tau
self._target_update_period = target_update_period
self._dqda_clipping = dqda_clipping
self._td_errors_loss_fn = (
td_errors_loss_fn or common.element_wise_huber_loss
)
self._gamma = gamma
self._reward_scale_factor = reward_scale_factor
self._gradient_clipping = gradient_clipping
self._update_target = self._get_target_updater(
target_update_tau, target_update_period
)
policy = actor_policy.ActorPolicy(
time_step_spec=time_step_spec,
action_spec=action_spec,
actor_network=self._actor_network,
clip=True,
)
collect_policy = actor_policy.ActorPolicy(
time_step_spec=time_step_spec,
action_spec=action_spec,
actor_network=self._actor_network,
clip=False,
)
collect_policy = ou_noise_policy.OUNoisePolicy(
collect_policy,
ou_stddev=self._ou_stddev,
ou_damping=self._ou_damping,
clip=True,
)
super(DdpgAgent, self).__init__(
time_step_spec,
action_spec,
policy,
collect_policy,
train_sequence_length=2 if not self._actor_network.state_spec else None,
debug_summaries=debug_summaries,
summarize_grads_and_vars=summarize_grads_and_vars,
train_step_counter=train_step_counter,
)
self._as_transition = data_converter.AsTransition(
self.data_context, squeeze_time_dim=not self._actor_network.state_spec
)
def _initialize(self):
common.soft_variables_update(
self._critic_network.variables,
self._target_critic_network.variables,
tau=1.0,
)
common.soft_variables_update(
self._actor_network.variables,
self._target_actor_network.variables,
tau=1.0,
)
def _get_target_updater(self, tau=1.0, period=1):
"""Performs a soft update of the target network parameters.
For each weight w_s in the original network, and its corresponding
weight w_t in the target network, a soft update is:
      w_t = (1 - tau) * w_t + tau * w_s
Args:
tau: A float scalar in [0, 1]. Default `tau=1.0` means hard update.
period: Step interval at which the target networks are updated.
Returns:
An operation that performs a soft update of the target network parameters.
"""
with tf.name_scope('get_target_updater'):
def update():
"""Update target network."""
# TODO(b/124381161): What about observation normalizer variables?
critic_update = common.soft_variables_update(
self._critic_network.variables,
self._target_critic_network.variables,
tau,
tau_non_trainable=1.0,
)
actor_update = common.soft_variables_update(
self._actor_network.variables,
self._target_actor_network.variables,
tau,
tau_non_trainable=1.0,
)
return tf.group(critic_update, actor_update)
return common.Periodically(update, period, 'periodic_update_targets')
def _train(self, experience, weights=None):
transition = self._as_transition(experience)
time_steps, policy_steps, next_time_steps = transition
actions = policy_steps.action
# TODO(b/124382524): Apply a loss mask or filter boundary transitions.
trainable_critic_variables = self._critic_network.trainable_variables
with tf.GradientTape(watch_accessed_variables=False) as tape:
assert (
trainable_critic_variables
), 'No trainable critic variables to optimize.'
tape.watch(trainable_critic_variables)
critic_loss = self.critic_loss(
time_steps, actions, next_time_steps, weights=weights, training=True
)
tf.debugging.check_numerics(critic_loss, 'Critic loss is inf or nan.')
critic_grads = tape.gradient(critic_loss, trainable_critic_variables)
self._apply_gradients(
critic_grads, trainable_critic_variables, self._critic_optimizer
)
trainable_actor_variables = self._actor_network.trainable_variables
with tf.GradientTape(watch_accessed_variables=False) as tape:
assert (
trainable_actor_variables
), 'No trainable actor variables to optimize.'
tape.watch(trainable_actor_variables)
actor_loss = self.actor_loss(time_steps, weights=weights, training=True)
tf.debugging.check_numerics(actor_loss, 'Actor loss is inf or nan.')
actor_grads = tape.gradient(actor_loss, trainable_actor_variables)
self._apply_gradients(
actor_grads, trainable_actor_variables, self._actor_optimizer
)
self.train_step_counter.assign_add(1)
self._update_target()
# TODO(b/124382360): Compute per element TD loss and return in loss_info.
total_loss = actor_loss + critic_loss
return tf_agent.LossInfo(total_loss, DdpgInfo(actor_loss, critic_loss))
def _loss(self, experience, weights=None, training=False):
transition = self._as_transition(experience)
time_steps, policy_steps, next_time_steps = transition
actions = policy_steps.action
critic_loss = self.critic_loss(
time_steps, actions, next_time_steps, weights=weights, training=training
)
tf.debugging.check_numerics(critic_loss, 'Critic loss is inf or nan.')
actor_loss = self.actor_loss(time_steps, weights=weights, training=training)
tf.debugging.check_numerics(actor_loss, 'Actor loss is inf or nan.')
total_loss = actor_loss + critic_loss
return tf_agent.LossInfo(total_loss, DdpgInfo(actor_loss, critic_loss))
def _apply_gradients(self, gradients, variables, optimizer):
if optimizer is None:
raise ValueError('Optimizer is undefined.')
# Tuple is used for py3, where zip is a generator producing values once.
grads_and_vars = tuple(zip(gradients, variables))
if self._gradient_clipping is not None:
grads_and_vars = eager_utils.clip_gradient_norms(
grads_and_vars, self._gradient_clipping
)
if self._summarize_grads_and_vars:
eager_utils.add_variables_summaries(
grads_and_vars, self.train_step_counter
)
eager_utils.add_gradients_summaries(
grads_and_vars, self.train_step_counter
)
optimizer.apply_gradients(grads_and_vars)
def critic_loss(
self,
time_steps: ts.TimeStep,
actions: types.NestedTensor,
next_time_steps: ts.TimeStep,
weights: Optional[types.Tensor] = None,
training: bool = False,
) -> types.Tensor:
"""Computes the critic loss for DDPG training.
Args:
time_steps: A batch of timesteps.
actions: A batch of actions.
next_time_steps: A batch of next timesteps.
weights: Optional scalar or element-wise (per-batch-entry) importance
weights.
training: Whether this loss is being used for training.
Returns:
critic_loss: A scalar critic loss.
"""
with tf.name_scope('critic_loss'):
target_actions, _ = self._target_actor_network(
next_time_steps.observation,
step_type=next_time_steps.step_type,
training=False,
)
target_critic_net_input = (next_time_steps.observation, target_actions)
target_q_values, _ = self._target_critic_network(
target_critic_net_input,
step_type=next_time_steps.step_type,
training=False,
)
td_targets = tf.stop_gradient(
self._reward_scale_factor * next_time_steps.reward
+ self._gamma * next_time_steps.discount * target_q_values
)
critic_net_input = (time_steps.observation, actions)
q_values, _ = self._critic_network(
critic_net_input, step_type=time_steps.step_type, training=training
)
critic_loss = self._td_errors_loss_fn(td_targets, q_values)
if nest_utils.is_batched_nested_tensors(
time_steps, self.time_step_spec, num_outer_dims=2
):
# Do a sum over the time dimension.
critic_loss = tf.reduce_sum(critic_loss, axis=1)
if weights is not None:
critic_loss *= weights
critic_loss = tf.reduce_mean(critic_loss)
with tf.name_scope('Losses/'):
tf.compat.v2.summary.scalar(
name='critic_loss', data=critic_loss, step=self.train_step_counter
)
if self._debug_summaries:
td_errors = td_targets - q_values
common.generate_tensor_summaries(
'td_errors', td_errors, self.train_step_counter
)
common.generate_tensor_summaries(
'td_targets', td_targets, self.train_step_counter
)
common.generate_tensor_summaries(
'q_values', q_values, self.train_step_counter
)
return critic_loss
def actor_loss(
self,
time_steps: ts.TimeStep,
weights: Optional[types.Tensor] = None,
training: bool = False,
) -> types.Tensor:
"""Computes the actor_loss for DDPG training.
Args:
time_steps: A batch of timesteps.
weights: Optional scalar or element-wise (per-batch-entry) importance
weights.
training: Whether this loss is being used for training.
Returns:
actor_loss: A scalar actor loss.
"""
# TODO(b/124383618): Add an action norm regularizer.
with tf.name_scope('actor_loss'):
actions, _ = self._actor_network(
time_steps.observation,
step_type=time_steps.step_type,
training=training,
)
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(actions)
q_values, _ = self._critic_network(
(time_steps.observation, actions),
step_type=time_steps.step_type,
training=False,
)
actions = tf.nest.flatten(actions)
dqdas = tape.gradient([q_values], actions)
actor_losses = []
for dqda, action in zip(dqdas, actions):
if self._dqda_clipping is not None:
dqda = tf.clip_by_value(
dqda, -1 * self._dqda_clipping, self._dqda_clipping
)
loss = common.element_wise_squared_loss(
tf.stop_gradient(dqda + action), action
)
if nest_utils.is_batched_nested_tensors(
time_steps, self.time_step_spec, num_outer_dims=2
):
# Sum over the time dimension.
loss = tf.reduce_sum(loss, axis=1)
if weights is not None:
loss *= weights
loss = tf.reduce_mean(loss)
actor_losses.append(loss)
actor_loss = tf.add_n(actor_losses)
with tf.name_scope('Losses/'):
tf.compat.v2.summary.scalar(
name='actor_loss', data=actor_loss, step=self.train_step_counter
)
return actor_loss
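# A small numeric sketch of the soft target update performed by _get_target_updater,
# i.e. w_t = (1 - tau) * w_t + tau * w_s; the weight and tau values below are
# illustrative only.
if __name__ == '__main__':
  w_source, w_target, tau = 1.0, 0.0, 0.05
  for _ in range(3):
    w_target = (1 - tau) * w_target + tau * w_source
  print(round(w_target, 6))  # 0.142625 -- the target weight drifts slowly toward the source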
|
fc6fe0a1c1d19798d76873ec4f8f487d22a24113
|
c7b8c95f698c7cf5a90084a5acb88c763bee4a38
|
/examples/conways_game_of_life.py
|
898b95c2ce98fb70df6bedf9747d7b6aadc72947
|
[
"MIT"
] |
permissive
|
moderngl/moderngl
|
312d6760efba9782f9228159a0b556715bb8fbbe
|
8b5ecd86b073b97bce90633a39132c7052b8cc90
|
refs/heads/master
| 2023-08-31T09:19:52.007426
| 2023-07-22T17:48:23
| 2023-07-22T17:48:23
| 53,077,133
| 1,279
| 157
|
MIT
| 2023-09-05T03:41:37
| 2016-03-03T19:28:28
|
Python
|
UTF-8
|
Python
| false
| false
| 5,607
|
py
|
conways_game_of_life.py
|
"""
A Game of Life implementation using transform feedback.
We calculate the next state of the map with transform()
meaning a vertex shader will generate the new state into a buffer.
This buffer is then written into the texture we display.
This is a fast vram to vram copy.
Comments:
Another way to do this is simply rendering to Framebuffers.
"""
import numpy as np
import moderngl
from ported._example import Example
class Conway(Example):
title = "Conway's Game of Life"
window_size = 800, 800
def __init__(self, **kwargs):
super().__init__(**kwargs)
# How often the map should be updated
        self.update_delay = 1 / 60  # seconds between map updates (i.e. up to 60 updates per second)
self.last_updated = 0
# size of the map
self.width, self.height = 400, 400
# Force the window to calculate black borders if needed to retain the aspect ratio
self.wnd.fixed_aspect_ratio = self.width / self.height
# Initial state of the map (random)
pixels = np.round(np.random.rand(self.width, self.height)).astype('f4')
# Program drawing the result to the screen.
# This is rendered simply using a textured screen aligned triangle strip
self.display_prog = self.ctx.program(
vertex_shader='''
#version 330
in vec2 in_vert;
in vec2 in_texcoord;
out vec2 v_text;
void main() {
v_text = in_texcoord;
gl_Position = vec4(in_vert, 0.0, 1.0);
}
''',
fragment_shader='''
#version 330
                // Will read from texture bound to channel / location 0 by default
uniform sampler2D Texture;
// Interpolated texture coordinate from vertex shader
in vec2 v_text;
// The fragment ending up on the screen
out vec4 f_color;
void main() {
f_color = texture(Texture, v_text);
}
''',
)
# Program calculating the next state of the map
self.transform_prog = self.ctx.program(
vertex_shader='''
#version 330
uniform sampler2D Texture;
out float out_vert;
#define LIVING 0.0
#define DEAD 1.0
bool cell(int x, int y) {
// get the texture size
ivec2 tSize = textureSize(Texture, 0).xy;
// Ensure lookups are not going outside the texture area because
                    // texelFetch does not support texture repeat / wrap modes
return texelFetch(Texture, ivec2((x + tSize.x) % tSize.x, (y + tSize.y) % tSize.y), 0).r < 0.5;
}
void main() {
int width = textureSize(Texture, 0).x;
ivec2 in_text = ivec2(gl_VertexID % width, gl_VertexID / width);
bool living = cell(in_text.x, in_text.y);
int neighbours = 0;
if (cell(in_text.x - 1, in_text.y - 1)) neighbours++;
if (cell(in_text.x - 1, in_text.y + 0)) neighbours++;
if (cell(in_text.x - 1, in_text.y + 1)) neighbours++;
if (cell(in_text.x + 1, in_text.y - 1)) neighbours++;
if (cell(in_text.x + 1, in_text.y + 0)) neighbours++;
if (cell(in_text.x + 1, in_text.y + 1)) neighbours++;
if (cell(in_text.x + 0, in_text.y + 1)) neighbours++;
if (cell(in_text.x + 0, in_text.y - 1)) neighbours++;
if (living) {
out_vert = (neighbours == 2 || neighbours == 3) ? LIVING : DEAD;
} else {
out_vert = (neighbours == 3) ? LIVING : DEAD;
}
}
''',
varyings=['out_vert']
)
# Create the map texture
self.texture = self.ctx.texture((self.width, self.height), 1, pixels.tobytes(), dtype='f4')
self.texture.filter = moderngl.NEAREST, moderngl.NEAREST
self.texture.swizzle = 'RRR1' # What components texelFetch will get from the texture (in shader)
# A quad covering the screen with texture coordinates
self.vbo = self.ctx.buffer(np.array([
# x y u v
-1.0, -1.0, 0, 0, # lower left
-1.0, 1.0, 0, 1, # upper left
1.0, -1.0, 1, 0, # lower right
1.0, 1.0, 1, 1, # upper right
], dtype="f4"))
self.vao = self.ctx.simple_vertex_array(self.display_prog, self.vbo, 'in_vert', 'in_texcoord')
# Transform vertex array to generate new map state
self.tao = self.ctx.vertex_array(self.transform_prog, [])
self.pbo = self.ctx.buffer(reserve=pixels.nbytes) # buffer to store the result
def render(self, time, frame_time):
self.ctx.clear(1.0, 1.0, 1.0)
# Bind texture to channel 0
self.texture.use(location=0)
if time - self.last_updated > self.update_delay:
# Generate the new map and write that to the pbo buffer
self.tao.transform(self.pbo, vertices=self.width * self.height)
# Copy the pbo into the texture
self.texture.write(self.pbo)
self.last_updated = time
# Render the texture
self.vao.render(moderngl.TRIANGLE_STRIP)
if __name__ == '__main__':
Conway.run()
|
1ef681a343699f362b1ae1d6a9fc822f73ad3cb5
|
85373d45a83e4096affafa4f4e5b400787413e57
|
/test/programytest/parser/pattern/matching/test_basic_branches.py
|
b0e854318cfdff04250881c3dc28c99f00e0301c
|
[
"MIT"
] |
permissive
|
keiffster/program-y
|
a02bb9d8278835547cc875f4f9cd668d5b1f44da
|
fc7b0a3afa4fa6ed683e0c817a9aa89f9543bb20
|
refs/heads/master
| 2023-08-23T13:55:39.255535
| 2022-12-13T09:51:57
| 2022-12-13T09:51:57
| 74,462,571
| 379
| 173
|
NOASSERTION
| 2023-05-23T00:51:21
| 2016-11-22T10:43:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,512
|
py
|
test_basic_branches.py
|
from programytest.parser.pattern.matching.base import PatternMatcherBaseClass
class PatternMatcherTests(PatternMatcherBaseClass):
def test_basic_tree_matching_no_wildcards(self):
self.add_pattern_to_graph(pattern="A B D E F", topic="X", that="Y", template="1")
self.add_pattern_to_graph(pattern="A B D E F", topic="X", that="Z", template="2")
self.add_pattern_to_graph(pattern="A B D G", topic="X", that="Z", template="3")
context = self.match_sentence("A B D E F", topic="X", that="Z")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node)
self.assertEqual("2", context.template_node.template.word)
def test_basic_multi_tree_matching_no_wildcards(self):
self.add_pattern_to_graph(pattern="A B D", topic="X", that="Y", template="1")
self.add_pattern_to_graph(pattern="A B D", topic="X", that="Z", template="2")
self.add_pattern_to_graph(pattern="A B D", topic="X", that="X", template="3")
context = self.match_sentence("A B D", topic="X", that="Y")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node)
self.assertEqual("PTEMPLATE [*] [P(0)^(0)#(0)C(0)_(0)*(0)To(0)Th(0)Te(1)]", context.template_node.to_string())
context = self.match_sentence("A B D", topic="X", that="Z")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node)
self.assertEqual("2", context.template_node.template.word)
|
c45e4fe14eb401f2621656ff36b57a3627c39421
|
f7982a468b6f76dc72c53e7c3644ae4e7e6f2f49
|
/pyEX/refdata/symbols/fx.py
|
07d0ed7015c14f90245926a4dc5a313e714d56cf
|
[
"Apache-2.0"
] |
permissive
|
timkpaine/pyEX
|
55002c3718214c6e207976ab3661a47108c6c114
|
f678c791d05bc28911e25807241c392a9ee8134f
|
refs/heads/main
| 2023-08-20T00:17:53.162803
| 2022-11-22T02:51:13
| 2022-11-22T02:51:13
| 109,551,372
| 350
| 95
|
Apache-2.0
| 2023-09-11T12:26:54
| 2017-11-05T04:21:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,615
|
py
|
fx.py
|
# *****************************************************************************
#
# Copyright (c) 2021, the pyEX authors.
#
# This file is part of the pyEX library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from functools import wraps
import pandas as pd
from ...common import _UTC, _expire, _get, _reindex
@_expire(hour=8, tz=_UTC)
def fxSymbols(token="", version="stable", filter="", format="json"):
"""This call returns a list of supported currencies and currency pairs.
https://iexcloud.io/docs/api/#fx-symbols
7am, 9am, UTC daily
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Returns:
dict or DataFrame or list: result
"""
return _get(
"ref-data/fx/symbols",
token=token,
version=version,
filter=filter,
format=format,
)
@wraps(fxSymbols)
def fxSymbolsDF(token="", version="stable"):
fx = fxSymbols(token, version)
df1 = pd.DataFrame(fx["currencies"])
df2 = pd.DataFrame(fx["pairs"])
_reindex(df1, "code")
df1.sort_index(inplace=True)
df2.sort_index(inplace=True)
return [df1, df2]
@wraps(fxSymbols)
def fxSymbolsList(*args, **kwargs):
fx = fxSymbols(*args, **kwargs)
ret = [[], []]
for c in fx["currencies"]:
ret[0].append(c["code"])
for p in fx["pairs"]:
ret[1].append(p["fromCurrency"] + p["toCurrency"])
return sorted(ret)
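# A minimal usage sketch; the token below is a placeholder and the call requires
# network access to IEX Cloud.
if __name__ == "__main__":
    fx = fxSymbols(token="pk_your_token_here")
    print(len(fx["currencies"]), len(fx["pairs"]))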
|
c53a281d5bff6395c4c27d0cb67da3770b4bda48
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/FWCore/Framework/test/test_get_by_type_cfg.py
|
99ad74c48fb195e51008a978dcd958857aaf9447
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 1,229
|
py
|
test_get_by_type_cfg.py
|
import FWCore.ParameterSet.Config as cms
import argparse
import sys
parser = argparse.ArgumentParser(prog=sys.argv[0], description='Test getting many DataProducts just by type.')
parser.add_argument("--useEDAlias", action="store_true", help="add an EDAlias for one of the modules")
argv = sys.argv[:]
if '--' in argv:
argv.remove("--")
args, unknown = parser.parse_known_args(argv)
process = cms.Process("TEST")
process.source = cms.Source("EmptySource")
process.maxEvents.input = 1
process.a = cms.EDProducer("IntProducer", ivalue = cms.int32(1))
process.b = cms.EDProducer("IntProducer", ivalue = cms.int32(10))
process.c = cms.EDProducer("IntProducer", ivalue = cms.int32(100))
if args.useEDAlias:
process.d = cms.EDAlias(a = cms.VPSet(cms.PSet(type = cms.string('*'))))
print("turned on useEDAlias")
process.add = cms.EDProducer("AddAllIntsProducer")
process.test = cms.EDAnalyzer("BuiltinIntTestAnalyzer",
valueMustMatch = cms.untracked.int32(111),
moduleLabel = cms.untracked.InputTag("add")
)
process.p = cms.Path(process.add, cms.Task(process.a, process.b, process.c))
process.e = cms.EndPath(process.test)
|
0d7c66093287fa80f8ea29c3a01216ab2a2eb725
|
4fd65dc15ed0e5849c440a41d81036d1ff47ea96
|
/tests/unit/tools_tests/test_rest.py
|
33a78bf2eb9ef3f5135191412735e458fdce0d3a
|
[
"MIT"
] |
permissive
|
tableau/TabPy
|
20ae3dacb958bf2d0e48fc36220366cb3db412bb
|
96aa26252b6115bd2788f9526680ec1b34f1c86f
|
refs/heads/master
| 2023-08-29T13:47:21.507211
| 2023-06-21T21:30:40
| 2023-06-21T21:30:40
| 69,400,040
| 1,527
| 633
|
MIT
| 2023-06-21T21:30:42
| 2016-09-27T21:26:03
|
Python
|
UTF-8
|
Python
| false
| false
| 7,431
|
py
|
test_rest.py
|
import json
import requests
from requests.auth import HTTPBasicAuth
from tabpy.tabpy_tools.rest import RequestsNetworkWrapper, ServiceClient
import unittest
from unittest.mock import Mock
class TestRequestsNetworkWrapper(unittest.TestCase):
def test_init(self):
RequestsNetworkWrapper()
def test_init_with_session(self):
session = {}
rnw = RequestsNetworkWrapper(session=session)
self.assertIs(session, rnw.session)
def mock_response(self, status_code):
response = Mock(requests.Response)
response.json.return_value = "json"
response.status_code = status_code
return response
def setUp(self):
session = Mock(requests.Session)
session.get.return_value = self.mock_response(200)
session.post.return_value = self.mock_response(200)
session.put.return_value = self.mock_response(200)
session.delete.return_value = self.mock_response(204)
self.rnw = RequestsNetworkWrapper(session=session)
def test_GET(self):
url = "abc"
data = {"foo": "bar"}
self.assertEqual(self.rnw.GET(url, data), "json")
self.rnw.session.get.assert_called_once_with(
url, params=data, timeout=None, auth=None
)
def test_GET_InvalidData(self):
url = "abc"
data = {"cat"}
with self.assertRaises(TypeError):
self.rnw.session.get.return_value = self.mock_response(404)
self.rnw.GET(url, data)
def test_GET_InvalidURL(self):
url = ""
data = {"foo": "bar"}
with self.assertRaises(TypeError):
self.rnw.session.get.return_value = self.mock_response(404)
self.rnw.GET(url, data)
def test_POST(self):
url = "abc"
data = {"foo": "bar"}
self.assertEqual(self.rnw.POST(url, data), "json")
self.rnw.session.post.assert_called_once_with(
url,
data=json.dumps(data),
headers={"content-type": "application/json"},
timeout=None,
auth=None,
)
def test_POST_InvalidURL(self):
url = ""
data = {"foo": "bar"}
with self.assertRaises(TypeError):
self.rnw.session.post.return_value = self.mock_response(404)
self.rnw.POST(url, data)
def test_POST_InvalidData(self):
url = "url"
data = {"cat"}
with self.assertRaises(TypeError):
self.rnw.POST(url, data)
def test_PUT(self):
url = "abc"
data = {"foo": "bar"}
self.assertEqual(self.rnw.PUT(url, data), "json")
self.rnw.session.put.assert_called_once_with(
url,
data=json.dumps(data),
headers={"content-type": "application/json"},
timeout=None,
auth=None,
)
def test_PUT_InvalidData(self):
url = "url"
data = {"cat"}
with self.assertRaises(TypeError):
self.rnw.PUT(url, data)
def test_PUT_InvalidURL(self):
url = ""
data = {"foo:bar"}
with self.assertRaises(TypeError):
self.rnw.PUT(url, data)
def test_DELETE(self):
url = "abc"
data = {"foo": "bar"}
self.assertIs(self.rnw.DELETE(url, data), None)
self.rnw.session.delete.assert_called_once_with(
url, data=json.dumps(data), timeout=None, auth=None
)
def test_DELETE_InvalidData(self):
url = "abc"
data = {"cat"}
with self.assertRaises(TypeError):
self.rnw.DELETE(url, data)
def test_DELETE_InvalidURL(self):
url = ""
data = {"foo:bar"}
with self.assertRaises(TypeError):
self.rnw.DELETE(url, data)
def test_set_credentials(self):
expected_auth = None
self.assertEqual(self.rnw.auth, expected_auth)
username, password = "username", "password"
expected_auth = HTTPBasicAuth(username, password)
self.rnw.set_credentials(username, password)
self.assertEqual(self.rnw.auth, expected_auth)
def _test_METHOD_with_credentials(
self,
http_method_function,
http_session_method_function,
headers=None,
params=False,
data=False,
response=None,
):
username, password = "username", "password"
self.rnw.set_credentials(username, password)
url = "url"
_data = {"foo": "bar"}
self.assertEqual(http_method_function(url, _data), response)
        pargs = (url,)
kwargs = {"timeout": None, "auth": self.rnw.auth}
if data:
kwargs["data"] = json.dumps(_data)
if headers:
kwargs["headers"] = headers
if params:
kwargs["params"] = _data
http_session_method_function.assert_called_once_with(*pargs, **kwargs)
self.assertEqual(self.rnw.auth, HTTPBasicAuth(username, password))
def test_GET_with_credentials(self):
self._test_METHOD_with_credentials(
self.rnw.GET, self.rnw.session.get, params=True, response="json"
)
def test_POST_with_credentials(self):
self._test_METHOD_with_credentials(
self.rnw.POST,
self.rnw.session.post,
headers={"content-type": "application/json"},
data=True,
response="json",
)
def test_PUT_with_credentials(self):
self._test_METHOD_with_credentials(
self.rnw.PUT,
self.rnw.session.put,
data=True,
headers={"content-type": "application/json"},
response="json",
)
def test_DELETE_with_credentials(self):
self._test_METHOD_with_credentials(
self.rnw.DELETE, self.rnw.session.delete, data=True
)
class TestServiceClient(unittest.TestCase):
def setUp(self):
nw = Mock(RequestsNetworkWrapper())
nw.GET.return_value = "GET"
nw.POST.return_value = "POST"
nw.PUT.return_value = "PUT"
nw.DELETE.return_value = "DELETE"
self.sc = ServiceClient("endpoint/", network_wrapper=nw)
self.scClientDoesNotEndWithSlash = ServiceClient("endpoint", network_wrapper=nw)
def test_GET(self):
self.assertEqual(self.sc.GET("test"), "GET")
self.sc.network_wrapper.GET.assert_called_once_with("endpoint/test", None, None)
def test_POST(self):
self.assertEqual(self.sc.POST("test"), "POST")
self.sc.network_wrapper.POST.assert_called_once_with(
"endpoint/test", None, None
)
def test_PUT(self):
self.assertEqual(self.sc.PUT("test"), "PUT")
self.sc.network_wrapper.PUT.assert_called_once_with("endpoint/test", None, None)
def test_DELETE(self):
self.assertEqual(self.sc.DELETE("test"), None)
self.sc.network_wrapper.DELETE.assert_called_once_with(
"endpoint/test", None, None
)
def test_FixEndpoint(self):
self.assertEqual(self.scClientDoesNotEndWithSlash.GET("test"), "GET")
self.sc.network_wrapper.GET.assert_called_once_with("endpoint/test", None, None)
def test_set_credentials(self):
username, password = "username", "password"
self.sc.set_credentials(username, password)
self.sc.network_wrapper.set_credentials.assert_called_once_with(
username, password
)
|
0160c39eece6a468147d7a38c2e2b565cd1e0cf2
|
1c790b0adc648ff466913cf4aed28ace905357ff
|
/ci_test/unit_tests/test_unit_callback_ltfb.py
|
ce61c09d970da3ca10aa288e04fbe51b6a4cf327
|
[
"Apache-2.0"
] |
permissive
|
LLNL/lbann
|
04d5fdf443d6b467be4fa91446d40b620eade765
|
e8cf85eed2acbd3383892bf7cb2d88b44c194f4f
|
refs/heads/develop
| 2023-08-23T18:59:29.075981
| 2023-08-22T22:16:48
| 2023-08-22T22:16:48
| 58,576,874
| 225
| 87
|
NOASSERTION
| 2023-09-11T22:43:32
| 2016-05-11T20:04:20
|
C++
|
UTF-8
|
Python
| false
| false
| 10,526
|
py
|
test_unit_callback_ltfb.py
|
"""Test to check weight exchanges in LTFB.
Each model has a randomly initialized weights object. An LTFB round is
performed after every training step and winners are chosen randomly.
The log files are post-processed to make sure that the correct weights
are propagated by LTFB.
"""
import os
import os.path
import random
import re
import sys
# Bamboo utilities
current_file = os.path.realpath(__file__)
current_dir = os.path.dirname(current_file)
sys.path.insert(0, os.path.join(os.path.dirname(current_dir), 'common_python'))
import tools
# ==============================================
# Objects for Python data reader
# ==============================================
# Note: The Python data reader imports this file as a module and calls
# the functions below to ingest data.
# RNG
rng_pid = None
def initialize_rng():
"""Initialize random seed if needed.
Seed should be initialized independently on each process. We
reinitialize if we detect a process fork.
"""
global rng_pid
if rng_pid != os.getpid():
rng_pid = os.getpid()
random.seed()
# Sample access functions
_mini_batch_size = 2
_num_epochs = 5
def get_sample(index):
initialize_rng()
return (random.gauss(0,1),)
def num_samples():
return _mini_batch_size
def sample_dims():
return (1,)
# ==============================================
# Setup LBANN experiment
# ==============================================
def setup_experiment(lbann, weekly):
"""Construct LBANN experiment.
Args:
lbann (module): Module for LBANN Python frontend
"""
trainer = lbann.Trainer(_mini_batch_size)
model = construct_model(lbann)
data_reader = construct_data_reader(lbann)
optimizer = lbann.NoOptimizer()
return trainer, model, data_reader, optimizer, None # Don't request any specific number of nodes
def construct_model(lbann):
"""Construct LBANN model.
Args:
lbann (module): Module for LBANN Python frontend
"""
# Layer graph
weight = lbann.Weights(initializer=lbann.UniformInitializer(min=0, max=1))
weight = lbann.WeightsLayer(weights=weight, dims=[1])
rand = lbann.Input(data_field='samples')
layers = list(lbann.traverse_layer_graph([weight, rand]))
for l in layers:
l.device = 'CPU'
# Model objects
metrics = [
lbann.Metric(weight, name='weight'),
lbann.Metric(rand, name='random'),
]
callbacks = [
lbann.CallbackPrint(),
lbann.CallbackLTFB(
batch_interval=1,
metric='random',
communication_algorithm='checkpoint_binary',
),
]
# Construct model
return lbann.Model(_num_epochs,
layers=layers,
metrics=metrics,
callbacks=callbacks)
def construct_data_reader(lbann):
"""Construct Protobuf message for Python data reader.
The Python data reader will import the current Python file to
access the sample access functions.
Args:
lbann (module): Module for LBANN Python frontend
"""
# Note: The training data reader should be removed when
# https://github.com/LLNL/lbann/issues/1098 is resolved.
message = lbann.reader_pb2.DataReader()
message.reader.extend([
tools.create_python_data_reader(
lbann,
current_file,
'get_sample',
'num_samples',
'sample_dims',
'train',
),
tools.create_python_data_reader(
lbann,
current_file,
'get_sample',
'num_samples',
'sample_dims',
'validate',
),
tools.create_python_data_reader(
lbann,
current_file,
'get_sample',
'num_samples',
'sample_dims',
'tournament',
),
])
return message
# ==============================================
# Setup PyTest
# ==============================================
def augment_test_func(test_func):
"""Augment test function to parse log files.
`tools.create_tests` creates functions that run an LBANN
experiment. This function creates augmented functions that parse
the log files after LBANN finishes running, e.g. to check metrics
or runtimes.
Note: The naive approach is to define the augmented test functions
in a loop. However, Python closures are late binding. In other
words, the function would be overwritten every time we define it.
We get around this overwriting problem by defining the augmented
function in the local scope of another function.
Args:
test_func (function): Test function created by
`tools.create_tests`.
Returns:
function: Test that can interact with PyTest.
"""
test_name = test_func.__name__
# Define test function
def func(cluster, dirname, weekly):
# Run LBANN experiment
experiment_output = test_func(cluster, dirname, weekly)
# Parse LBANN log file
num_trainers = None
log_file = experiment_output['stdout_log_file']
with open(log_file) as f:
for line in f:
# Configure data once we figure out number of trainers
if num_trainers is None:
match = re.search('Trainers *: ([0-9]+)', line)
if match:
num_trainers = int(match.group(1))
else:
continue
ltfb_partners = [[] for _ in range(num_trainers)]
ltfb_winners = [[] for _ in range(num_trainers)]
tournament_metrics = [[] for _ in range(num_trainers)]
validation_metrics = [[] for _ in range(num_trainers)]
# LTFB tournament winners
match = re.search(
'LTFB .* '
'trainer ([0-9]+) selected model from trainer ([0-9]+) '
'\\(trainer [0-9]+ score .* trainer ([0-9]+) score.*\\)',
line)
if match:
trainer = int(match.group(1))
winner = int(match.group(2))
partner = int(match.group(3))
ltfb_partners[trainer].append(partner)
ltfb_winners[trainer].append(winner)
# Metric value on tournament set
match = re.search(
'model0 \\(instance ([0-9]+)\\) tournament weight : '
'([0-9.]+)',
line)
if match:
trainer = int(match.group(1))
tournament_metrics[trainer].append(float(match.group(2)))
# Metric value on validation set
match = re.search(
'model0 \\(instance ([0-9]+)\\) validation weight : '
'([0-9.]+)',
line)
if match:
trainer = int(match.group(1))
validation_metrics[trainer].append(float(match.group(2)))
# Make sure file has been parsed correctly
assert num_trainers, \
f'Error parsing {log_file} (could not find number of trainers)'
for trainer, partners in enumerate(ltfb_partners):
assert len(partners) == _num_epochs-1, \
f'Error parsing {log_file} ' \
f'(expected {_num_epochs-1} LTFB rounds, ' \
f'but found {len(partners)} for trainer {trainer})'
for trainer, winners in enumerate(ltfb_winners):
assert len(winners) == _num_epochs-1, \
f'Error parsing {log_file} ' \
f'(expected {_num_epochs-1} LTFB rounds, ' \
f'but found {len(winners)} for trainer {trainer})'
for trainer, vals in enumerate(validation_metrics):
assert len(vals) == _num_epochs, \
f'Error parsing {log_file} ' \
f'(expected {_num_epochs} validation metric values, ' \
                f'but found {len(vals)} for trainer {trainer})'
for trainer, vals in enumerate(tournament_metrics):
assert len(vals) == 2*(_num_epochs-1), \
f'Error parsing {log_file} ' \
                f'(expected {2*(_num_epochs-1)} tournament metric values, ' \
                f'but found {len(vals)} for trainer {trainer})'
# Make sure metric values match expected values
# Note: An LTFB round occurs once per training epoch
# (excluding the first epoch). Each LTFB round involves two
# evaluations on the tournament set: once on the local model
# and once on a model from a partner trainer. At the end of
        # each training epoch, we perform an evaluation on the
# validation set. By inspecting the metric values
# (corresponding to the model weight), we can make sure that
# LTFB is evaluating on the correct models.
tol = 1e-4
for step in range(_num_epochs-1):
for trainer in range(num_trainers):
partner = ltfb_partners[trainer][step]
winner = ltfb_winners[trainer][step]
local_val = tournament_metrics[trainer][2*step]
partner_val = tournament_metrics[trainer][2*step+1]
winner_val = validation_metrics[trainer][step+1]
true_local_val = validation_metrics[trainer][step]
true_partner_val = validation_metrics[partner][step]
true_winner_val = validation_metrics[winner][step]
assert true_local_val-tol < local_val < true_local_val+tol, \
'Incorrect metric value for LTFB local model'
assert true_partner_val-tol < partner_val < true_partner_val+tol, \
'Incorrect metric value for LTFB partner model'
assert true_winner_val-tol < winner_val < true_winner_val+tol, \
'Incorrect metric value for LTFB winner model'
# Return test function from factory function
func.__name__ = test_name
return func
# Create test functions that can interact with PyTest
for _test_func in tools.create_tests(setup_experiment,
__file__,
nodes=2,
lbann_args='--procs_per_trainer=2'):
globals()[_test_func.__name__] = augment_test_func(_test_func)
|
a0c0675c49f0101ccf3749a3124351af0215f60b
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/model_control/gen_all.py
|
bcca3e30cf43df96e3870e77a46fe0dfb8afce9f
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219
| 2023-07-28T20:15:53
| 2023-07-28T20:15:53
| 70,790,978
| 457
| 77
|
BSD-3-Clause
| 2023-03-08T21:45:40
| 2016-10-13T09:30:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,728
|
py
|
gen_all.py
|
import os
def createDirIfNeeded(dirname):
try:
os.mkdir(dirname);
    except OSError:
pass
lKnownTransformations = ['None', 'Difference', 'RelativeDifference',
'Integration', 'BoxCox',
'Quantization', 'Logit',
'Fisher', 'Anscombe'];
lKnownTrends = ['ConstantTrend',
'Lag1Trend', 'LinearTrend', 'PolyTrend',
'MovingAverage', 'MovingMedian'];
lKnownPeriodics = ['NoCycle', 'BestCycle',
'Seasonal_MonthOfYear' ,
'Seasonal_Second' ,
'Seasonal_Minute' ,
'Seasonal_Hour' ,
'Seasonal_DayOfWeek' ,
'Seasonal_DayOfMonth',
'Seasonal_WeekOfYear'];
lKnownAutoRegressions = ['NoAR' , 'AR' , 'ARX' , 'SVR' , 'MLP' , 'LSTM'];
createDirIfNeeded("tests/model_control/detailed/");
for transf in lKnownTransformations:
createDirIfNeeded("tests/model_control/detailed/transf_" + transf);
for trend in lKnownTrends:
for per in lKnownPeriodics:
for autoreg in lKnownAutoRegressions:
filename= "tests/model_control/detailed/transf_" + str(transf) + "/model_control_one_enabled_" + str(transf) + "_" + str(trend) + "_" + str(per) + "_" + str(autoreg) + ".py";
file = open(filename, "w");
print("WRTITING_FILE" , filename);
file.write("import tests.model_control.test_ozone_custom_models_enabled as testmod\n");
file.write("\n\ntestmod.build_model( ['" + str(transf) + "'] , ['" + str(trend) + "'] , ['" + str(per) + "'] , ['" + str(autoreg) + "'] );");
file.close();
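# Example of what one generated file looks like (illustrative, derived from the
# loop above): for transf='None', trend='ConstantTrend', per='NoCycle',
# autoreg='NoAR' the script writes
#   tests/model_control/detailed/transf_None/model_control_one_enabled_None_ConstantTrend_NoCycle_NoAR.py
# containing:
#   import tests.model_control.test_ozone_custom_models_enabled as testmod
#   testmod.build_model( ['None'] , ['ConstantTrend'] , ['NoCycle'] , ['NoAR'] );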
|
4d158dfe64dc44b26e75fb40d54d66257a51f81f
|
ba6d757accea216af1a941046551d72afc39c744
|
/cpi.py
|
331bfaab11cde7ccd91c58dd18c83586b1ebf4d2
|
[] |
no_license
|
tablespoon/fun
|
31d4910f0b9d776037621785f1219b4157115cb7
|
2789e41b731066b664e7017811819bebad427dee
|
refs/heads/master
| 2022-11-30T03:07:21.502549
| 2022-11-25T23:20:08
| 2022-11-25T23:20:08
| 10,951,432
| 283
| 73
| null | 2023-08-17T08:59:00
| 2013-06-25T21:57:29
|
Shell
|
UTF-8
|
Python
| false
| false
| 209
|
py
|
cpi.py
|
#!/usr/bin/python
trials=10000000
from random import random
def calc():
counter=0
for i in xrange(trials):
if random()**2 + random()**2 <= 1:
counter+=1
return float(counter)/trials*4
print calc()
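# Why this works (a brief sketch, not part of the original script): the two
# random() calls pick a point uniformly in the unit square, and that point
# falls inside the quarter circle of radius 1 with probability
#     P(x^2 + y^2 <= 1) = (pi * 1^2 / 4) / 1 = pi / 4,
# so (counter / trials) * 4 is a Monte Carlo estimate of pi. With
# trials = 10**7 the estimate is typically within a few thousandths of
# 3.14159 (the error shrinks roughly like 1 / sqrt(trials)).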
|
ce402b60bfabd1db5784a90e5abb15fcc10145f4
|
6fdb4eaf5b0e6dbd7db4bf947547541e9aebf110
|
/g-code-testing/g_code_parsing/g_code_functionality_defs/thermocycler/edit_pid_params_g_code_functionality_def.py
|
9d80b9e20823c3a9f25a50b50a848a3b0be72e07
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
Opentrons/opentrons
|
874321e01149184960eeaeaa31b1d21719a1ceda
|
026b523c8c9e5d45910c490efb89194d72595be9
|
refs/heads/edge
| 2023-09-02T02:51:49.579906
| 2023-08-31T16:02:45
| 2023-08-31T16:02:45
| 38,644,841
| 326
| 174
|
Apache-2.0
| 2023-09-14T21:47:20
| 2015-07-06T20:41:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,297
|
py
|
edit_pid_params_g_code_functionality_def.py
|
from typing import Dict
from string import Template
from enum import Enum
from g_code_parsing.g_code_functionality_defs.g_code_functionality_def_base import (
GCodeFunctionalityDefBase,
)
class EditPIDParamsGCodeFunctionalityDef(GCodeFunctionalityDefBase):
# Using this list to output string in specific order
EXPECTED_ARGS = ["P", "I", "D"]
class ValDefinedMessage(str, Enum):
P = "\n\tKp: $val"
I = "\n\tKi: $val" # noqa: E741
D = "\n\tKd: $val"
@classmethod
def _generate_command_explanation(cls, g_code_args: Dict[str, str]) -> str:
message_list = []
for arg in cls.EXPECTED_ARGS:
g_code_arg_val = g_code_args.get(arg)
if g_code_arg_val is not None:
message_temp = Template(cls.ValDefinedMessage[arg].value)
message = message_temp.substitute(val=g_code_arg_val)
message_list.append(message)
return f'Editing PID values to the following:{"".join(message_list)}'
@classmethod
def _generate_response_explanation(cls, response: str) -> str:
message = "Edit successful"
if "ERROR" in response.upper() and "BUSY" in response.upper():
message = "Cannot set PID values. Thermocycler busy"
return message
|
fb110cc089074f3be0fcd3eac9c5e943ed183760
|
93134d8429cc7c5251ea76e19bf1856466bd2b48
|
/trajnetbaselines/classical/__init__.py
|
916886e9a6e33fc0df7f7ae1c358d1c0ec0efae7
|
[
"MIT"
] |
permissive
|
vita-epfl/trajnetplusplusbaselines
|
0bd7ce75740f3ed39ba82d1bfaa5d3c279da474f
|
99a6e9d8675face1aeeb17227b73dd3d1267f463
|
refs/heads/master
| 2023-04-14T23:21:54.217111
| 2022-10-04T09:52:01
| 2022-10-04T09:52:01
| 218,013,163
| 221
| 83
|
MIT
| 2023-04-04T15:07:11
| 2019-10-28T09:42:36
|
Python
|
UTF-8
|
Python
| false
| false
| 126
|
py
|
__init__.py
|
from .socialforce import predict
from .orca import predict
from .kalman import predict
from .constant_velocity import predict
|
e65bc2a82ec6af8da2a58c1f17a2dfd883891f3c
|
9bc318535bbcaaa7fb15a18929fc11a2bbf531d1
|
/satori-rules/plugin/libs/gevent/util.py
|
1438688e2dfffb7c000e64e18f3cd88405a6af95
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
leancloud/satori
|
dcab126548a54fde6d02d79053b239456439d211
|
701caccbd4fe45765001ca60435c0cb499477c03
|
refs/heads/master
| 2022-12-10T23:33:53.046905
| 2021-04-08T08:20:45
| 2021-04-08T08:20:45
| 67,022,336
| 259
| 89
|
Apache-2.0
| 2022-12-08T02:12:01
| 2016-08-31T09:13:02
|
Python
|
UTF-8
|
Python
| false
| false
| 1,805
|
py
|
util.py
|
# Copyright (c) 2009 Denis Bilenko. See LICENSE for details.
"""
Low-level utilities.
"""
from __future__ import absolute_import
import functools
__all__ = ['wrap_errors']
class wrap_errors(object):
"""
Helper to make function return an exception, rather than raise it.
Because every exception that is unhandled by greenlet will be logged,
it is desirable to prevent non-error exceptions from leaving a greenlet.
    This can be done with a simple ``try/except`` construct::
def wrapped_func(*args, **kwargs):
try:
return func(*args, **kwargs)
except (TypeError, ValueError, AttributeError) as ex:
return ex
This class provides a shortcut to write that in one line::
wrapped_func = wrap_errors((TypeError, ValueError, AttributeError), func)
It also preserves ``__str__`` and ``__repr__`` of the original function.
"""
# QQQ could also support using wrap_errors as a decorator
def __init__(self, errors, func):
"""
Calling this makes a new function from *func*, such that it catches *errors* (an
:exc:`BaseException` subclass, or a tuple of :exc:`BaseException` subclasses) and
        returns it as a value.
"""
self.__errors = errors
self.__func = func
# Set __doc__, __wrapped__, etc, especially useful on Python 3.
functools.update_wrapper(self, func)
def __call__(self, *args, **kwargs):
func = self.__func
try:
return func(*args, **kwargs)
except self.__errors as ex:
return ex
def __str__(self):
return str(self.__func)
def __repr__(self):
return repr(self.__func)
def __getattr__(self, name):
return getattr(self.__func, name)
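# Illustrative usage (a minimal sketch, not part of the original module):
#
#     safe_int = wrap_errors((ValueError, TypeError), int)
#     safe_int("42")            # -> 42
#     safe_int("not a number")  # -> ValueError instance (returned, not raised)
#
# This is typically handy when spawning greenlets whose exceptions you want to
# inspect as return values, e.g. gevent.spawn(wrap_errors(Exception, func)).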
|
be806da8f556d2682d19da14397055a7d828b7b2
|
5130754859e274cd06f63260439e5203c2000a11
|
/core/storage/question/gae_models.py
|
4eba43f5aa1f9d88f573e49bdc2040c4c5de55b1
|
[
"Apache-2.0"
] |
permissive
|
oppia/oppia
|
8ebc9c7c7f2b336e9a79ce04533abe3956f48cbe
|
d16fdf23d790eafd63812bd7239532256e30a21d
|
refs/heads/develop
| 2023-09-04T07:50:13.661276
| 2023-09-03T09:21:32
| 2023-09-03T09:21:32
| 40,687,563
| 6,172
| 4,666
|
Apache-2.0
| 2023-09-14T18:25:11
| 2015-08-14T00:16:14
|
Python
|
UTF-8
|
Python
| false
| false
| 32,971
|
py
|
gae_models.py
|
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for storing the question data models."""
from __future__ import annotations
import math
import random
from core import feconf
from core import utils
from core.constants import constants
from core.platform import models
from typing import Dict, List, Mapping, Sequence
MYPY = False
if MYPY: # pragma: no cover
# Here, we are importing 'state_domain' only for type-checking purpose.
from core.domain import state_domain # pylint: disable=invalid-import # isort:skip
from mypy_imports import base_models
from mypy_imports import datastore_services
(base_models, skill_models) = models.Registry.import_models([
models.Names.BASE_MODEL, models.Names.SKILL
])
datastore_services = models.Registry.import_datastore_services()
class QuestionSnapshotMetadataModel(base_models.BaseSnapshotMetadataModel):
"""Storage model for the metadata for a question snapshot."""
pass
class QuestionSnapshotContentModel(base_models.BaseSnapshotContentModel):
"""Storage model for the content of a question snapshot."""
@staticmethod
def get_deletion_policy() -> base_models.DELETION_POLICY:
"""Model doesn't contain any data directly corresponding to a user."""
return base_models.DELETION_POLICY.NOT_APPLICABLE
class QuestionCommitLogEntryModel(base_models.BaseCommitLogEntryModel):
"""Log of commits to questions.
A new instance of this model is created and saved every time a commit to
QuestionModel occurs.
The id for this model is of the form 'question-[question_id]-[version]'.
"""
# The id of the question being edited.
question_id = datastore_services.StringProperty(indexed=True, required=True)
@staticmethod
def get_model_association_to_user(
) -> base_models.MODEL_ASSOCIATION_TO_USER:
"""The history of commits is not relevant for the purposes of Takeout
since commits don't contain relevant data corresponding to users.
"""
return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER
@classmethod
def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:
"""Model contains data corresponding to a user, but this isn't exported
because the history of commits isn't deemed as useful for users since
commit logs don't contain relevant data corresponding to those users.
"""
return dict(super(cls, cls).get_export_policy(), **{
'question_id': base_models.EXPORT_POLICY.NOT_APPLICABLE
})
@classmethod
def get_instance_id(cls, question_id: str, question_version: int) -> str:
"""Returns ID of the question commit log entry model.
Args:
question_id: str. The question id whose states are mapped.
question_version: int. The version of the question.
Returns:
str. A string containing question ID and
question version.
"""
return 'question-%s-%s' % (question_id, question_version)
class QuestionModel(base_models.VersionedModel):
"""Model for storing Questions.
    The ID of each instance of this class is a random hash of 12 chars.
"""
SNAPSHOT_METADATA_CLASS = QuestionSnapshotMetadataModel
SNAPSHOT_CONTENT_CLASS = QuestionSnapshotContentModel
COMMIT_LOG_ENTRY_CLASS = QuestionCommitLogEntryModel
ALLOW_REVERT = True
# An object representing the question state data.
question_state_data = (
datastore_services.JsonProperty(indexed=False, required=True))
# The schema version for the question state data.
question_state_data_schema_version = datastore_services.IntegerProperty(
required=True, indexed=True)
# The next_content_id index to use for generation of new content ids.
next_content_id_index = datastore_services.IntegerProperty(
required=True, default=0, indexed=True)
# The ISO 639-1 code for the language this question is written in.
language_code = (
datastore_services.StringProperty(required=True, indexed=True))
# The skill ids linked to this question.
linked_skill_ids = datastore_services.StringProperty(
indexed=True, repeated=True)
# The optional skill misconception ids marked as not relevant to the
# question.
# Note: Misconception ids are represented in two ways. In the Misconception
# domain object the id is a number. But in the context of a question
# (used here), the skill id needs to be included along with the
    # misconception id, because questions can have multiple skills
    # attached to them. Hence, the format for this field will be
# <skill-id>-<misconceptionid>.
inapplicable_skill_misconception_ids = datastore_services.StringProperty(
indexed=True, repeated=True)
@staticmethod
def get_deletion_policy() -> base_models.DELETION_POLICY:
"""Model doesn't contain any data directly corresponding to a user."""
return base_models.DELETION_POLICY.NOT_APPLICABLE
@staticmethod
def get_model_association_to_user(
) -> base_models.MODEL_ASSOCIATION_TO_USER:
"""Model does not contain user data."""
return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER
@classmethod
def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:
"""Model doesn't contain any data directly corresponding to a user."""
return dict(super(cls, cls).get_export_policy(), **{
'question_state_data': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'question_state_data_schema_version':
base_models.EXPORT_POLICY.NOT_APPLICABLE,
'language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'linked_skill_ids': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'inapplicable_skill_misconception_ids':
base_models.EXPORT_POLICY.NOT_APPLICABLE,
'next_content_id_index': base_models.EXPORT_POLICY.NOT_APPLICABLE
})
@classmethod
def _get_new_id(cls) -> str:
"""Generates a unique ID for the question in the form of random hash
of 12 chars.
Returns:
new_id: str. ID of the new QuestionModel instance.
Raises:
Exception. The ID generator for QuestionModel is
producing too many collisions.
"""
for _ in range(base_models.MAX_RETRIES):
new_id = utils.convert_to_hash(
str(utils.get_random_int(base_models.RAND_RANGE)),
base_models.ID_LENGTH)
if not cls.get_by_id(new_id):
return new_id
raise Exception(
'The id generator for QuestionModel is producing too many '
'collisions.')
# Here we use MyPy ignore because the signature of this method doesn't
# match with VersionedModel.compute_models_to_commit(). Because argument
# `commit_message` of super class can accept Optional[str] but this method
# can only accept str.
def compute_models_to_commit( # type: ignore[override]
self,
committer_id: str,
commit_type: str,
commit_message: str,
commit_cmds: base_models.AllowedCommitCmdsListType,
# We expect Mapping because we want to allow models that inherit
# from BaseModel as the values, if we used Dict this wouldn't
# be allowed.
additional_models: Mapping[str, base_models.BaseModel]
) -> base_models.ModelsToPutDict:
"""Record the event to the commit log after the model commit.
Note that this extends the superclass method.
Args:
committer_id: str. The user_id of the user who committed the
change.
commit_type: str. The type of commit. Possible values are in
core.storage.base_models.COMMIT_TYPE_CHOICES.
commit_message: str. The commit description message.
commit_cmds: list(dict). A list of commands, describing changes
made in this model, which should give sufficient information to
reconstruct the commit. Each dict always contains:
cmd: str. Unique command.
and then additional arguments for that command.
additional_models: dict(str, BaseModel). Additional models that are
needed for the commit process.
Returns:
ModelsToPutDict. A dict of models that should be put into
the datastore.
"""
models_to_put = super().compute_models_to_commit(
committer_id,
commit_type,
commit_message,
commit_cmds,
additional_models
)
question_commit_log = QuestionCommitLogEntryModel.create(
self.id, self.version, committer_id, commit_type, commit_message,
commit_cmds, constants.ACTIVITY_STATUS_PUBLIC, False
)
question_commit_log.question_id = self.id
return {
'snapshot_metadata_model': models_to_put['snapshot_metadata_model'],
'snapshot_content_model': models_to_put['snapshot_content_model'],
'commit_log_model': question_commit_log,
'versioned_model': models_to_put['versioned_model'],
}
@classmethod
def create(
cls,
question_state_data: state_domain.StateDict,
language_code: str,
version: int,
linked_skill_ids: List[str],
inapplicable_skill_misconception_ids: List[str],
next_content_id_index: int
) -> QuestionModel:
"""Creates a new QuestionModel entry.
Args:
            question_state_data: dict. A dict representing the question
state data.
language_code: str. The ISO 639-1 code for the language this
question is written in.
version: int. The version of the question.
linked_skill_ids: list(str). The skill ids linked to the question.
inapplicable_skill_misconception_ids: list(str). The optional
skill misconception ids marked as not applicable to the
question.
            next_content_id_index: int. The next content id index to use when
                generating new content ids.
Returns:
QuestionModel. Instance of the new QuestionModel entry.
Raises:
Exception. A model with the same ID already exists.
"""
instance_id = cls._get_new_id()
question_model_instance = cls(
id=instance_id,
question_state_data=question_state_data,
language_code=language_code,
version=version,
linked_skill_ids=linked_skill_ids,
inapplicable_skill_misconception_ids=(
inapplicable_skill_misconception_ids),
next_content_id_index=next_content_id_index)
return question_model_instance
@classmethod
def put_multi_questions(cls, questions: List[QuestionModel]) -> None:
"""Puts multiple question models into the datastore.
Args:
questions: list(Question). The list of question objects
to put into the datastore.
"""
cls.update_timestamps_multi(questions)
cls.put_multi(questions)
class QuestionSkillLinkModel(base_models.BaseModel):
"""Model for storing Question-Skill Links.
The ID of instances of this class has the form '[question_id]:[skill_id]'.
"""
# The ID of the question.
question_id = (
datastore_services.StringProperty(required=True, indexed=True))
# The ID of the skill to which the question is linked.
skill_id = datastore_services.StringProperty(required=True, indexed=True)
# The difficulty of the skill.
skill_difficulty = (
datastore_services.FloatProperty(required=True, indexed=True))
@staticmethod
def get_deletion_policy() -> base_models.DELETION_POLICY:
"""Model doesn't contain any data directly corresponding to a user."""
return base_models.DELETION_POLICY.NOT_APPLICABLE
@staticmethod
def get_model_association_to_user(
) -> base_models.MODEL_ASSOCIATION_TO_USER:
"""Model does not contain user data."""
return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER
@classmethod
def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:
"""Model doesn't contain any data directly corresponding to a user."""
return dict(super(cls, cls).get_export_policy(), **{
'question_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'skill_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'skill_difficulty': base_models.EXPORT_POLICY.NOT_APPLICABLE
})
@classmethod
def get_model_id(cls, question_id: str, skill_id: str) -> str:
"""Returns the model id by combining the questions and skill id.
Args:
question_id: str. The ID of the question.
skill_id: str. The ID of the skill to which the question is linked.
Returns:
str. The calculated model id.
"""
return '%s:%s' % (question_id, skill_id)
@classmethod
def create(
cls,
question_id: str,
skill_id: str,
skill_difficulty: float
) -> QuestionSkillLinkModel:
"""Creates a new QuestionSkillLinkModel entry.
Args:
question_id: str. The ID of the question.
skill_id: str. The ID of the skill to which the question is linked.
skill_difficulty: float. The difficulty between [0, 1] of the skill.
Raises:
Exception. The given question is already linked to the given skill.
Returns:
QuestionSkillLinkModel. Instance of the new QuestionSkillLinkModel
entry.
"""
question_skill_link_id = cls.get_model_id(question_id, skill_id)
if cls.get(question_skill_link_id, strict=False) is not None:
raise Exception(
'The question with ID %s is already linked to skill %s' %
(question_id, skill_id))
question_skill_link_model_instance = cls(
id=question_skill_link_id,
question_id=question_id,
skill_id=skill_id,
skill_difficulty=skill_difficulty
)
return question_skill_link_model_instance
@classmethod
def get_total_question_count_for_skill_ids(
cls, skill_ids: List[str]
) -> int:
"""Returns the number of questions assigned to the given skill_ids.
Args:
skill_ids: list(str). Skill IDs for which the question count is
requested.
Returns:
int. The number of questions assigned to the given skill_ids.
"""
total_question_count = cls.query().filter(
cls.skill_id.IN(skill_ids)).count()
return total_question_count
@classmethod
def get_question_skill_links_by_skill_ids(
cls, question_count: int, skill_ids: List[str], offset: int
) -> Sequence[QuestionSkillLinkModel]:
"""Fetches the list of QuestionSkillLinkModels linked to the skill in
batches.
Args:
question_count: int. The number of questions to be returned.
skill_ids: list(str). The ids of skills for which the linked
question ids are to be retrieved.
offset: int. Number of query results to skip.
Returns:
list(QuestionSkillLinkModel). The QuestionSkillLinkModels
corresponding to given skill_ids.
"""
question_skill_count = min(
len(skill_ids), constants.MAX_SKILLS_PER_QUESTION
) * question_count
return cls.query(
cls.skill_id.IN(skill_ids)
).order(-cls.last_updated).fetch(question_skill_count, offset=offset)
@classmethod
def get_question_skill_links_based_on_difficulty_equidistributed_by_skill(
cls,
total_question_count: int,
skill_ids: List[str],
difficulty_requested: float
) -> List[QuestionSkillLinkModel]:
"""Fetches the list of constant number of random QuestionSkillLinkModels
linked to the skills, sorted by the absolute value of the difference
between skill difficulty and the requested difficulty.
Args:
total_question_count: int. The number of questions expected.
skill_ids: list(str). The ids of skills for which the linked
question ids are to be retrieved.
difficulty_requested: float. The skill difficulty of the questions
requested to be fetched.
Returns:
list(QuestionSkillLinkModel). A list of random
QuestionSkillLinkModels corresponding to given skill_ids, with
total_question_count/len(skill_ids) number of questions for
each skill. If not evenly divisible, it will be rounded up.
If not enough questions for a skill, just return all questions
it links to.
Raises:
Exception. The number of skill IDs exceeds 20.
"""
if len(skill_ids) > feconf.MAX_NUMBER_OF_SKILL_IDS:
raise Exception('Please keep the number of skill IDs below 20.')
if (not skill_ids) or (total_question_count == 0):
return []
question_count_per_skill = int(
math.ceil(float(total_question_count) / float(len(skill_ids))))
question_skill_link_mapping = {}
# For fetching the questions randomly we have used a random offset.
# But this is a temporary solution since this method scales linearly.
# Other alternative methods were:
# 1) Using a random id in question id filter
# 2) Adding an additional column that can be filtered upon.
# But these methods are not viable because google datastore limits
# each query to have at most one inequality filter. So we can't filter
# on both question_id and difficulty. Please see
# https://github.com/oppia/oppia/pull/9061#issuecomment-629765809
# for more details.
def get_offset(query: datastore_services.Query) -> int:
"""Helper function to get the offset."""
question_count = query.count()
if question_count > 2 * question_count_per_skill:
return utils.get_random_int(
question_count - (question_count_per_skill * 2))
return 0
for skill_id in skill_ids:
query = cls.query(cls.skill_id == skill_id)
equal_questions_query = query.filter(
cls.skill_difficulty == difficulty_requested)
# We fetch more questions here in order to try and ensure that the
# eventual number of returned questions is sufficient to meet the
# number requested, even after deduplication.
new_question_skill_link_models: List[QuestionSkillLinkModel] = list(
equal_questions_query.fetch(
limit=question_count_per_skill * 2,
offset=get_offset(equal_questions_query)
)
)
            new_question_skill_link_models = [
                model for model in new_question_skill_link_models
                if model.question_id not in question_skill_link_mapping
            ]
if len(new_question_skill_link_models) >= question_count_per_skill:
new_question_skill_link_models = random.sample(
new_question_skill_link_models, question_count_per_skill)
else:
# Fetch QuestionSkillLinkModels with difficulty smaller than
# requested difficulty.
easier_questions_query = query.filter(
cls.skill_difficulty < difficulty_requested)
easier_question_skill_link_models: List[
QuestionSkillLinkModel
] = list(
easier_questions_query.fetch(
limit=question_count_per_skill * 2,
offset=get_offset(easier_questions_query)
)
)
                easier_question_skill_link_models = [
                    model for model in easier_question_skill_link_models
                    if model.question_id not in question_skill_link_mapping
                ]
question_extra_count = (
len(new_question_skill_link_models) +
len(easier_question_skill_link_models) -
question_count_per_skill)
if question_extra_count >= 0:
easier_question_skill_link_models = random.sample(
easier_question_skill_link_models,
question_count_per_skill -
len(new_question_skill_link_models)
)
new_question_skill_link_models.extend(
easier_question_skill_link_models)
else:
# Fetch QuestionSkillLinkModels with difficulty larger than
# requested difficulty.
new_question_skill_link_models.extend(
easier_question_skill_link_models)
harder_questions_query = query.filter(
cls.skill_difficulty > difficulty_requested)
harder_question_skill_link_models: List[
QuestionSkillLinkModel
] = list(
harder_questions_query.fetch(
limit=question_count_per_skill * 2,
offset=get_offset(harder_questions_query)
)
)
                    harder_question_skill_link_models = [
                        model for model in harder_question_skill_link_models
                        if model.question_id not in question_skill_link_mapping
                    ]
question_extra_count = (
len(new_question_skill_link_models) +
len(harder_question_skill_link_models) -
question_count_per_skill)
if question_extra_count >= 0:
harder_question_skill_link_models = (
random.sample(
harder_question_skill_link_models,
question_count_per_skill -
len(new_question_skill_link_models)
))
new_question_skill_link_models.extend(
harder_question_skill_link_models)
new_question_skill_link_models = (
new_question_skill_link_models[:question_count_per_skill])
for model in new_question_skill_link_models:
if model.question_id not in question_skill_link_mapping:
question_skill_link_mapping[model.question_id] = model
return list(question_skill_link_mapping.values())
@classmethod
def get_question_skill_links_equidistributed_by_skill(
cls, total_question_count: int, skill_ids: List[str]
) -> List[QuestionSkillLinkModel]:
"""Fetches the list of constant number of random
QuestionSkillLinkModels linked to the skills.
Args:
total_question_count: int. The number of questions expected.
skill_ids: list(str). The ids of skills for which the linked
question ids are to be retrieved.
Returns:
list(QuestionSkillLinkModel). A list of random
QuestionSkillLinkModels corresponding to given skill_ids, with
total_question_count/len(skill_ids) number of questions for
each skill. If not evenly divisible, it will be rounded up.
If not enough questions for a skill, just return all questions
it links to.
Raises:
Exception. The number of skill IDs exceeds 20.
"""
if len(skill_ids) > feconf.MAX_NUMBER_OF_SKILL_IDS:
raise Exception('Please keep the number of skill IDs below 20.')
if not skill_ids:
return []
question_count_per_skill = int(
math.ceil(
float(total_question_count) / float(len(skill_ids))))
question_skill_link_models = []
existing_question_ids = []
def get_offset(query: datastore_services.Query) -> int:
"""Helper function to get the offset."""
question_count = query.count()
if question_count > 2 * question_count_per_skill:
return utils.get_random_int(
question_count - (question_count_per_skill * 2))
return 0
for skill_id in skill_ids:
query = cls.query(cls.skill_id == skill_id)
# We fetch more questions here in order to try and ensure that the
# eventual number of returned questions is sufficient to meet the
# number requested, even after deduplication.
new_question_skill_link_models: List[QuestionSkillLinkModel] = list(
query.fetch(
limit=question_count_per_skill * 2,
offset=get_offset(query)
)
)
# Deduplicate if the same question is linked to multiple skills.
            new_question_skill_link_models = [
                model for model in new_question_skill_link_models
                if model.question_id not in existing_question_ids
            ]
if len(new_question_skill_link_models) > question_count_per_skill:
sampled_question_skill_link_models = random.sample(
new_question_skill_link_models,
question_count_per_skill
)
else:
sampled_question_skill_link_models = (
new_question_skill_link_models)
question_skill_link_models.extend(
sampled_question_skill_link_models)
existing_question_ids.extend([
model.question_id for model in (
sampled_question_skill_link_models)
])
return question_skill_link_models
@classmethod
def get_all_question_ids_linked_to_skill_id(
cls, skill_id: str
) -> List[str]:
"""Returns a list of all question ids corresponding to the given skill
id.
Args:
skill_id: str. ID of the skill.
Returns:
list(str). The list of all question ids corresponding to the given
skill id.
"""
question_skill_link_models = cls.query().filter(
cls.skill_id == skill_id,
cls.deleted == False) # pylint: disable=singleton-comparison
question_ids = [
model.question_id for model in question_skill_link_models
]
return question_ids
@classmethod
def get_models_by_skill_id(
cls, skill_id: str
) -> Sequence[QuestionSkillLinkModel]:
"""Returns a list of QuestionSkillLink domains of a particular skill ID.
Args:
skill_id: str. ID of the skill.
Returns:
list(QuestionSkillLinkModel)|None. The list of question skill link
domains that are linked to the skill ID. None if the skill
ID doesn't exist.
"""
return cls.get_all().filter(cls.skill_id == skill_id).fetch()
@classmethod
def get_models_by_question_id(
cls, question_id: str
) -> Sequence[QuestionSkillLinkModel]:
"""Returns a list of QuestionSkillLinkModels of a particular
question ID.
Args:
question_id: str. ID of the question.
Returns:
list(QuestionSkillLinkModel)|None. The list of question skill link
models that are linked to the question ID, or None if there are no
question skill link models associated with the question ID.
"""
return cls.get_all().filter(cls.question_id == question_id).fetch()
@classmethod
def put_multi_question_skill_links(
cls, question_skill_links: List[QuestionSkillLinkModel]
) -> None:
"""Puts multiple question skill link models into the datastore.
Args:
question_skill_links: list(QuestionSkillLink). The list of
question skill link domain objects to put into the datastore.
"""
cls.update_timestamps_multi(question_skill_links)
cls.put_multi(question_skill_links)
@classmethod
def delete_multi_question_skill_links(
cls, question_skill_links: List[QuestionSkillLinkModel]
) -> None:
"""Deletes multiple question skill links from the datastore.
Args:
question_skill_links: list(QuestionSkillLinkModel). The list of
question skill link domain objects to delete from the datastore.
"""
cls.delete_multi(question_skill_links)
class QuestionSummaryModel(base_models.BaseModel):
"""Summary model for an Oppia question.
This should be used whenever the content blob of the question is not
needed (e.g. in search results, etc).
A QuestionSummaryModel instance stores the following information:
question_model_last_updated, question_model_created_on,
        question_content, interaction_id, misconception_ids, version.
The key of each instance is the question id.
"""
# Time when the question model was last updated (not to be
# confused with last_updated, which is the time when the
# question *summary* model was last updated).
question_model_last_updated = datastore_services.DateTimeProperty(
indexed=True, required=True)
# Time when the question model was created (not to be confused
# with created_on, which is the time when the question *summary*
# model was created).
question_model_created_on = datastore_services.DateTimeProperty(
indexed=True, required=True)
# The html content for the question.
question_content = (
datastore_services.TextProperty(indexed=False, required=True))
# The ID of the interaction.
interaction_id = (
datastore_services.StringProperty(indexed=True, required=True))
# The misconception ids addressed in the question. This includes
# tagged misconceptions ids as well as inapplicable misconception
# ids in the question.
misconception_ids = (
datastore_services.StringProperty(indexed=True, repeated=True))
version = datastore_services.IntegerProperty(required=True)
@staticmethod
def get_deletion_policy() -> base_models.DELETION_POLICY:
"""Model doesn't contain any data directly corresponding to a user."""
return base_models.DELETION_POLICY.NOT_APPLICABLE
@staticmethod
def get_model_association_to_user(
) -> base_models.MODEL_ASSOCIATION_TO_USER:
"""Model data has already been exported as a part of the QuestionModel
export_data function, and thus a new export_data function does not
need to be defined here.
"""
return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER
@classmethod
def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:
"""Model contains data corresponding to a user, but this isn't exported
        because noteworthy details that belong to this model have
already been exported as a part of the QuestionModel export_data
function.
"""
return dict(super(cls, cls).get_export_policy(), **{
'question_model_last_updated':
base_models.EXPORT_POLICY.NOT_APPLICABLE,
'question_model_created_on':
base_models.EXPORT_POLICY.NOT_APPLICABLE,
'question_content': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'interaction_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'misconception_ids': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'version': base_models.EXPORT_POLICY.NOT_APPLICABLE
})
|
4e70cace65614f6689dba0e134f538904ff18389
|
3a6a211ea0d32405497fbd6486c490bb147e25f9
|
/common/py_utils/py_utils/memory_debug.py
|
a5e5d006303264192c7f3a7db017f62de82a86c9
|
[
"BSD-3-Clause"
] |
permissive
|
catapult-project/catapult
|
e2cbdd5eb89f3b1492fc8752494e62ea1df4bae0
|
53102de187a48ac2cfc241fef54dcbc29c453a8e
|
refs/heads/main
| 2021-05-25T07:37:22.832505
| 2021-05-24T08:01:49
| 2021-05-25T06:07:38
| 33,947,548
| 2,032
| 742
|
BSD-3-Clause
| 2022-08-26T16:01:18
| 2015-04-14T17:49:05
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,957
|
py
|
memory_debug.py
|
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import heapq
import logging
import os
import sys
try:
import psutil
except ImportError:
psutil = None
BYTE_UNITS = ['B', 'KiB', 'MiB', 'GiB']
def FormatBytes(value):
def GetValueAndUnit(value):
for unit in BYTE_UNITS[:-1]:
if abs(value) < 1024.0:
return value, unit
value /= 1024.0
return value, BYTE_UNITS[-1]
if value is not None:
return '%.1f %s' % GetValueAndUnit(value)
else:
return 'N/A'
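# Example outputs of FormatBytes (illustrative, based on the helper above):
#   FormatBytes(512)           -> '512.0 B'
#   FormatBytes(2048)          -> '2.0 KiB'
#   FormatBytes(3 * 1024 ** 3) -> '3.0 GiB'
#   FormatBytes(None)          -> 'N/A'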
def _GetProcessInfo(p):
pinfo = p.as_dict(attrs=['pid', 'name', 'memory_info'])
pinfo['mem_rss'] = getattr(pinfo['memory_info'], 'rss', 0)
return pinfo
def _LogProcessInfo(pinfo, level):
pinfo['mem_rss_fmt'] = FormatBytes(pinfo['mem_rss'])
logging.log(level, '%(mem_rss_fmt)s (pid=%(pid)s)', pinfo)
def LogHostMemoryUsage(top_n=10, level=logging.INFO):
if not psutil:
logging.warning('psutil module is not found, skipping logging memory info')
return
if psutil.version_info < (2, 0):
logging.warning('psutil %s too old, upgrade to version 2.0 or higher'
' for memory usage information.', psutil.__version__)
return
# TODO(crbug.com/777865): Remove the following pylint disable. Even if we
# check for a recent enough psutil version above, the catapult presubmit
# builder (still running some old psutil) fails pylint checks due to API
# changes in psutil.
# pylint: disable=no-member
mem = psutil.virtual_memory()
logging.log(level, 'Used %s out of %s memory available.',
FormatBytes(mem.used), FormatBytes(mem.total))
  logging.log(level, 'Memory usage of top %i process groups', top_n)
pinfos_by_names = {}
for p in psutil.process_iter():
try:
pinfo = _GetProcessInfo(p)
except psutil.NoSuchProcess:
logging.exception('process %s no longer exists', p)
continue
pname = pinfo['name']
if pname not in pinfos_by_names:
pinfos_by_names[pname] = {'name': pname, 'total_mem_rss': 0, 'pids': []}
pinfos_by_names[pname]['total_mem_rss'] += pinfo['mem_rss']
pinfos_by_names[pname]['pids'].append(str(pinfo['pid']))
sorted_pinfo_groups = heapq.nlargest(
top_n,
list(pinfos_by_names.values()),
key=lambda item: item['total_mem_rss'])
for group in sorted_pinfo_groups:
group['total_mem_rss_fmt'] = FormatBytes(group['total_mem_rss'])
group['pids_fmt'] = ', '.join(group['pids'])
logging.log(
        level, '- %(name)s - %(total_mem_rss_fmt)s - pids: %(pids_fmt)s', group)
logging.log(level, 'Current process:')
pinfo = _GetProcessInfo(psutil.Process(os.getpid()))
_LogProcessInfo(pinfo, level)
def main():
logging.basicConfig(level=logging.INFO)
LogHostMemoryUsage()
if __name__ == '__main__':
sys.exit(main())
|
faba83f906111aa6f44af34cad14e47fd01fb4f1
|
5a6ccde5f37cc86b6fc0812b2bf40f42eab23906
|
/B-set/535B.Tavas and SaDDas.py
|
c3e34a7acf6ac4dc4d700864d25eb32ff7c6a6d5
|
[] |
no_license
|
Waqar-107/Codeforces
|
23f2b1edffb85f6f020107f03e09a455d3e6e792
|
f0d2f25aa6a09c06083b82c39cdf3288ec2eecba
|
refs/heads/master
| 2023-03-09T07:55:46.583363
| 2023-03-04T09:57:44
| 2023-03-04T09:57:44
| 82,915,896
| 196
| 138
| null | 2023-02-11T22:06:20
| 2017-02-23T10:29:34
|
C++
|
UTF-8
|
Python
| false
| false
| 198
|
py
|
535B.Tavas and SaDDas.py
|
#"from dust i have come, dust i will be"
a=str(input())
j=1
count=0
for i in range(len(a)-1,-1,-1):
if a[i]=='4':
count+=(1*j)
else:
count+=(2*j)
j*=2
print(count)
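# Worked example (added for illustration): lucky numbers in increasing order
# are 4, 7, 44, 47, 74, 77, ... so each digit acts like a binary digit with
# weight 2^k, mapping 4 -> 1 and 7 -> 2. For a = "74":
#   the last digit '4' contributes 1*1 and the first digit '7' contributes 2*2,
#   so the printed 1-based index is 1 + 4 = 5 (74 is the 5th lucky number).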
|
e1cb3d6f4a85802a50ba05615081ea0bf650cd32
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/SimCalorimetry/HcalSimAlgos/python/AddCaloSamplesAnalyzer.py
|
841848573679fc21ce35cd7c130ed758a0606366
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 1,404
|
py
|
AddCaloSamplesAnalyzer.py
|
import FWCore.ParameterSet.Config as cms
def customise(process):
# handle normal mixing or premixing
hcaldigi = None
if hasattr(process,'mix') and hasattr(process.mix,'digitizers') and hasattr(process.mix.digitizers,'hcal'):
hcaldigi = process.mix.digitizers.hcal
cstag = "mix"
if hasattr(process,'mixData'):
hcaldigi = process.mixData
cstag = "mixData"
if hcaldigi is None:
raise Exception("CaloSamplesAnalyzer requires a mix module, none found!")
hcaldigi.debugCaloSamples = cms.bool(True)
process.CaloSamplesAnalyzer = cms.EDAnalyzer("CaloSamplesAnalyzer",
# from hcalSimParameters
hf1 = hcaldigi.hf1,
hf2 = hcaldigi.hf2,
ho = hcaldigi.ho,
hb = hcaldigi.hb,
he = hcaldigi.he,
zdc = hcaldigi.zdc,
hoZecotek = hcaldigi.hoZecotek,
hoHamamatsu = hcaldigi.hoHamamatsu,
# from hcalUnsuppressedDigis
hitsProducer = hcaldigi.hitsProducer,
TestNumbering = hcaldigi.TestNumbering,
CaloSamplesTag = cms.InputTag(cstag,"HcalSamples"),
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string("debugcalosamples.root")
)
process.debug_step = cms.Path(process.CaloSamplesAnalyzer)
process.schedule.extend([process.debug_step])
return process
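# Usage note (an assumption about the surrounding workflow, not from this file):
# this customise() helper is meant to be applied to an existing cms.Process,
# e.g. process = customise(process) at the end of a configuration or via the
# usual --customise hook; it enables debugCaloSamples on the HCAL digitizer and
# schedules the CaloSamplesAnalyzer debug path.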
|
533e1b77c50d007c29270f52ff77a4d2af1eb9c0
|
d4412fbe37540e2c4cbe59ed6503d3661ccb7d9c
|
/tests/components_to_test/utils/__init__.py
|
150124b58800f4b8ae6694c6edc7464665fd61f6
|
[
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0",
"BSD-2-Clause",
"MIT"
] |
permissive
|
hpcaitech/ColossalAI
|
a082ed08a3807b53c49d1f86835b9808590d9042
|
c7b60f75470f067d1342705708810a660eabd684
|
refs/heads/main
| 2023-09-01T04:13:13.834565
| 2023-08-30T15:07:21
| 2023-08-30T15:07:21
| 422,274,596
| 32,044
| 4,084
|
Apache-2.0
| 2023-09-14T15:19:54
| 2021-10-28T16:19:44
|
Python
|
UTF-8
|
Python
| false
| false
| 96
|
py
|
__init__.py
|
from .dummy_data_generator import DummyDataGenerator
from .executor import run_fwd, run_fwd_bwd
|
315f7d287085f15cb74b43efa71c36d86b47660d
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/maximize-the-profit-as-the-salesman.py
|
e2248732d89f78662fbfc68a9effb32a1d7d43f7
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 535
|
py
|
maximize-the-profit-as-the-salesman.py
|
# Time: O(n + m), m = len(offers)
# Space: O(n + m)
# dp
class Solution(object):
def maximizeTheProfit(self, n, offers):
"""
:type n: int
:type offers: List[List[int]]
:rtype: int
"""
lookup = [[] for _ in xrange(n)]
for s, e, g in offers:
lookup[e].append([s, g])
dp = [0]*(n+1)
for e in xrange(n):
dp[e+1] = dp[(e-1)+1]
for s, g in lookup[e]:
dp[e+1] = max(dp[e+1], dp[(s-1)+1]+g)
return dp[-1]
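# Worked example (LeetCode example 1, traced here for illustration):
# n = 5, offers = [[0,0,1],[0,2,2],[1,3,2]]
#   lookup[0] = [[0, 1]], lookup[2] = [[0, 2]], lookup[3] = [[1, 2]]
#   after e = 0: dp[1] = max(dp[0], dp[0] + 1) = 1   (take offer [0,0,1])
#   after e = 2: dp[3] = max(dp[2], dp[0] + 2) = 2   (take offer [0,2,2])
#   after e = 3: dp[4] = max(dp[3], dp[1] + 2) = 3   (offers [0,0,1] and [1,3,2])
#   answer dp[-1] = 3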
|
56985c5be670a4d6c3966efe2f5525cdb57a5800
|
5f69a6549b8d5e417553d910622e6855b2ae679b
|
/src/opendr/perception/pose_estimation/__init__.py
|
66e9725a6af4c0dd43764b57d9042a99fdfd1c19
|
[
"Apache-2.0"
] |
permissive
|
opendr-eu/opendr
|
822219f709613d77c5eb62c5d02808d344239835
|
b3d6ce670cdf63469fc5766630eb295d67b3d788
|
refs/heads/master
| 2023-08-31T07:02:36.375231
| 2023-08-29T06:39:51
| 2023-08-29T06:39:51
| 293,755,225
| 535
| 82
|
Apache-2.0
| 2023-09-13T16:53:34
| 2020-09-08T08:55:04
|
Python
|
UTF-8
|
Python
| false
| false
| 457
|
py
|
__init__.py
|
from opendr.perception.pose_estimation.lightweight_open_pose.lightweight_open_pose_learner import \
LightweightOpenPoseLearner
from opendr.perception.pose_estimation.hr_pose_estimation.high_resolution_learner import \
HighResolutionPoseEstimationLearner
from opendr.perception.pose_estimation.lightweight_open_pose.utilities import draw, get_bbox
__all__ = ['LightweightOpenPoseLearner', 'draw', 'get_bbox', 'HighResolutionPoseEstimationLearner']
|
a93ad4eaf59288a753cfe996ff942a2cc37cbc49
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/battle_control/arena_info/settings.py
|
e964dc0188f2694a82077b5f567c881385fcdc11
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 3,312
|
py
|
settings.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/battle_control/arena_info/settings.py
from gui.Scaleform.locale.INGAME_GUI import INGAME_GUI
from gui.shared.gui_items.Vehicle import VEHICLE_BATTLE_TYPES_ORDER_INDICES
from shared_utils import BitmaskHelper
__all__ = ('UNKNOWN_VEHICLE_NAME', 'UNKNOWN_VEHICLE_CLASS_NAME', 'UNKNOWN_PLAYER_NAME', 'UNKNOWN_VEHICLE_LEVEL', 'UNKNOWN_VEHICLE_CLASS_ORDER', 'SQUAD_RANGE_TO_SHOW', 'VEHICLE_STATUS', 'PLAYER_STATUS', 'INVALIDATE_OP', 'getOrderByVehicleClass')
CONTOUR_ICON_SF_PATH = '../maps/icons/vehicle/contour/{0}.png'
CONTOUR_ICON_RES_PATH = 'gui/maps/icons/vehicle/contour/{0}.png'
SMALL_MAP_IMAGE_SF_PATH = '../maps/icons/map/battleLoading/%s.png'
SCREEN_MAP_IMAGE_RES_PATH = 'gui/maps/icons/map/screen/%s.dds'
RESPAWN_MAP_IMAGE_RES_PATH = 'gui/maps/icons/map/respawn/%s.dds'
DEFAULT_SCREEN_MAP_IMAGE_RES_PATH = 'gui/maps/icons/map/screen/default_screen.dds'
UNKNOWN_CONTOUR_ICON_NAME = 'unknown'
UNKNOWN_CONTOUR_ICON_SF_PATH = CONTOUR_ICON_SF_PATH.format(UNKNOWN_CONTOUR_ICON_NAME)
UNKNOWN_CONTOUR_ICON_RES_PATH = CONTOUR_ICON_RES_PATH.format(UNKNOWN_CONTOUR_ICON_NAME)
UNKNOWN_VEHICLE_NAME = INGAME_GUI.PLAYERS_PANEL_UNKNOWN_VEHICLE
UNKNOWN_VEHICLE_CLASS_NAME = 'unknown'
UNKNOWN_PLAYER_NAME = INGAME_GUI.PLAYERS_PANEL_UNKNOWN_NAME
UNKNOWN_VEHICLE_LEVEL = -1
UNKNOWN_VEHICLE_CLASS_ORDER = 100
SQUAD_RANGE_TO_SHOW = xrange(2, 4)
class ARENA_LISTENER_SCOPE(object):
LOAD = 1
VEHICLES = 2
TEAMS_BASES = 4
PERIOD = 8
INVITATIONS = 16
POSITIONS = 32
CONTACTS = 64
VIEW_POINTS = 128
class VEHICLE_STATUS(BitmaskHelper):
DEFAULT = 0
IS_ALIVE = 1
IS_READY = 2
NOT_AVAILABLE = 4
STOP_RESPAWN = 8
class PLAYER_STATUS(BitmaskHelper):
DEFAULT = 0
IS_TEAM_KILLER = 1
IS_SQUAD_MAN = 2
IS_SQUAD_PERSONAL = 4
IS_PLAYER_SELECTED = 8
IS_VOIP_DISABLED = 16
IS_ACTION_DISABLED = 32
class INVITATION_DELIVERY_STATUS(BitmaskHelper):
NONE = 0
FORBIDDEN_BY_RECEIVER = 1
FORBIDDEN_BY_SENDER = 2
RECEIVED_FROM = 4
RECEIVED_INACTIVE = 8
SENT_TO = 16
SENT_INACTIVE = 32
class PERSONAL_STATUS(BitmaskHelper):
DEFAULT = 0
CAN_SEND_INVITE_TO_ALLY = 1
CAN_SEND_INVITE_TO_ENEMY = 2
SQUAD_RESTRICTIONS = 4
IS_VEHICLE_LEVEL_SHOWN = 8
IS_VEHICLE_COUNTER_SHOWN = 16
IS_COLOR_BLIND = 32
SHOW_ALLY_INVITES = 64
SHOW_ENEMY_INVITES = 128
class INVALIDATE_OP(BitmaskHelper):
NONE = 0
SORTING = 1
VEHICLE_STATUS = 2
VEHICLE_INFO = 4
VEHICLE_STATS = 8
VEHICLE_ISTATS = 16
PLAYER_STATUS = 32
PREBATTLE_CHANGED = 64
INVITATION_DELIVERY_STATUS = 128
class VehicleSpottedStatus(BitmaskHelper):
DEFAULT = 0
SPOTTED = 1
UNSPOTTED = 2
def makeVehicleIconName(vName):
return vName.replace(':', '-')
def makeContourIconSFPath(vName):
return CONTOUR_ICON_SF_PATH.format(makeVehicleIconName(vName))
def makeContourIconResPath(vName):
return CONTOUR_ICON_RES_PATH.format(makeVehicleIconName(vName))
def getOrderByVehicleClass(className=None):
if className and className in VEHICLE_BATTLE_TYPES_ORDER_INDICES:
result = VEHICLE_BATTLE_TYPES_ORDER_INDICES[className]
else:
result = UNKNOWN_VEHICLE_CLASS_ORDER
return result
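# Illustrative values for the helpers above (the vehicle name is made up):
# makeVehicleIconName('ussr:R01_Tank') -> 'ussr-R01_Tank'
# makeContourIconSFPath('ussr:R01_Tank')
# -> '../maps/icons/vehicle/contour/ussr-R01_Tank.png'
# getOrderByVehicleClass(None) -> UNKNOWN_VEHICLE_CLASS_ORDER (100)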
|
0b7234d907ef8db9cdab6c41bb7e2f513efcaf32
|
2c9672851f9b482dc77447f3647cd27606d48251
|
/cartridge/shop/fields.py
|
d2d05fed78000e8b9ce3ee2f087f285560a5e176
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
stephenmcd/cartridge
|
1a07d95f35cf72b7583a6cb6acc9f46e6b48bda6
|
065c9b71ec67141040c424ab3c26a17410581a43
|
refs/heads/master
| 2023-05-29T17:41:06.685633
| 2022-09-19T17:03:59
| 2022-09-19T17:03:59
| 854,078
| 477
| 274
|
BSD-2-Clause
| 2022-11-15T15:43:13
| 2010-08-22T00:36:43
|
Python
|
UTF-8
|
Python
| false
| false
| 2,118
|
py
|
fields.py
|
"""
Various model fields that mostly provide default field sizes to ensure
these are consistent when used across multiple models.
"""
from locale import localeconv
from django.db.models import CharField, DecimalField
from django.utils.translation import gettext_lazy as _
from cartridge.shop.utils import set_locale
class OptionField(CharField):
"""
A field for a selectable option of a product such as colour or
size. Ensure ``null`` is ``True`` and provide a default field size.
"""
def __init__(self, *args, **kwargs):
kwargs["null"] = True
defaults = {"max_length": 50}
defaults.update(kwargs)
super().__init__(*args, **defaults)
class PercentageField(DecimalField):
"""
A field for representing a percentage. Sets restrictions on admin
form fields to ensure it is between 0-100.
"""
def formfield(self, *args, **kwargs):
defaults = {"min_value": 0, "max_value": 100}
kwargs.update(**defaults)
return super().formfield(*args, **kwargs)
class MoneyField(DecimalField):
"""
A field for a monetary amount. Provide the default size and
precision.
"""
def __init__(self, *args, **kwargs):
set_locale()
defaults = {
"null": True,
"blank": True,
"max_digits": 10,
"decimal_places": localeconv()["frac_digits"],
}
defaults.update(kwargs)
super().__init__(*args, **defaults)
class SKUField(CharField):
"""
A field for a product SKU. Provide the name and default field size.
"""
def __init__(self, *args, **kwargs):
if not args and "verbose_name" not in kwargs:
args = (_("SKU"),)
defaults = {"max_length": 20}
defaults.update(kwargs)
super().__init__(*args, **defaults)
class DiscountCodeField(CharField):
"""
A field for Discount Codes. Provide the default field size.
"""
def __init__(self, *args, **kwargs):
defaults = {"max_length": 20}
defaults.update(kwargs)
super().__init__(*args, **defaults)
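# A minimal sketch of how these fields could be wired into a model; the model
# name and field labels below are hypothetical and not part of this module.
#
# from django.db import models
#
# class ExampleProduct(models.Model):
# sku = SKUField(unique=True) # defaults to max_length=20
# unit_price = MoneyField(_("Unit price")) # locale-aware decimal places
# sale_discount = PercentageField(_("Sale discount"), max_digits=5,
# decimal_places=2, blank=True, null=True)
# discount_code = DiscountCodeField(_("Discount code"), blank=True)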
|
fb141807d3ce870311c6c664d82594af5f7c561c
|
e75c5412063078c9ea3e7c71a8dc7a2026083a34
|
/astropy/utils/xml/tests/test_iterparse.py
|
2156f8ceb04cb66af45d9c850f88e0e86daece1e
|
[
"BSD-3-Clause"
] |
permissive
|
astropy/astropy
|
d6636f24acdf2b18fc3e413ca0c4b1162a63dd41
|
53188c39a23c33b72df5850ec59e31886f84e29d
|
refs/heads/main
| 2023-08-27T18:16:44.061375
| 2023-08-27T16:07:35
| 2023-08-27T16:07:35
| 2,081,289
| 3,922
| 1,935
|
BSD-3-Clause
| 2023-09-14T09:23:26
| 2011-07-21T01:33:49
|
Python
|
UTF-8
|
Python
| false
| false
| 4,758
|
py
|
test_iterparse.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# LOCAL
# SYSTEM
import io
import zlib
from astropy.utils.xml.iterparser import _fast_iterparse
# The C-based XML parser for VOTables previously used fixed-sized
# buffers (allocated at __init__() time). This test will
# only pass with the patch that allows a dynamic realloc() of
# the queue. This addresses the bugs:
#
# - "RuntimeError: XML queue overflow"
# https://github.com/astropy/astropy/issues/5824
# (Kudos to Stefan Becker---ARI/ZAH Heidelberg)
#
# - "iterparse.c: add queue_realloc() + move 'buffersize / 2' logic there"
# https://github.com/astropy/astropy/issues/5869
#
# This test code can emulate a combination of network buffering and
# gzip decompression---with different request sizes, it can be used to
# demonstrate both under-reading and over-reading.
#
# Using the 512-tag VOTABLE XML sample input, and various combinations
# of minimum/maximum fetch sizes, the following situations can be
# generated:
#
# maximum_fetch = 1 (ValueError, no element found) still within gzip headers
# maximum_fetch = 80 (ValueError, unclosed token) short read
# maximum_fetch = 217 passes, because decompressed_length > requested
#    && <512 tags in a single parse
# maximum_fetch = 218 (RuntimeError, XML queue overflow)
#
# The test provided here covers the over-reading identified in #5824
# (equivalent to the 217).
# Firstly, assemble a minimal VOTABLE header, table contents and footer.
# This is done in textual form, as the aim is to only test the parser, not
# the outputter!
HEADER = """<?xml version="1.0" encoding="UTF-8"?>
<VOTABLE>
<RESOURCE type="results">
<TABLE>
<FIELD ID="foo" name="foo" datatype="int" arraysize="1"/>
<DATA>
<TABLEDATA>
"""
ROW = """<TR><TD>0</TD></TR>
"""
FOOTER = """
</TABLEDATA>
</DATA>
</TABLE>
</RESOURCE>
</VOTABLE>
"""
# minimum passable buffer size => 1024
# 1024 / 2 => 512 tags for overflow
# 512 - 7 tags in header, - 5 tags in footer = 500 tags required for overflow
# 500 / 4 tags (<tr><td></td></tr>) per row == 125 rows required for overflow
VOTABLE_XML = HEADER + 125 * ROW + FOOTER
# UngzipFileWrapper() wraps an existing file-like Object,
# decompressing the content and returning the plaintext.
# This therefore emulates the behavior of the Python 'requests'
# library when transparently decompressing Gzip HTTP responses.
#
# The critical behavior is that---because of the
# decompression---read() can return considerably more
# bytes than were requested! (But, read() can also return less).
#
# inspiration:
# http://stackoverflow.com/questions/4013843/how-to-wrap-file-object-read-and-write-operation-which-are-readonly
class UngzipFileWrapper:
def __init__(self, fd, **kwargs):
self._file = fd
self._z = zlib.decompressobj(16 + zlib.MAX_WBITS)
def read(self, requested_length):
# emulate network buffering dynamics by clamping the read size
clamped_length = max(1, min(1 << 24, requested_length))
compressed = self._file.read(clamped_length)
plaintext = self._z.decompress(compressed)
# Only for real local files---just for the testcase
if len(compressed) == 0:
self.close()
return plaintext
def __getattr__(self, attr):
return getattr(self._file, attr)
# test_iterparser_over_read_simple() is a very cut down test,
# of the original more flexible test-case, but without external
# dependencies. The plaintext is compressed and then decompressed
# to provide a better emulation of the original situation where
# the bug was observed.
#
# If a dependency upon 'zlib' is not desired, it would be possible to
# simplify this testcase by replacing the compress/decompress with a
# read() method emulation that always returned more from a buffer
# that was requested.
def test_iterparser_over_read_simple():
# Take the plaintext of 512 tags, and compression it with a
# Gzip-style header (+16), to most closely emulate the behavior
# of most HTTP servers.
zlib_GZIP_STYLE_HEADER = 16
compo = zlib.compressobj(
zlib.Z_BEST_COMPRESSION, zlib.DEFLATED, zlib.MAX_WBITS + zlib_GZIP_STYLE_HEADER
)
# Bytes vs. String .encode()/.decode() for compatibility with Python 3.5.
s = compo.compress(VOTABLE_XML.encode())
s = s + compo.flush()
fd = io.BytesIO(s)
fd.seek(0)
# Finally setup the test of the C-based '_fast_iterparse()' iterator
# and a situation in which it can be called a-la the VOTable Parser.
MINIMUM_REQUESTABLE_BUFFER_SIZE = 1024
uncompressed_fd = UngzipFileWrapper(fd)
iterable = _fast_iterparse(uncompressed_fd.read, MINIMUM_REQUESTABLE_BUFFER_SIZE)
list(iterable)
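# A small sanity check (a sketch reusing the objects defined above) of the
# over-read behaviour the comments describe: a tiny read request against the
# wrapper can come back with far more plaintext than was asked for, because
# decompression expands the data.
#
# compo = zlib.compressobj(9, zlib.DEFLATED, zlib.MAX_WBITS + 16)
# data = compo.compress(VOTABLE_XML.encode()) + compo.flush()
# wrapper = UngzipFileWrapper(io.BytesIO(data))
# chunk = wrapper.read(64)
# assert len(chunk) > 64 # expected for this repetitive input, not guaranteed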
|
04e5c92d135fb9f2683ba9ba5ece90cd5c1a6262
|
dafbc7e802ae7e84c6677e5f4de87c3c50d11307
|
/net/train.py
|
de4b03cca0481f583b97b5529df3fd2ac94b8136
|
[
"MIT"
] |
permissive
|
HelloRicky123/Siamese-RPN
|
bf5b7e983c96c487c73a135752013d6b3fee4a71
|
08625aa8828aea5d63cb8b301c7ec4e300cb047b
|
refs/heads/master
| 2021-09-24T02:14:20.440448
| 2021-09-09T05:28:36
| 2021-09-09T05:28:36
| 164,394,742
| 258
| 59
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,019
|
py
|
train.py
|
import torch
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision
import numpy as np
import pandas as pd
import os
import cv2
import pickle
import lmdb
import torch.nn as nn
import time
from torch.autograd import Variable
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader
from glob import glob
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from tensorboardX import SummaryWriter
from collections import OrderedDict
from .config import config
from .network import SiameseAlexNet
from .dataset import ImagnetVIDDataset
from lib.custom_transforms import Normalize, ToTensor, RandomStretch, \
RandomCrop, CenterCrop, RandomBlur, ColorAug
from lib.loss import rpn_smoothL1, rpn_cross_entropy_balance
from lib.visual import visual
from lib.utils import get_topk_box, add_box_img, compute_iou, box_transform_inv, adjust_learning_rate
from IPython import embed
torch.manual_seed(config.seed)
def train(data_dir, model_path=None, vis_port=None, init=None):
# loading meta data
# -----------------------------------------------------------------------------------------------------
meta_data_path = os.path.join(data_dir, "meta_data.pkl")
meta_data = pickle.load(open(meta_data_path, 'rb'))
all_videos = [x[0] for x in meta_data]
# split train/valid dataset
# -----------------------------------------------------------------------------------------------------
train_videos, valid_videos = train_test_split(all_videos,
test_size=1 - config.train_ratio, random_state=config.seed)
# define transforms
train_z_transforms = transforms.Compose([
ToTensor()
])
train_x_transforms = transforms.Compose([
ToTensor()
])
valid_z_transforms = transforms.Compose([
ToTensor()
])
valid_x_transforms = transforms.Compose([
ToTensor()
])
# open lmdb
db = lmdb.open(data_dir + '.lmdb', readonly=True, map_size=int(200e9))
# create dataset
# -----------------------------------------------------------------------------------------------------
train_dataset = ImagnetVIDDataset(db, train_videos, data_dir, train_z_transforms, train_x_transforms)
anchors = train_dataset.anchors
# dic_num = {}
# ind_random = list(range(len(train_dataset)))
# import random
# random.shuffle(ind_random)
# for i in tqdm(ind_random):
# exemplar_img, instance_img, regression_target, conf_target = train_dataset[i+1000]
valid_dataset = ImagnetVIDDataset(db, valid_videos, data_dir, valid_z_transforms, valid_x_transforms,
training=False)
# create dataloader
trainloader = DataLoader(train_dataset, batch_size=config.train_batch_size * torch.cuda.device_count(),
shuffle=True, pin_memory=True,
num_workers=config.train_num_workers * torch.cuda.device_count(), drop_last=True)
validloader = DataLoader(valid_dataset, batch_size=config.valid_batch_size * torch.cuda.device_count(),
shuffle=False, pin_memory=True,
num_workers=config.valid_num_workers * torch.cuda.device_count(), drop_last=True)
# create summary writer
if not os.path.exists(config.log_dir):
os.mkdir(config.log_dir)
summary_writer = SummaryWriter(config.log_dir)
if vis_port:
vis = visual(port=vis_port)
# start training
# -----------------------------------------------------------------------------------------------------
model = SiameseAlexNet()
model = model.cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=config.lr,
momentum=config.momentum, weight_decay=config.weight_decay)
# load model weight
# -----------------------------------------------------------------------------------------------------
start_epoch = 1
if model_path and init:
print("init training with checkpoint %s" % model_path + '\n')
print('------------------------------------------------------------------------------------------------ \n')
checkpoint = torch.load(model_path)
if 'model' in checkpoint.keys():
model.load_state_dict(checkpoint['model'])
else:
model_dict = model.state_dict()
model_dict.update(checkpoint)
model.load_state_dict(model_dict)
del checkpoint
torch.cuda.empty_cache()
print("inited checkpoint")
elif model_path and not init:
print("loading checkpoint %s" % model_path + '\n')
print('------------------------------------------------------------------------------------------------ \n')
checkpoint = torch.load(model_path)
start_epoch = checkpoint['epoch'] + 1
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
del checkpoint
torch.cuda.empty_cache()
print("loaded checkpoint")
elif not model_path and config.pretrained_model:
print("init with pretrained checkpoint %s" % config.pretrained_model + '\n')
print('------------------------------------------------------------------------------------------------ \n')
checkpoint = torch.load(config.pretrained_model)
# change name and load parameters
checkpoint = {k.replace('features.features', 'featureExtract'): v for k, v in checkpoint.items()}
model_dict = model.state_dict()
model_dict.update(checkpoint)
model.load_state_dict(model_dict)
# freeze layers
def freeze_layers(model):
print('------------------------------------------------------------------------------------------------')
for layer in model.featureExtract[:10]:
if isinstance(layer, nn.BatchNorm2d):
layer.eval()
for k, v in layer.named_parameters():
v.requires_grad = False
elif isinstance(layer, nn.Conv2d):
for k, v in layer.named_parameters():
v.requires_grad = False
elif isinstance(layer, nn.MaxPool2d):
continue
elif isinstance(layer, nn.ReLU):
continue
else:
raise KeyError('error in fixing former 3 layers')
print("fixed layers:")
print(model.featureExtract[:10])
if torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
for epoch in range(start_epoch, config.epoch + 1):
train_loss = []
model.train()
if config.fix_former_3_layers:
if torch.cuda.device_count() > 1:
freeze_layers(model.module)
else:
freeze_layers(model)
loss_temp_cls = 0
loss_temp_reg = 0
for i, data in enumerate(tqdm(trainloader)):
exemplar_imgs, instance_imgs, regression_target, conf_target = data
# conf_target (8,1125) (8,225x5)
regression_target, conf_target = regression_target.cuda(), conf_target.cuda()
pred_score, pred_regression = model(exemplar_imgs.cuda(), instance_imgs.cuda())
pred_conf = pred_score.reshape(-1, 2, config.anchor_num * config.score_size * config.score_size).permute(0,
2,
1)
pred_offset = pred_regression.reshape(-1, 4,
config.anchor_num * config.score_size * config.score_size).permute(0,
2,
1)
cls_loss = rpn_cross_entropy_balance(pred_conf, conf_target, config.num_pos, config.num_neg, anchors,
ohem_pos=config.ohem_pos, ohem_neg=config.ohem_neg)
reg_loss = rpn_smoothL1(pred_offset, regression_target, conf_target, config.num_pos, ohem=config.ohem_reg)
loss = cls_loss + config.lamb * reg_loss
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), config.clip)
optimizer.step()
step = (epoch - 1) * len(trainloader) + i
summary_writer.add_scalar('train/cls_loss', cls_loss.data, step)
summary_writer.add_scalar('train/reg_loss', reg_loss.data, step)
train_loss.append(loss.detach().cpu())
loss_temp_cls += cls_loss.detach().cpu().numpy()
loss_temp_reg += reg_loss.detach().cpu().numpy()
# if vis_port:
# vis.plot_error({'rpn_cls_loss': cls_loss.detach().cpu().numpy().ravel()[0],
# 'rpn_regress_loss': reg_loss.detach().cpu().numpy().ravel()[0]}, win=0)
if (i + 1) % config.show_interval == 0:
tqdm.write("[epoch %2d][iter %4d] cls_loss: %.4f, reg_loss: %.4f lr: %.2e"
% (epoch, i, loss_temp_cls / config.show_interval, loss_temp_reg / config.show_interval,
optimizer.param_groups[0]['lr']))
loss_temp_cls = 0
loss_temp_reg = 0
if vis_port:
anchors_show = train_dataset.anchors
exem_img = exemplar_imgs[0].cpu().numpy().transpose(1, 2, 0)
inst_img = instance_imgs[0].cpu().numpy().transpose(1, 2, 0)
# show detected box with max score
topk = config.show_topK
vis.plot_img(exem_img.transpose(2, 0, 1), win=1, name='exemple')
cls_pred = conf_target[0]
gt_box = get_topk_box(cls_pred, regression_target[0], anchors_show)[0]
# show gt_box
img_box = add_box_img(inst_img, gt_box, color=(255, 0, 0))
vis.plot_img(img_box.transpose(2, 0, 1), win=2, name='instance')
# show anchor with max score
cls_pred = F.softmax(pred_conf, dim=2)[0, :, 1]
scores, index = torch.topk(cls_pred, k=topk)
img_box = add_box_img(inst_img, anchors_show[index.cpu()])
img_box = add_box_img(img_box, gt_box, color=(255, 0, 0))
vis.plot_img(img_box.transpose(2, 0, 1), win=3, name='anchor_max_score')
cls_pred = F.softmax(pred_conf, dim=2)[0, :, 1]
topk_box = get_topk_box(cls_pred, pred_offset[0], anchors_show, topk=topk)
img_box = add_box_img(inst_img, topk_box)
img_box = add_box_img(img_box, gt_box, color=(255, 0, 0))
vis.plot_img(img_box.transpose(2, 0, 1), win=4, name='box_max_score')
# show anchor and detected box with max iou
iou = compute_iou(anchors_show, gt_box).flatten()
index = np.argsort(iou)[-topk:]
img_box = add_box_img(inst_img, anchors_show[index])
img_box = add_box_img(img_box, gt_box, color=(255, 0, 0))
vis.plot_img(img_box.transpose(2, 0, 1), win=5, name='anchor_max_iou')
# detected box
regress_offset = pred_offset[0].cpu().detach().numpy()
topk_offset = regress_offset[index, :]
anchors_det = anchors_show[index, :]
pred_box = box_transform_inv(anchors_det, topk_offset)
img_box = add_box_img(inst_img, pred_box)
img_box = add_box_img(img_box, gt_box, color=(255, 0, 0))
vis.plot_img(img_box.transpose(2, 0, 1), win=6, name='box_max_iou')
train_loss = np.mean(train_loss)
valid_loss = []
model.eval()
for i, data in enumerate(tqdm(validloader)):
exemplar_imgs, instance_imgs, regression_target, conf_target = data
regression_target, conf_target = regression_target.cuda(), conf_target.cuda()
pred_score, pred_regression = model(exemplar_imgs.cuda(), instance_imgs.cuda())
pred_conf = pred_score.reshape(-1, 2, config.anchor_num * config.score_size * config.score_size).permute(0,
2,
1)
pred_offset = pred_regression.reshape(-1, 4,
config.anchor_num * config.score_size * config.score_size).permute(0,
2,
1)
cls_loss = rpn_cross_entropy_balance(pred_conf, conf_target, config.num_pos, config.num_neg, anchors,
ohem_pos=config.ohem_pos, ohem_neg=config.ohem_neg)
reg_loss = rpn_smoothL1(pred_offset, regression_target, conf_target, config.num_pos, ohem=config.ohem_reg)
loss = cls_loss + config.lamb * reg_loss
valid_loss.append(loss.detach().cpu())
valid_loss = np.mean(valid_loss)
print("EPOCH %d valid_loss: %.4f, train_loss: %.4f" % (epoch, valid_loss, train_loss))
summary_writer.add_scalar('valid/loss',
valid_loss, (epoch + 1) * len(trainloader))
adjust_learning_rate(optimizer,
                             config.gamma)  # adjust before saving; it will be epoch+1's lr when the checkpoint is next loaded
if epoch % config.save_interval == 0:
if not os.path.exists('./data/models/'):
os.makedirs("./data/models/")
save_name = "./data/models/siamrpn_{}.pth".format(epoch)
new_state_dict = model.state_dict()
if torch.cuda.device_count() > 1:
new_state_dict = OrderedDict()
for k, v in model.state_dict().items():
namekey = k[7:] # remove `module.`
new_state_dict[namekey] = v
torch.save({
'epoch': epoch,
'model': new_state_dict,
'optimizer': optimizer.state_dict(),
}, save_name)
print('save model: {}'.format(save_name))
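# Illustrative invocation (a sketch; the dataset path is a placeholder and the
# preprocessed LMDB directory built by the data-preparation step is assumed to
# already exist):
#
# if __name__ == '__main__':
# train('/path/to/vid_curated_dataset', model_path=None,
# vis_port=None, init=None)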
|
b6319b536e1f9f97f9a69eb6b5049c7dfdac62e6
|
e70ce96317b9f197aed0f84e5d7e500dc08f7d30
|
/util/de_transform.py
|
29abcbcd405ebca5c33fac802cbd72226478f0e0
|
[
"MIT"
] |
permissive
|
hkchengrex/CascadePSP
|
c11a3e088b3b1a78ffb7bbaa6ab1c5ad870b8c85
|
83cc3b8783b595b2e47c75016f93654eaddb7412
|
refs/heads/master
| 2022-10-06T04:48:50.696529
| 2022-08-19T16:37:53
| 2022-08-19T16:37:53
| 248,813,772
| 760
| 102
|
MIT
| 2021-04-17T12:50:59
| 2020-03-20T17:25:15
|
Python
|
UTF-8
|
Python
| false
| false
| 2,173
|
py
|
de_transform.py
|
import cv2
import numpy as np
def get_random_structure(size):
# The provided model is trained with
# choice = np.random.randint(4)
# instead, which is a bug that we fixed here
choice = np.random.randint(1, 5)
if choice == 1:
return cv2.getStructuringElement(cv2.MORPH_RECT, (size, size))
elif choice == 2:
return cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (size, size))
elif choice == 3:
return cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (size, size//2))
elif choice == 4:
return cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (size//2, size))
def random_dilate(seg, min=3, max=10):
size = np.random.randint(min, max)
kernel = get_random_structure(size)
seg = cv2.dilate(seg,kernel,iterations = 1)
return seg
def random_erode(seg, min=3, max=10):
size = np.random.randint(min, max)
kernel = get_random_structure(size)
seg = cv2.erode(seg,kernel,iterations = 1)
return seg
def compute_iou(seg, gt):
intersection = seg*gt
union = seg+gt
return (np.count_nonzero(intersection) + 1e-6) / (np.count_nonzero(union) + 1e-6)
def perturb_seg(gt, iou_target=0.6):
h, w = gt.shape
seg = gt.copy()
_, seg = cv2.threshold(seg, 127, 255, 0)
# Rare case
if h <= 2 or w <= 2:
print('GT too small, returning original')
return seg
# Do a bunch of random operations
for _ in range(250):
for _ in range(4):
lx, ly = np.random.randint(w), np.random.randint(h)
lw, lh = np.random.randint(lx+1,w+1), np.random.randint(ly+1,h+1)
# Randomly set one pixel to 1/0. With the following dilate/erode, we can create holes/external regions
if np.random.rand() < 0.25:
cx = int((lx + lw) / 2)
cy = int((ly + lh) / 2)
seg[cy, cx] = np.random.randint(2) * 255
if np.random.rand() < 0.5:
seg[ly:lh, lx:lw] = random_dilate(seg[ly:lh, lx:lw])
else:
seg[ly:lh, lx:lw] = random_erode(seg[ly:lh, lx:lw])
if compute_iou(seg, gt) < iou_target:
break
return seg
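# Minimal usage sketch: the mask below is synthetic, but it follows the same
# 0/255 binary convention that perturb_seg() thresholds at 127.
if __name__ == '__main__':
    gt_mask = np.zeros((100, 100), dtype=np.uint8)
    gt_mask[20:80, 20:80] = 255
    noisy_mask = perturb_seg(gt_mask, iou_target=0.6)
    # The perturbation loop stops once the IoU drops below the target.
    print('IoU after perturbation: %.3f' % compute_iou(noisy_mask, gt_mask))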
|
6b00a7d921355a1cc8a5890397046c4ad176cfea
|
5f1881006aaf4f3c2515f375ad29c15fd6612de2
|
/lemon/executor/strongsup/tables/predicate.py
|
716e59efc3309e33d0cb11fb84bb7d106a3531da
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-or-later",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
microsoft/ContextualSP
|
4edb598d40f683f9a1143b92a9d24e1066d51ec4
|
4198ebce942f4afe7ddca6a96ab6f4464ade4518
|
refs/heads/master
| 2023-08-02T22:08:40.503853
| 2023-07-14T07:22:50
| 2023-07-14T07:22:50
| 255,534,819
| 332
| 70
|
MIT
| 2023-07-25T19:23:48
| 2020-04-14T07:01:54
|
Python
|
UTF-8
|
Python
| false
| false
| 3,124
|
py
|
predicate.py
|
from strongsup.predicate import Predicate
from strongsup.tables.executor import is_unary, is_binary, ALL_BUILT_INS
from strongsup.tables.graph import ALL_GRAPH_BUILT_INS
from strongsup.utils import EOU
class WikiTablePredicate(Predicate):
def __init__(self, name, original_string=None):
types = self._compute_types(name)
super(WikiTablePredicate, self).__init__(name, original_string, types=types)
def _compute_types(self, name):
"""Get the types (and a few features) of a predicate.
Args:
name (unicode): name of the predicate
Return:
tuple[string]
"""
types = []
if is_unary(name):
types.append(WikiTablePredicateType.UNARY)
if is_binary(name):
types.append(WikiTablePredicateType.BINARY)
if name in FIXED_PREDICATE_NAMES:
types.append(WikiTablePredicateType.BUILTIN)
if name.startswith('fb:cell.') and not name.startswith('fb:cell.cell.'):
types.append(WikiTablePredicateType.CELL)
elif name.startswith('fb:part.'):
types.append(WikiTablePredicateType.PART)
elif name.startswith('fb:row.row.'):
types.append(WikiTablePredicateType.COLUMN)
elif name.startswith('!fb:row.row.'):
types.append(WikiTablePredicateType.RCOLUMN)
elif name.startswith('N'):
types.append(WikiTablePredicateType.NUMBER)
elif name.startswith('D'):
types.append(WikiTablePredicateType.DATE)
return tuple(types)
@property
def types_vector(self):
"""Return the types as a k-hot vector.
Returns:
list[boolean]
"""
return [x in self.types for x in WikiTablePredicateType.ALL_TYPES]
@property
def words(self):
"""Get the words from the ID.
Returns:
list[unicode]
"""
return self.name.split('.')[-1].split('_')
@property
def delexicalized_name(self):
"""A placeholder used in a delexicalized utterance.
Returns:
unicode
"""
if WikiTablePredicateType.COLUMN in self.types:
return 'COL'
if WikiTablePredicateType.CELL in self.types:
return 'ENT'
return None
class WikiTablePredicateType(object):
UNARY = 'unary'
BINARY = 'binary'
BUILTIN = 'builtin'
CELL = 'cell'
PART = 'part'
COLUMN = 'column'
RCOLUMN = '!column'
NUMBER = 'number'
DATE = 'date'
ALL_TYPES = (UNARY, BINARY, BUILTIN, CELL, PART, COLUMN, RCOLUMN, NUMBER, DATE)
@classmethod
def is_relation(cls, pred):
return (WikiTablePredicateType.BINARY in pred.types) and not cls.is_builtin(pred)
@classmethod
def is_entity(cls, pred):
return WikiTablePredicateType.UNARY in pred.types
@classmethod
def is_builtin(cls, pred):
return WikiTablePredicateType.BUILTIN in pred.types
FIXED_PREDICATE_NAMES = (EOU,) + ALL_BUILT_INS + ALL_GRAPH_BUILT_INS
FIXED_PREDICATES = [WikiTablePredicate(name) for name in FIXED_PREDICATE_NAMES]
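# Illustrative behaviour of the prefix-based typing above (the predicate names
# are made-up examples; the unary/binary flags added by is_unary/is_binary are
# omitted because they depend on the executor):
# 'fb:cell.paris' -> CELL, delexicalized as 'ENT'
# 'fb:row.row.opponent_score' -> COLUMN, delexicalized as 'COL',
# words == ['opponent', 'score']
# 'N2004' -> NUMBER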
|
cd1d3b55b089b3eedabeb77d8654bcec3cec26fe
|
753cd066a9bd26b6c37c8d53a86c7a9c659ec18c
|
/tutorials/utils/lint/linters/url_linter.py
|
806169f2d3ee96c5e11dd0c1ba3f4ee2b67cf470
|
[
"MIT"
] |
permissive
|
graphcore/examples
|
ac872015808ed2a913d4d7bf0d63202ce15ebbae
|
e2f834dd60e7939672c1795b4ac62e89ad0bca49
|
refs/heads/master
| 2023-08-05T02:08:12.341836
| 2023-07-27T11:13:10
| 2023-07-27T11:13:10
| 143,977,106
| 311
| 80
|
MIT
| 2023-09-11T16:42:56
| 2018-08-08T07:29:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,134
|
py
|
url_linter.py
|
#!/usr/bin/env python3
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
import argparse
from typing import Optional
from typing import Sequence
import contextlib
import io
from ...tutorials_tests.urls_test import test_urls
def apply_lint_function() -> int:
"""Lint function to be called by pre-commit. Simply calls the URL tests
does nothing itself.
Returns:
int: If there is no modification to the source file the function returns 0,
else it will rewrite the file and return 1
"""
try:
f = io.StringIO()
with contextlib.redirect_stdout(f), contextlib.redirect_stderr(f):
test_urls.test_all_internal_links()
test_urls.test_links_are_pegged()
return 0
except AssertionError as err:
print("Link test failed on:")
print(str(err))
return 1
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument("filenames", nargs="*")
args = parser.parse_args(argv)
return apply_lint_function()
if __name__ == "__main__":
raise SystemExit(main())
|
339db2786011045980261f561a03f7374e9d8423
|
704976ea552111c6a5af9cd7cb62b9d9abaf3996
|
/rpython/jit/metainterp/opencoder.py
|
79827d94eb7451f16de5a5166a1aef60c6a17151
|
[
"BSD-3-Clause"
] |
permissive
|
mesalock-linux/mesapy
|
4f02c5819ce7f2f6e249d34840f1aa097577645d
|
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
|
refs/heads/mesapy2.7
| 2023-08-16T21:33:02.239581
| 2019-08-13T10:29:43
| 2019-08-13T18:06:45
| 136,080,721
| 396
| 33
|
NOASSERTION
| 2020-04-01T03:05:18
| 2018-06-04T20:45:17
|
Python
|
UTF-8
|
Python
| false
| false
| 18,288
|
py
|
opencoder.py
|
""" Storage format:
for each operation (inputargs numbered with negative numbers)
<opnum> [size-if-unknown-arity] [<arg0> <arg1> ...] [descr-or-snapshot-index]
Snapshot index for guards points to snapshot stored in _snapshots of trace
"""
from rpython.jit.metainterp.history import ConstInt, Const, ConstFloat, ConstPtr
from rpython.jit.metainterp.resoperation import AbstractResOp, AbstractInputArg,\
ResOperation, oparity, rop, opwithdescr, GuardResOp, IntOp, FloatOp, RefOp,\
opclasses
from rpython.rlib.rarithmetic import intmask, r_uint
from rpython.rlib.objectmodel import we_are_translated, specialize
from rpython.rtyper.lltypesystem import rffi, lltype, llmemory
from rpython.jit.metainterp.typesystem import llhelper
TAGINT, TAGCONSTPTR, TAGCONSTOTHER, TAGBOX = range(4)
TAGMASK = 0x3
TAGSHIFT = 2
class Model:
STORAGE_TP = rffi.USHORT
# this is the initial size of the trace - note that we probably
    # want something that would fit the initial "max_trace_length"
INIT_SIZE = 30000
MIN_VALUE = 0
MAX_VALUE = 2**16 - 1
class BigModel:
INIT_SIZE = 30000
STORAGE_TP = rffi.UINT
MIN_VALUE = 0
MAX_VALUE = int(2**31 - 1) # we could go to 2**32-1 on 64-bit, but
# that seems already far too huge
def get_model(self):
return _get_model(self.metainterp_sd)
@specialize.memo()
def _get_model(metainterp_sd):
return getattr(metainterp_sd, 'opencoder_model', Model)
SMALL_INT_STOP = (2 ** (15 - TAGSHIFT)) - 1
SMALL_INT_START = -SMALL_INT_STOP # we might want to distribute them unevenly
def expand_sizes_to_signed():
""" This function will make sure we can use sizes all the
way up to lltype.Signed for indexes everywhere
"""
class BaseTrace(object):
pass
class SnapshotIterator(object):
def __init__(self, main_iter, snapshot):
self.main_iter = main_iter
# reverse the snapshots and store the vable, vref lists
assert isinstance(snapshot, TopSnapshot)
self.vable_array = snapshot.vable_array
self.vref_array = snapshot.vref_array
self.size = len(self.vable_array) + len(self.vref_array) + 3
jc_index, pc = unpack_uint(snapshot.packed_jitcode_pc)
self.framestack = []
if jc_index == 2**16-1:
return
while snapshot:
self.framestack.append(snapshot)
self.size += len(snapshot.box_array) + 2
snapshot = snapshot.prev
self.framestack.reverse()
def get(self, index):
return self.main_iter._untag(index)
def unpack_jitcode_pc(self, snapshot):
return unpack_uint(snapshot.packed_jitcode_pc)
def unpack_array(self, arr):
# NOT_RPYTHON
return [self.get(i) for i in arr]
def _update_liverange(item, index, liveranges):
tag, v = untag(item)
if tag == TAGBOX:
liveranges[v] = index
def update_liveranges(snapshot, index, liveranges):
assert isinstance(snapshot, TopSnapshot)
for item in snapshot.vable_array:
_update_liverange(item, index, liveranges)
for item in snapshot.vref_array:
_update_liverange(item, index, liveranges)
while snapshot:
for item in snapshot.box_array:
_update_liverange(item, index, liveranges)
snapshot = snapshot.prev
class TraceIterator(BaseTrace):
def __init__(self, trace, start, end, force_inputargs=None,
metainterp_sd=None):
self.trace = trace
self.metainterp_sd = metainterp_sd
self.all_descr_len = len(metainterp_sd.all_descrs)
self._cache = [None] * trace._index
if force_inputargs is not None:
# the trace here is cut and we're working from
# inputargs that are in the middle, shuffle stuff around a bit
self.inputargs = [rop.inputarg_from_tp(arg.type) for
arg in force_inputargs]
for i, arg in enumerate(force_inputargs):
self._cache[arg.get_position()] = self.inputargs[i]
else:
self.inputargs = [rop.inputarg_from_tp(arg.type) for
arg in self.trace.inputargs]
for i, arg in enumerate(self.inputargs):
self._cache[i] = arg
self.start = start
self.pos = start
self._count = start
self._index = start
self.start_index = start
self.end = end
def get_dead_ranges(self):
return self.trace.get_dead_ranges()
def kill_cache_at(self, pos):
if pos:
self._cache[pos] = None
def _get(self, i):
res = self._cache[i]
assert res is not None
return res
def done(self):
return self.pos >= self.end
def _next(self):
if self.done():
raise IndexError
res = rffi.cast(lltype.Signed, self.trace._ops[self.pos])
self.pos += 1
return res
def _untag(self, tagged):
tag, v = untag(tagged)
if tag == TAGBOX:
return self._get(v)
elif tag == TAGINT:
return ConstInt(v + SMALL_INT_START)
elif tag == TAGCONSTPTR:
return ConstPtr(self.trace._refs[v])
elif tag == TAGCONSTOTHER:
if v & 1:
return ConstFloat(self.trace._floats[v >> 1])
else:
return ConstInt(self.trace._bigints[v >> 1])
else:
assert False
def get_snapshot_iter(self, index):
return SnapshotIterator(self, self.trace._snapshots[index])
def next_element_update_live_range(self, index, liveranges):
opnum = self._next()
if oparity[opnum] == -1:
argnum = self._next()
else:
argnum = oparity[opnum]
for i in range(argnum):
tagged = self._next()
tag, v = untag(tagged)
if tag == TAGBOX:
liveranges[v] = index
if opclasses[opnum].type != 'v':
liveranges[index] = index
if opwithdescr[opnum]:
descr_index = self._next()
if rop.is_guard(opnum):
update_liveranges(self.trace._snapshots[descr_index], index,
liveranges)
if opclasses[opnum].type != 'v':
return index + 1
return index
def next(self):
opnum = self._next()
if oparity[opnum] == -1:
argnum = self._next()
else:
argnum = oparity[opnum]
args = []
for i in range(argnum):
args.append(self._untag(self._next()))
descr_index = -1
if opwithdescr[opnum]:
descr_index = self._next()
if descr_index == 0 or rop.is_guard(opnum):
descr = None
else:
if descr_index < self.all_descr_len + 1:
descr = self.metainterp_sd.all_descrs[descr_index - 1]
else:
descr = self.trace._descrs[descr_index - self.all_descr_len - 1]
else:
descr = None
res = ResOperation(opnum, args, descr=descr)
if rop.is_guard(opnum):
assert isinstance(res, GuardResOp)
res.rd_resume_position = descr_index
if res.type != 'v':
self._cache[self._index] = res
self._index += 1
self._count += 1
return res
class CutTrace(BaseTrace):
def __init__(self, trace, start, count, index, inputargs):
self.trace = trace
self.start = start
self.inputargs = inputargs
self.count = count
self.index = index
def cut_at(self, cut):
assert cut[1] > self.count
self.trace.cut_at(cut)
def get_iter(self):
iter = TraceIterator(self.trace, self.start, self.trace._pos,
self.inputargs,
metainterp_sd=self.trace.metainterp_sd)
iter._count = self.count
iter.start_index = self.index
iter._index = self.index
return iter
def combine_uint(index1, index2):
assert 0 <= index1 < 65536
assert 0 <= index2 < 65536
return index1 << 16 | index2 # it's ok to return signed here,
# we need only 32bit, but 64 is ok for now
def unpack_uint(packed):
return (packed >> 16) & 0xffff, packed & 0xffff
class Snapshot(object):
_attrs_ = ('packed_jitcode_pc', 'box_array', 'prev')
prev = None
def __init__(self, packed_jitcode_pc, box_array):
self.packed_jitcode_pc = packed_jitcode_pc
self.box_array = box_array
class TopSnapshot(Snapshot):
def __init__(self, packed_jitcode_pc, box_array, vable_array, vref_array):
Snapshot.__init__(self, packed_jitcode_pc, box_array)
self.vable_array = vable_array
self.vref_array = vref_array
class Trace(BaseTrace):
_deadranges = (-1, None)
def __init__(self, inputargs, metainterp_sd):
self.metainterp_sd = metainterp_sd
self._ops = [rffi.cast(get_model(self).STORAGE_TP, 0)] * get_model(self).INIT_SIZE
self._pos = 0
self._consts_bigint = 0
self._consts_float = 0
self._total_snapshots = 0
self._consts_ptr = 0
self._descrs = [None]
self._refs = [lltype.nullptr(llmemory.GCREF.TO)]
self._refs_dict = llhelper.new_ref_dict_3()
self._bigints = []
self._bigints_dict = {}
self._floats = []
self._snapshots = []
for i, inparg in enumerate(inputargs):
inparg.set_position(i)
self._count = len(inputargs) # total count
self._index = len(inputargs) # "position" of resulting resops
self._start = len(inputargs)
self._pos = self._start
self.inputargs = inputargs
self.tag_overflow = False
def append(self, v):
model = get_model(self)
if self._pos >= len(self._ops):
# grow by 2X
self._ops = self._ops + [rffi.cast(model.STORAGE_TP, 0)] * len(self._ops)
if not model.MIN_VALUE <= v <= model.MAX_VALUE:
v = 0 # broken value, but that's fine, tracing will stop soon
self.tag_overflow = True
self._ops[self._pos] = rffi.cast(model.STORAGE_TP, v)
self._pos += 1
def tracing_done(self):
from rpython.rlib.debug import debug_start, debug_stop, debug_print
assert not self.tag_overflow
self._bigints_dict = {}
self._refs_dict = llhelper.new_ref_dict_3()
debug_start("jit-trace-done")
debug_print("trace length: " + str(self._pos))
debug_print(" total snapshots: " + str(self._total_snapshots))
debug_print(" bigint consts: " + str(self._consts_bigint) + " " + str(len(self._bigints)))
debug_print(" float consts: " + str(self._consts_float) + " " + str(len(self._floats)))
debug_print(" ref consts: " + str(self._consts_ptr) + " " + str(len(self._refs)))
debug_print(" descrs: " + str(len(self._descrs)))
debug_stop("jit-trace-done")
def length(self):
return self._pos
def cut_point(self):
return self._pos, self._count, self._index
def cut_at(self, end):
self._pos = end[0]
self._count = end[1]
self._index = end[2]
def cut_trace_from(self, (start, count, index), inputargs):
return CutTrace(self, start, count, index, inputargs)
def _encode(self, box):
if isinstance(box, Const):
if (isinstance(box, ConstInt) and
isinstance(box.getint(), int) and # symbolics
SMALL_INT_START <= box.getint() < SMALL_INT_STOP):
return tag(TAGINT, box.getint() - SMALL_INT_START)
elif isinstance(box, ConstInt):
self._consts_bigint += 1
if not isinstance(box.getint(), int):
# symbolics, for tests, don't worry about caching
v = len(self._bigints) << 1
self._bigints.append(box.getint())
else:
v = self._bigints_dict.get(box.getint(), -1)
if v == -1:
v = len(self._bigints) << 1
self._bigints_dict[box.getint()] = v
self._bigints.append(box.getint())
return tag(TAGCONSTOTHER, v)
elif isinstance(box, ConstFloat):
# don't intern float constants
self._consts_float += 1
v = (len(self._floats) << 1) | 1
self._floats.append(box.getfloatstorage())
return tag(TAGCONSTOTHER, v)
else:
self._consts_ptr += 1
assert isinstance(box, ConstPtr)
if not box.getref_base():
return tag(TAGCONSTPTR, 0)
addr = box.getref_base()
v = self._refs_dict.get(addr, -1)
if v == -1:
v = len(self._refs)
self._refs_dict[addr] = v
self._refs.append(box.getref_base())
return tag(TAGCONSTPTR, v)
elif isinstance(box, AbstractResOp):
assert box.get_position() >= 0
return tag(TAGBOX, box.get_position())
else:
assert False, "unreachable code"
def record_op(self, opnum, argboxes, descr=None):
pos = self._index
old_pos = self._pos
self.append(opnum)
expected_arity = oparity[opnum]
if expected_arity == -1:
self.append(len(argboxes))
else:
assert len(argboxes) == expected_arity
for box in argboxes:
self.append(self._encode(box))
if opwithdescr[opnum]:
# note that for guards we always store 0 which is later
# patched during capture_resumedata
if descr is None:
self.append(0)
else:
self.append(self._encode_descr(descr))
self._count += 1
if opclasses[opnum].type != 'v':
self._index += 1
if self.tag_overflow:
# potentially a broken op is left behind
# clean it up
self._pos = old_pos
return pos
def _encode_descr(self, descr):
if descr.descr_index != -1:
return descr.descr_index + 1
self._descrs.append(descr)
return len(self._descrs) - 1 + len(self.metainterp_sd.all_descrs) + 1
def _list_of_boxes(self, boxes):
array = [rffi.cast(get_model(self).STORAGE_TP, 0)] * len(boxes)
for i in range(len(boxes)):
array[i] = self._encode_cast(boxes[i])
return array
def new_array(self, lgt):
return [rffi.cast(get_model(self).STORAGE_TP, 0)] * lgt
def _encode_cast(self, i):
return rffi.cast(get_model(self).STORAGE_TP, self._encode(i))
def create_top_snapshot(self, jitcode, pc, frame, flag, vable_boxes, vref_boxes):
self._total_snapshots += 1
array = frame.get_list_of_active_boxes(flag, self.new_array, self._encode_cast)
vable_array = self._list_of_boxes(vable_boxes)
vref_array = self._list_of_boxes(vref_boxes)
s = TopSnapshot(combine_uint(jitcode.index, pc), array, vable_array,
vref_array)
# guards have no descr
self._snapshots.append(s)
if not self.tag_overflow: # otherwise we're broken anyway
assert rffi.cast(lltype.Signed, self._ops[self._pos - 1]) == 0
self._ops[self._pos - 1] = rffi.cast(get_model(self).STORAGE_TP, len(self._snapshots) - 1)
return s
def create_empty_top_snapshot(self, vable_boxes, vref_boxes):
self._total_snapshots += 1
vable_array = self._list_of_boxes(vable_boxes)
vref_array = self._list_of_boxes(vref_boxes)
s = TopSnapshot(combine_uint(2**16 - 1, 0), [], vable_array,
vref_array)
# guards have no descr
self._snapshots.append(s)
if not self.tag_overflow: # otherwise we're broken anyway
assert rffi.cast(lltype.Signed, self._ops[self._pos - 1]) == 0
self._ops[self._pos - 1] = rffi.cast(get_model(self).STORAGE_TP, len(self._snapshots) - 1)
return s
def create_snapshot(self, jitcode, pc, frame, flag):
self._total_snapshots += 1
array = frame.get_list_of_active_boxes(flag, self.new_array, self._encode_cast)
return Snapshot(combine_uint(jitcode.index, pc), array)
def get_iter(self):
return TraceIterator(self, self._start, self._pos,
metainterp_sd=self.metainterp_sd)
def get_live_ranges(self):
t = self.get_iter()
liveranges = [0] * self._index
index = t._count
while not t.done():
index = t.next_element_update_live_range(index, liveranges)
return liveranges
def get_dead_ranges(self):
""" Same as get_live_ranges, but returns a list of "dying" indexes,
such as for each index x, the number found there is for sure dead
before x
"""
def insert(ranges, pos, v):
# XXX skiplist
while ranges[pos]:
pos += 1
if pos == len(ranges):
return
ranges[pos] = v
if self._deadranges != (-1, None):
if self._deadranges[0] == self._count:
return self._deadranges[1]
liveranges = self.get_live_ranges()
deadranges = [0] * (self._index + 2)
assert len(deadranges) == len(liveranges) + 2
for i in range(self._start, len(liveranges)):
elem = liveranges[i]
if elem:
insert(deadranges, elem + 1, i)
self._deadranges = (self._count, deadranges)
return deadranges
def unpack(self):
iter = self.get_iter()
ops = []
try:
while True:
ops.append(iter.next())
except IndexError:
pass
return iter.inputargs, ops
def tag(kind, pos):
return (pos << TAGSHIFT) | kind
@specialize.ll()
def untag(tagged):
return intmask(tagged) & TAGMASK, intmask(tagged) >> TAGSHIFT
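# Worked example of the tagging scheme (values chosen for illustration only):
# tag(TAGBOX, 5) == (5 << 2) | 3 == 23, and untag(23) == (3, 5) == (TAGBOX, 5).
# Similarly, combine_uint(7, 42) == (7 << 16) | 42 == 458794 and
# unpack_uint(458794) == (7, 42), which is how (jitcode index, pc) pairs are
# packed into a single snapshot field.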
|
a36063dfaafe01d63d93a2994cdc06d528a04923
|
c46754b9600a12df4f9d7a6320dfc19aa96b1e1d
|
/src/transformers/models/longformer/configuration_longformer.py
|
1542c497989ff05a351026f7a19f9918c9f28154
|
[
"Apache-2.0"
] |
permissive
|
huggingface/transformers
|
ccd52a0d7c59e5f13205f32fd96f55743ebc8814
|
4fa0aff21ee083d0197a898cdf17ff476fae2ac3
|
refs/heads/main
| 2023-09-05T19:47:38.981127
| 2023-09-05T19:21:33
| 2023-09-05T19:21:33
| 155,220,641
| 102,193
| 22,284
|
Apache-2.0
| 2023-09-14T20:44:49
| 2018-10-29T13:56:00
|
Python
|
UTF-8
|
Python
| false
| false
| 9,564
|
py
|
configuration_longformer.py
|
# coding=utf-8
# Copyright 2020 The Allen Institute for AI team and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Longformer configuration"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class LongformerConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`LongformerModel`] or a [`TFLongformerModel`]. It
is used to instantiate a Longformer model according to the specified arguments, defining the model architecture.
    Instantiating a
configuration with the defaults will yield a similar configuration to that of the LongFormer
[allenai/longformer-base-4096](https://huggingface.co/allenai/longformer-base-4096) architecture with a sequence
length 4,096.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the Longformer model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`LongformerModel`] or [`TFLongformerModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`LongformerModel`] or
[`TFLongformerModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
attention_window (`int` or `List[int]`, *optional*, defaults to 512):
Size of an attention window around each token. If an `int`, use the same size for all layers. To specify a
different window size for each layer, use a `List[int]` where `len(attention_window) == num_hidden_layers`.
Example:
```python
>>> from transformers import LongformerConfig, LongformerModel
>>> # Initializing a Longformer configuration
>>> configuration = LongformerConfig()
>>> # Initializing a model from the configuration
>>> model = LongformerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "longformer"
def __init__(
self,
attention_window: Union[List[int], int] = 512,
sep_token_id: int = 2,
pad_token_id: int = 1,
bos_token_id: int = 0,
eos_token_id: int = 2,
vocab_size: int = 30522,
hidden_size: int = 768,
num_hidden_layers: int = 12,
num_attention_heads: int = 12,
intermediate_size: int = 3072,
hidden_act: str = "gelu",
hidden_dropout_prob: float = 0.1,
attention_probs_dropout_prob: float = 0.1,
max_position_embeddings: int = 512,
type_vocab_size: int = 2,
initializer_range: float = 0.02,
layer_norm_eps: float = 1e-12,
onnx_export: bool = False,
**kwargs,
):
"""Constructs LongformerConfig."""
super().__init__(pad_token_id=pad_token_id, **kwargs)
self.attention_window = attention_window
self.sep_token_id = sep_token_id
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
super().__init__(config, task, patching_specs)
config.onnx_export = True
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
else:
dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
]
)
@property
def outputs(self) -> Mapping[str, Mapping[int, str]]:
outputs = super().outputs
if self.task == "default":
outputs["pooler_output"] = {0: "batch"}
return outputs
@property
def atol_for_validation(self) -> float:
"""
What absolute tolerance value to use during model conversion validation.
Returns:
Float absolute tolerance value.
"""
return 1e-4
@property
def default_onnx_opset(self) -> int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset, 14)
def generate_dummy_inputs(
self,
tokenizer: "PreTrainedTokenizerBase",
batch_size: int = -1,
seq_length: int = -1,
is_pair: bool = False,
framework: Optional[TensorType] = None,
) -> Mapping[str, Any]:
inputs = super().generate_dummy_inputs(
preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
)
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
# make every second token global
inputs["global_attention_mask"][:, ::2] = 1
return inputs
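# Per-layer attention windows, as described in the `attention_window` docstring
# above (a sketch; the window sizes here are arbitrary, one entry per layer):
#
# per_layer_config = LongformerConfig(
# attention_window=[32, 32, 32, 32, 64, 64, 64, 64, 128, 128, 128, 128],
# num_hidden_layers=12,
# )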
|
72012275f241f89f13d02499f71e65bb7b77ccac
|
fd9f5186fa5d19db077dbf302fe9c940cb42e82f
|
/tests/admin/test_parts.py
|
b9f5cf1b88b0d1d9e7efeb26211a4dfa6f34aad7
|
[
"Apache-2.0"
] |
permissive
|
vesoft-inc/nebula
|
a0b9af548e124e59ecbfb0c5152098a1020b621c
|
7c32088dec1891870a24aaa37ee5818e69f5ad6d
|
refs/heads/master
| 2023-08-17T00:00:29.022525
| 2023-08-16T04:02:03
| 2023-08-16T04:02:03
| 146,459,443
| 11,007
| 1,220
|
Apache-2.0
| 2023-09-05T05:48:16
| 2018-08-28T14:25:09
|
C++
|
UTF-8
|
Python
| false
| false
| 1,976
|
py
|
test_parts.py
|
# --coding:utf-8--
#
# Copyright (c) 2020 vesoft inc. All rights reserved.
#
# This source code is licensed under Apache 2.0 License.
import time
import re
from tests.common.nebula_test_suite import NebulaTestSuite
leader_pattern = re.compile(r'127.0.0.1:.*|^$')
peers_pattern = re.compile(r'127.0.0.1:.*')
losts_pattern = re.compile(r'')
class TestParts(NebulaTestSuite):
@classmethod
def prepare(self):
resp = self.client.execute('CREATE SPACE space_show_parts(partition_num=1, vid_type=FIXED_STRING(8));'
'USE space_show_parts;')
self.check_resp_succeeded(resp)
# Wait for leader info
time.sleep(self.delay)
@classmethod
def cleanup(self):
resp = self.client.execute('DROP SPACE space_show_parts;')
self.check_resp_succeeded(resp)
def test_part(self):
# All
resp = self.client.execute('SHOW PARTS')
self.check_resp_succeeded(resp)
expected_col_names = ["Partition ID", "Leader", "Peers", "Losts"]
self.check_column_names(resp, expected_col_names)
expected_result = [
[re.compile(r'{}'.format(i)),
leader_pattern,
peers_pattern,
losts_pattern]
for i in range(1, 2)
]
self.check_result(resp, expected_result, is_regex=True)
# Specify the part id
resp = self.client.execute('SHOW PART 1')
self.check_resp_succeeded(resp)
expected_col_names = ["Partition ID", "Leader", "Peers", "Losts"]
self.check_column_names(resp, expected_col_names)
expected_result = [[re.compile(r'1'),
leader_pattern,
peers_pattern,
losts_pattern]]
self.check_result(resp, expected_result, is_regex=True)
# Not exist part id
resp = self.client.execute('SHOW PART 10')
self.check_resp_failed(resp)
|
4182e3918d2137db947b624ae3db16ad57bd2002
|
d141f35262ceb75a4b38c2c4972a6f267e184199
|
/execution_trace/tests/functions/f_conditional_else.py
|
dc267a11309ba103f1dda2a0351611be43d616a1
|
[
"MIT"
] |
permissive
|
mihneadb/python-execution-trace
|
14c46dd2f0a9657937b9f14c96a3d3a5900a7d87
|
c4397a5342e51e7451c4c85b538d047636f391b0
|
refs/heads/master
| 2023-05-10T19:30:09.518141
| 2023-05-03T06:56:42
| 2023-05-03T06:56:42
| 46,447,496
| 198
| 20
|
MIT
| 2023-05-03T06:56:43
| 2015-11-18T21:08:08
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 612
|
py
|
f_conditional_else.py
|
from execution_trace.record import record
@record(10) # 1
def f(): # 2
"""Fn with conditional having else.""" # 3
x = 3 # 4
y = 2 # 5
if x != 3: # 6
y = 5 # 7
else: # 8
y = 6 # 9
args = ()
expected_trace = [{u'data': [{u'lineno': 3, u'state': {}},
{u'lineno': 4, u'state': {u'x': u'3'}},
{u'lineno': 5, u'state': {u'x': u'3', u'y': u'2'}},
{u'lineno': 6, u'state': {u'x': u'3', u'y': u'2'}},
{u'lineno': 9, u'state': {u'x': u'3', u'y': u'6'}}]}]
|
51e666a1f16046daeff941fcd8e4d80b159f2c97
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/cv/SiamFC/src/create_dataset_ILSVRC.py
|
a71fb890a97b26b90e96380b0a31682b7007b79a
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 4,260
|
py
|
create_dataset_ILSVRC.py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""start create_dataset_ILSVRC"""
import pickle
import os
import functools
import xml.etree.ElementTree as ET
import sys
import argparse
import multiprocessing
from multiprocessing import Pool
from glob import glob
import cv2
from tqdm import tqdm
from src import config, get_instance_image
sys.path.append(os.getcwd())
multiprocessing.set_start_method('spawn', True)
def worker(output_dir, video_dir):
"""worker used read image and box position"""
image_names = glob(os.path.join(video_dir, '*.JPEG'))
video_name = video_dir.split('/')[-1]
save_folder = os.path.join(output_dir, video_name)
if not os.path.exists(save_folder):
os.mkdir(save_folder)
trajs = {}
for image_name in image_names:
img = cv2.imread(image_name)
img_mean = tuple(map(int, img.mean(axis=(0, 1))))
anno_name = image_name.replace('Data', 'Annotations')
anno_name = anno_name.replace('JPEG', 'xml')
tree = ET.parse(anno_name)
root = tree.getroot()
filename = root.find('filename').text
for obj in root.iter('object'):
bbox = obj.find(
'bndbox')
bbox = list(map(int, [bbox.find('xmin').text,
bbox.find('ymin').text,
bbox.find('xmax').text,
bbox.find('ymax').text]))
trkid = int(obj.find('trackid').text)
if trkid in trajs:
trajs[trkid].append(filename)
else:
trajs[trkid] = [filename]
instance_img, _, _ = get_instance_image(img, bbox,
config.exemplar_size,
config.instance_size,
config.context_amount,
img_mean)
instance_img_name = os.path.join(save_folder, filename + ".{:02d}.x.jpg".format(trkid))
cv2.imwrite(instance_img_name, instance_img)
return video_name, trajs
def processing(data_dir, output_dir, num_threads=32):
"""
the mian process to pretreatment picture and use multi-threads
"""
video_dir = os.path.join(data_dir, 'Data/VID')
all_videos = glob(os.path.join(video_dir, 'train/ILSVRC2015_VID_train_0000/*')) + \
glob(os.path.join(video_dir, 'train/ILSVRC2015_VID_train_0001/*')) + \
glob(os.path.join(video_dir, 'train/ILSVRC2015_VID_train_0002/*')) + \
glob(os.path.join(video_dir, 'train/ILSVRC2015_VID_train_0003/*')) + \
glob(os.path.join(video_dir, 'val/*'))
meta_data = []
if not os.path.exists(output_dir):
os.makedirs(output_dir)
with Pool(processes=num_threads) as pool:
for ret in tqdm(pool.imap_unordered(functools.partial(worker, output_dir), all_videos),
total=len(all_videos)):
meta_data.append(ret)
pickle.dump(meta_data, open(os.path.join(output_dir, "meta_data.pkl"), 'wb'))
Data_dir = '/data/VID/ILSVRC2015'
Output_dir = '/data/VID/ILSVRC_VID_CURATION_train'
Num_threads = 32
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Demo SiamFC")
parser.add_argument('--d', default=Data_dir, type=str, help="data_dir")
parser.add_argument('--o', default=Output_dir, type=str, help="out put")
parser.add_argument('--t', default=Num_threads, type=int, help="thread_num")
args = parser.parse_args()
processing(args.d, args.o, args.t)
|
2d2b0716959ee6519afefc54c6a155b78a24e6a1
|
6a468c1650b3c083f102f19ace0b0d6e4d0686f7
|
/sympy/printing/tests/test_numpy.py
|
ae2a86676522f0339f284cc756cf7a7cae871d2a
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
sympy/sympy
|
a5f8accaa7686c59d9b5c94212fef60d746dac4b
|
69f98fb2b0d845e76874067a381dba37b577e8c5
|
refs/heads/master
| 2023-09-01T15:51:37.886107
| 2023-08-31T20:54:33
| 2023-08-31T20:54:33
| 640,534
| 10,928
| 5,362
|
NOASSERTION
| 2023-09-14T17:29:13
| 2010-04-30T20:37:14
|
Python
|
UTF-8
|
Python
| false
| false
| 10,360
|
py
|
test_numpy.py
|
from sympy.concrete.summations import Sum
from sympy.core.mod import Mod
from sympy.core.relational import (Equality, Unequality)
from sympy.core.symbol import Symbol
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.special.gamma_functions import polygamma
from sympy.functions.special.error_functions import (Si, Ci)
from sympy.matrices.expressions.blockmatrix import BlockMatrix
from sympy.matrices.expressions.matexpr import MatrixSymbol
from sympy.matrices.expressions.special import Identity
from sympy.utilities.lambdify import lambdify
from sympy.abc import x, i, j, a, b, c, d
from sympy.core import Pow
from sympy.codegen.matrix_nodes import MatrixSolve
from sympy.codegen.numpy_nodes import logaddexp, logaddexp2
from sympy.codegen.cfunctions import log1p, expm1, hypot, log10, exp2, log2, Sqrt
from sympy.tensor.array import Array
from sympy.tensor.array.expressions.array_expressions import ArrayTensorProduct, ArrayAdd, \
PermuteDims, ArrayDiagonal
from sympy.printing.numpy import NumPyPrinter, SciPyPrinter, _numpy_known_constants, \
_numpy_known_functions, _scipy_known_constants, _scipy_known_functions
from sympy.tensor.array.expressions.from_matrix_to_array import convert_matrix_to_array
from sympy.testing.pytest import skip, raises
from sympy.external import import_module
np = import_module('numpy')
if np:
    default_float_info = np.finfo(np.array([]).dtype)
    NUMPY_DEFAULT_EPSILON = default_float_info.eps
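    # Machine epsilon of NumPy's default float dtype, used below as the
    # tolerance in the floating point comparisons.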
def test_numpy_piecewise_regression():
"""
NumPyPrinter needs to print Piecewise()'s choicelist as a list to avoid
breaking compatibility with numpy 1.8. This is not necessary in numpy 1.9+.
See gh-9747 and gh-9749 for details.
"""
printer = NumPyPrinter()
p = Piecewise((1, x < 0), (0, True))
assert printer.doprint(p) == \
'numpy.select([numpy.less(x, 0),True], [1,0], default=numpy.nan)'
assert printer.module_imports == {'numpy': {'select', 'less', 'nan'}}
def test_numpy_logaddexp():
lae = logaddexp(a, b)
assert NumPyPrinter().doprint(lae) == 'numpy.logaddexp(a, b)'
lae2 = logaddexp2(a, b)
assert NumPyPrinter().doprint(lae2) == 'numpy.logaddexp2(a, b)'
def test_sum():
if not np:
skip("NumPy not installed")
s = Sum(x ** i, (i, a, b))
f = lambdify((a, b, x), s, 'numpy')
a_, b_ = 0, 10
x_ = np.linspace(-1, +1, 10)
assert np.allclose(f(a_, b_, x_), sum(x_ ** i_ for i_ in range(a_, b_ + 1)))
s = Sum(i * x, (i, a, b))
f = lambdify((a, b, x), s, 'numpy')
a_, b_ = 0, 10
x_ = np.linspace(-1, +1, 10)
assert np.allclose(f(a_, b_, x_), sum(i_ * x_ for i_ in range(a_, b_ + 1)))
def test_multiple_sums():
if not np:
skip("NumPy not installed")
s = Sum((x + j) * i, (i, a, b), (j, c, d))
f = lambdify((a, b, c, d, x), s, 'numpy')
a_, b_ = 0, 10
c_, d_ = 11, 21
x_ = np.linspace(-1, +1, 10)
assert np.allclose(f(a_, b_, c_, d_, x_),
sum((x_ + j_) * i_ for i_ in range(a_, b_ + 1) for j_ in range(c_, d_ + 1)))
def test_codegen_einsum():
if not np:
skip("NumPy not installed")
M = MatrixSymbol("M", 2, 2)
N = MatrixSymbol("N", 2, 2)
cg = convert_matrix_to_array(M * N)
f = lambdify((M, N), cg, 'numpy')
ma = np.array([[1, 2], [3, 4]])
mb = np.array([[1,-2], [-1, 3]])
assert (f(ma, mb) == np.matmul(ma, mb)).all()
def test_codegen_extra():
if not np:
skip("NumPy not installed")
M = MatrixSymbol("M", 2, 2)
N = MatrixSymbol("N", 2, 2)
P = MatrixSymbol("P", 2, 2)
Q = MatrixSymbol("Q", 2, 2)
ma = np.array([[1, 2], [3, 4]])
mb = np.array([[1,-2], [-1, 3]])
mc = np.array([[2, 0], [1, 2]])
md = np.array([[1,-1], [4, 7]])
cg = ArrayTensorProduct(M, N)
f = lambdify((M, N), cg, 'numpy')
assert (f(ma, mb) == np.einsum(ma, [0, 1], mb, [2, 3])).all()
cg = ArrayAdd(M, N)
f = lambdify((M, N), cg, 'numpy')
assert (f(ma, mb) == ma+mb).all()
cg = ArrayAdd(M, N, P)
f = lambdify((M, N, P), cg, 'numpy')
assert (f(ma, mb, mc) == ma+mb+mc).all()
cg = ArrayAdd(M, N, P, Q)
f = lambdify((M, N, P, Q), cg, 'numpy')
assert (f(ma, mb, mc, md) == ma+mb+mc+md).all()
cg = PermuteDims(M, [1, 0])
f = lambdify((M,), cg, 'numpy')
assert (f(ma) == ma.T).all()
cg = PermuteDims(ArrayTensorProduct(M, N), [1, 2, 3, 0])
f = lambdify((M, N), cg, 'numpy')
assert (f(ma, mb) == np.transpose(np.einsum(ma, [0, 1], mb, [2, 3]), (1, 2, 3, 0))).all()
cg = ArrayDiagonal(ArrayTensorProduct(M, N), (1, 2))
f = lambdify((M, N), cg, 'numpy')
assert (f(ma, mb) == np.diagonal(np.einsum(ma, [0, 1], mb, [2, 3]), axis1=1, axis2=2)).all()
def test_relational():
if not np:
skip("NumPy not installed")
e = Equality(x, 1)
f = lambdify((x,), e)
x_ = np.array([0, 1, 2])
assert np.array_equal(f(x_), [False, True, False])
e = Unequality(x, 1)
f = lambdify((x,), e)
x_ = np.array([0, 1, 2])
assert np.array_equal(f(x_), [True, False, True])
e = (x < 1)
f = lambdify((x,), e)
x_ = np.array([0, 1, 2])
assert np.array_equal(f(x_), [True, False, False])
e = (x <= 1)
f = lambdify((x,), e)
x_ = np.array([0, 1, 2])
assert np.array_equal(f(x_), [True, True, False])
e = (x > 1)
f = lambdify((x,), e)
x_ = np.array([0, 1, 2])
assert np.array_equal(f(x_), [False, False, True])
e = (x >= 1)
f = lambdify((x,), e)
x_ = np.array([0, 1, 2])
assert np.array_equal(f(x_), [False, True, True])
def test_mod():
if not np:
skip("NumPy not installed")
e = Mod(a, b)
f = lambdify((a, b), e)
a_ = np.array([0, 1, 2, 3])
b_ = 2
assert np.array_equal(f(a_, b_), [0, 1, 0, 1])
a_ = np.array([0, 1, 2, 3])
b_ = np.array([2, 2, 2, 2])
assert np.array_equal(f(a_, b_), [0, 1, 0, 1])
a_ = np.array([2, 3, 4, 5])
b_ = np.array([2, 3, 4, 5])
assert np.array_equal(f(a_, b_), [0, 0, 0, 0])
def test_pow():
if not np:
skip('NumPy not installed')
expr = Pow(2, -1, evaluate=False)
f = lambdify([], expr, 'numpy')
assert f() == 0.5
def test_expm1():
if not np:
skip("NumPy not installed")
f = lambdify((a,), expm1(a), 'numpy')
assert abs(f(1e-10) - 1e-10 - 5e-21) <= 1e-10 * NUMPY_DEFAULT_EPSILON
def test_log1p():
if not np:
skip("NumPy not installed")
f = lambdify((a,), log1p(a), 'numpy')
assert abs(f(1e-99) - 1e-99) <= 1e-99 * NUMPY_DEFAULT_EPSILON
def test_hypot():
if not np:
skip("NumPy not installed")
assert abs(lambdify((a, b), hypot(a, b), 'numpy')(3, 4) - 5) <= NUMPY_DEFAULT_EPSILON
def test_log10():
if not np:
skip("NumPy not installed")
assert abs(lambdify((a,), log10(a), 'numpy')(100) - 2) <= NUMPY_DEFAULT_EPSILON
def test_exp2():
if not np:
skip("NumPy not installed")
assert abs(lambdify((a,), exp2(a), 'numpy')(5) - 32) <= NUMPY_DEFAULT_EPSILON
def test_log2():
if not np:
skip("NumPy not installed")
assert abs(lambdify((a,), log2(a), 'numpy')(256) - 8) <= NUMPY_DEFAULT_EPSILON
def test_Sqrt():
if not np:
skip("NumPy not installed")
assert abs(lambdify((a,), Sqrt(a), 'numpy')(4) - 2) <= NUMPY_DEFAULT_EPSILON
def test_sqrt():
if not np:
skip("NumPy not installed")
assert abs(lambdify((a,), sqrt(a), 'numpy')(4) - 2) <= NUMPY_DEFAULT_EPSILON
def test_matsolve():
if not np:
skip("NumPy not installed")
M = MatrixSymbol("M", 3, 3)
x = MatrixSymbol("x", 3, 1)
expr = M**(-1) * x + x
matsolve_expr = MatrixSolve(M, x) + x
f = lambdify((M, x), expr)
f_matsolve = lambdify((M, x), matsolve_expr)
m0 = np.array([[1, 2, 3], [3, 2, 5], [5, 6, 7]])
assert np.linalg.matrix_rank(m0) == 3
x0 = np.array([3, 4, 5])
assert np.allclose(f_matsolve(m0, x0), f(m0, x0))
def test_16857():
if not np:
skip("NumPy not installed")
a_1 = MatrixSymbol('a_1', 10, 3)
a_2 = MatrixSymbol('a_2', 10, 3)
a_3 = MatrixSymbol('a_3', 10, 3)
a_4 = MatrixSymbol('a_4', 10, 3)
A = BlockMatrix([[a_1, a_2], [a_3, a_4]])
assert A.shape == (20, 6)
printer = NumPyPrinter()
assert printer.doprint(A) == 'numpy.block([[a_1, a_2], [a_3, a_4]])'
def test_issue_17006():
if not np:
skip("NumPy not installed")
M = MatrixSymbol("M", 2, 2)
f = lambdify(M, M + Identity(2))
ma = np.array([[1, 2], [3, 4]])
mr = np.array([[2, 2], [3, 5]])
assert (f(ma) == mr).all()
from sympy.core.symbol import symbols
n = symbols('n', integer=True)
N = MatrixSymbol("M", n, n)
raises(NotImplementedError, lambda: lambdify(N, N + Identity(n)))
def test_numpy_array():
assert NumPyPrinter().doprint(Array(((1, 2), (3, 5)))) == 'numpy.array([[1, 2], [3, 5]])'
assert NumPyPrinter().doprint(Array((1, 2))) == 'numpy.array((1, 2))'
def test_numpy_known_funcs_consts():
assert _numpy_known_constants['NaN'] == 'numpy.nan'
assert _numpy_known_constants['EulerGamma'] == 'numpy.euler_gamma'
assert _numpy_known_functions['acos'] == 'numpy.arccos'
assert _numpy_known_functions['log'] == 'numpy.log'
def test_scipy_known_funcs_consts():
assert _scipy_known_constants['GoldenRatio'] == 'scipy.constants.golden_ratio'
assert _scipy_known_constants['Pi'] == 'scipy.constants.pi'
assert _scipy_known_functions['erf'] == 'scipy.special.erf'
assert _scipy_known_functions['factorial'] == 'scipy.special.factorial'
def test_numpy_print_methods():
prntr = NumPyPrinter()
assert hasattr(prntr, '_print_acos')
assert hasattr(prntr, '_print_log')
def test_scipy_print_methods():
prntr = SciPyPrinter()
assert hasattr(prntr, '_print_acos')
assert hasattr(prntr, '_print_log')
assert hasattr(prntr, '_print_erf')
assert hasattr(prntr, '_print_factorial')
assert hasattr(prntr, '_print_chebyshevt')
k = Symbol('k', integer=True, nonnegative=True)
x = Symbol('x', real=True)
assert prntr.doprint(polygamma(k, x)) == "scipy.special.polygamma(k, x)"
assert prntr.doprint(Si(x)) == "scipy.special.sici(x)[0]"
assert prntr.doprint(Ci(x)) == "scipy.special.sici(x)[1]"
|
715dfc816b24b638bac140006c0b3d6295701761
|
6f0ceee714bccf2a89c34a06aabd3bcb781a2fa4
|
/example/gluon/style_transfer/option.py
|
5faa52259d7c627a1f9f7abc8e6fe1c44c13b1c0
|
[
"Apache-2.0",
"MIT",
"Unlicense",
"BSL-1.0",
"NCSA",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause",
"OFL-1.0",
"BSD-2-Clause-Views",
"Zlib",
"CC-BY-4.0"
] |
permissive
|
yajiedesign/mxnet
|
5a495fd06dd1730c17d2d27d7e46c8a770847f17
|
8e5a16cf673db5aceb48d2cf7a0fc1abd0ee5e51
|
refs/heads/master
| 2021-03-30T22:37:18.603396
| 2020-10-23T06:40:17
| 2020-10-23T06:40:17
| 43,763,550
| 214
| 59
|
Apache-2.0
| 2020-06-01T23:31:15
| 2015-10-06T16:36:40
|
C++
|
UTF-8
|
Python
| false
| false
| 7,063
|
py
|
option.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import os
class Options():
def __init__(self):
self.parser = argparse.ArgumentParser(description="parser for MXNet-Gluon-Style-Transfer")
subparsers = self.parser.add_subparsers(title="subcommands", dest="subcommand")
# training args
train_arg = subparsers.add_parser("train",
help="parser for training arguments")
train_arg.add_argument("--ngf", type=int, default=128,
help="number of generator filter channels, default 128")
train_arg.add_argument("--epochs", type=int, default=4,
help="number of training epochs, default is 2")
train_arg.add_argument("--batch-size", type=int, default=4,
help="batch size for training, default is 4")
train_arg.add_argument("--dataset", type=str, default="dataset/",
help="path to training dataset, the path should point to a folder "
"containing another folder with all the training images")
train_arg.add_argument("--style-folder", type=str, default="images/styles/",
help="path to style-folder")
train_arg.add_argument("--save-model-dir", type=str, default="models/",
help="path to folder where trained model will be saved.")
train_arg.add_argument("--image-size", type=int, default=256,
help="size of training images, default is 256 X 256")
train_arg.add_argument("--style-size", type=int, default=512,
help="size of style-image, default is the original size of style image")
train_arg.add_argument("--cuda", type=int, default=1,
help="set it to 1 for running on GPU, 0 for CPU")
train_arg.add_argument("--seed", type=int, default=42,
help="random seed for training")
train_arg.add_argument("--content-weight", type=float, default=1.0,
help="weight for content-loss, default is 1.0")
train_arg.add_argument("--style-weight", type=float, default=5.0,
help="weight for style-loss, default is 5.0")
train_arg.add_argument("--lr", type=float, default=1e-3,
help="learning rate, default is 0.001")
train_arg.add_argument("--log-interval", type=int, default=500,
help="number of images after which the training loss is logged, default is 500")
train_arg.add_argument("--resume", type=str, default=None,
help="resume if needed")
# optim args (Gatys CVPR 2016)
optim_arg = subparsers.add_parser("optim",
help="parser for optimization arguments")
optim_arg.add_argument("--iters", type=int, default=500,
help="number of training iterations, default is 500")
optim_arg.add_argument("--content-image", type=str, default="images/content/venice-boat.jpg",
help="path to content image you want to stylize")
optim_arg.add_argument("--style-image", type=str, default="images/9styles/candy.jpg",
help="path to style-image")
optim_arg.add_argument("--content-size", type=int, default=512,
help="factor for scaling down the content image")
optim_arg.add_argument("--style-size", type=int, default=512,
help="size of style-image, default is the original size of style image")
optim_arg.add_argument("--output-image", type=str, default="output.jpg",
help="path for saving the output image")
optim_arg.add_argument("--cuda", type=int, default=1,
help="set it to 1 for running on GPU, 0 for CPU")
optim_arg.add_argument("--content-weight", type=float, default=1.0,
help="weight for content-loss, default is 1.0")
optim_arg.add_argument("--style-weight", type=float, default=5.0,
help="weight for style-loss, default is 5.0")
optim_arg.add_argument("--lr", type=float, default=1e1,
help="learning rate, default is 0.001")
optim_arg.add_argument("--log-interval", type=int, default=50,
help="number of images after which the training loss is logged, default is 50")
# evaluation args
eval_arg = subparsers.add_parser("eval", help="parser for evaluation/stylizing arguments")
eval_arg.add_argument("--ngf", type=int, default=128,
help="number of generator filter channels, default 128")
eval_arg.add_argument("--content-image", type=str, required=True,
help="path to content image you want to stylize")
eval_arg.add_argument("--style-image", type=str, default="images/9styles/candy.jpg",
help="path to style-image")
eval_arg.add_argument("--content-size", type=int, default=512,
help="factor for scaling down the content image")
eval_arg.add_argument("--style-size", type=int, default=512,
help="size of style-image, default is the original size of style image")
eval_arg.add_argument("--style-folder", type=str, default="images/9styles/",
help="path to style-folder")
eval_arg.add_argument("--output-image", type=str, default="output.jpg",
help="path for saving the output image")
eval_arg.add_argument("--model", type=str, required=True,
help="saved model to be used for stylizing the image")
eval_arg.add_argument("--cuda", type=int, default=1,
help="set it to 1 for running on GPU, 0 for CPU")
def parse(self):
return self.parser.parse_args()
|
5e3fcc2f88d1fae25c71e26e6920f10ddcba782f
|
2ae0b8d95d439ccfd55ea7933ad4a2994ad0f6c5
|
/tools/mo/openvino/tools/mo/middle/FusedBatchNormNonConstant.py
|
62b21cf3c103f0580108de145060e23fbb612f72
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/openvino
|
38ea745a247887a4e14580dbc9fc68005e2149f9
|
e4bed7a31c9f00d8afbfcabee3f64f55496ae56a
|
refs/heads/master
| 2023-08-18T03:47:44.572979
| 2023-08-17T21:24:59
| 2023-08-17T21:24:59
| 153,097,643
| 3,953
| 1,492
|
Apache-2.0
| 2023-09-14T21:42:24
| 2018-10-15T10:54:40
|
C++
|
UTF-8
|
Python
| false
| false
| 3,539
|
py
|
FusedBatchNormNonConstant.py
|
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.ops.elementwise import Mul, Add, Pow
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.graph.graph import Graph
from openvino.tools.mo.middle.replacement import MiddleReplacementPattern
from openvino.tools.mo.ops.const import Const
class FusedBatchNormNonConstant(MiddleReplacementPattern):
"""
Replaces FusedBatchNorm(input, beta, gamma, mean, variance) with non-constant mean and variance,
    but with constant beta and gamma, by a sub-expression consisting of a combination of Eltwise layers and ScaleShift.
    """
    enabled = True
enabled = True
def run_after(self):
from openvino.tools.mo.middle.pass_separator import MiddleStart
return [MiddleStart]
def run_before(self):
from openvino.tools.mo.middle.pass_separator import MiddleFinish
return [MiddleFinish]
def pattern(self):
return dict(
nodes=[
('op', dict(kind='op', op=lambda op: op in ['FusedBatchNorm', 'FusedBatchNormV2',
'FusedBatchNormV3']))],
edges=[]
)
def replace_pattern(self, graph: Graph, match: dict):
node = match['op']
if (node.data_format != b'NHWC' or
len(node.in_nodes()) != 5 or
node.in_node(0).value is not None or # input
node.in_node(1).value is None or # scale
node.in_node(2).value is None or # offset
node.in_node(3).value is not None or # mean
node.in_node(4).value is not None or # variance
node.in_node(1).value.ndim != 1 or
node.in_node(2).value.ndim != 1):
return
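        # Decompose the batch norm into elementwise nodes:
        #   out = ((input - mean) * (variance + eps) ** -0.5) * scale + offset
        # The Mul/Add/Pow/Const nodes below build exactly this expression.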
scale_mul = Mul(graph, dict(name=node.name + '/scale_mul_'))
shift_add = Add(graph, dict(name=node.name + '/shift_add_'))
mean_add = Add(graph, dict(name=node.name + '/mean_add_'))
variance_mul = Mul(graph, dict(name=node.name + '/variance_mul_'))
neg_const = Const(graph, dict(value=mo_array(-1), name=node.name + '/mean_negate_'))
mean_negate = Mul(graph, dict(name=node.name + '/mean_negate_'))
mean_arg = mean_add.create_node_with_data([
node.in_node(0),
mean_negate.create_node_with_data([node.in_node(3),
neg_const.create_node_with_data()
])])
shift_const = Const(graph, dict(value=node.eps, name=node.name + '/variance_denom_shift_const_'))
power_const = Const(graph, dict(value=-0.5, name=node.name + '/variance_denom_power_const_'))
variance_denom_shift = Add(graph, dict(name=node.name + '/variance_denom_shift_'))
variance_denom_power = Pow(graph, dict(name=node.name + '/variance_denom_power_'))
variance_arg = variance_mul.create_node_with_data([
mean_arg,
variance_denom_power.create_node_with_data([
variance_denom_shift.create_node_with_data([node.in_node(4), shift_const.create_node_with_data()]),
power_const.create_node_with_data()]
)])
shift_add.create_node_with_data([
scale_mul.create_node_with_data([
variance_arg,
node.in_node(1)]),
node.in_node(2)],
data_nodes=node.out_node())
node.graph.remove_node(node.id)
|
4c3754b8b358bdefb3ddd53e31156a673a692a2c
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/DQMOffline/Trigger/python/HEP17Monitoring_Client_cff.py
|
576f481f4fe9da66f531302145d88a8f5135567e
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 2,738
|
py
|
HEP17Monitoring_Client_cff.py
|
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
hep17Efficiency = DQMEDHarvester("DQMGenericClient",
subDirs = cms.untracked.vstring("HLT/EGM/HEP17/*"),
verbose = cms.untracked.uint32(0), # Set to 2 for all messages
resolution = cms.vstring(),
efficiency = cms.vstring(
"egamma_passFilter_et_ratioHEP17HEM17 'HEP17/HEM17;E_{T} [GeV];ratio' egamma_passFilter_et_hep17 egamma_passFilter_et_hem17",
"egamma_passFilter_etSC_ratioHEP17HEM17 'HEP17/HEM17;E_{T}^{SC} [GeV];ratio' egamma_passFilter_etSC_hep17 egamma_passFilter_etSC_hem17",
"egamma_passFilter_energy_ratioHEP17HEM17 'HEP17/HEM17;Energy [GeV];ratio' egamma_passFilter_energy_hep17 egamma_passFilter_energy_hem17",
"egamma_passFilter_phi_ratioHEP17HEM17 'HEP17/HEM17;#phi [rad];ratio' egamma_passFilter_phi_hep17 egamma_passFilter_phi_hem17",
"egamma_passFilter_hOverE_ratioHEP17HEM17 'HEP17/HEM17;H/E;ratio' egamma_passFilter_hOverE_hep17 egamma_passFilter_hOverE_hem17",
"egamma_passFilter_sigmaIEtaIEta_ratioHEP17HEM17 'HEP17/HEM17;#sigma_{i#etai#eta};ratio' egamma_passFilter_sigmaIEtaIEta_hep17 egamma_passFilter_sigmaIEtaIEta_hem17",
"egamma_passFilter_maxr9_ratioHEP17HEM17 'HEP17/HEM17;Max R9;ratio' egamma_passFilter_maxr9_hep17 egamma_passFilter_maxr9_hem17",
"egamma_passFilter_hltIsolEM_ratioHEP17HEM17 'HEP17/HEM17;HLT Iso EM [GeV];ratio' egamma_passFilter_hltIsolEM_hep17 egamma_passFilter_hltIsolEM_hem17",
"egamma_passFilter_hltIsolHad_ratioHEP17HEM17 'HEP17/HEM17;HLT Iso Had [GeV];ratio' egamma_passFilter_hltIsolHad_hep17 egamma_passFilter_hltIsolHad_hem17",
"egamma_passFilter_hltIsolTrksEle_ratioHEP17HEM17 'HEP17/HEM17;HLT Ele Iso Tracks [GeV];ratio' egamma_passFilter_hltIsolTrksEle_hep17 egamma_passFilter_hltIsolTrksEle_hem17",
"egamma_passFilter_HLTenergy_ratioHEP17HEM17 'HEP17/HEM17;HLT Energy [GeV];ratio' egamma_passFilter_HLTenergy_hep17 egamma_passFilter_HLTenergy_hem17",
"egamma_passFilter_HLTeta_ratioHEP17HEM17 'HEP17/HEM17;HLT #eta [rad];ratio' egamma_passFilter_HLTeta_hep17 egamma_passFilter_HLTeta_hem17",
"egamma_passFilter_HLTphi_ratioHEP17HEM17 'HEP17/HEM17;HLT #phi [rad];ratio' egamma_passFilter_HLTphi_hep17 egamma_passFilter_HLTphi_hem17",
),
efficiencyProfile = cms.untracked.vstring(
),
)
|
d6bc1a8280c595c24c00deb5fb44fb295e4cbbae
|
99dcb18a9e3ea367272f740b8cbf3c34285a0c08
|
/vertexai/preview/vision_models.py
|
67290e67366995dfbabb7284e76b50aaec2d219d
|
[
"Apache-2.0"
] |
permissive
|
googleapis/python-aiplatform
|
926a4873f35dbea15b2fd86c0e16b5e6556d803e
|
76b95b92c1d3b87c72d754d8c02b1bca652b9a27
|
refs/heads/main
| 2023-08-19T23:49:02.180075
| 2023-08-19T13:25:59
| 2023-08-19T13:27:27
| 298,017,988
| 418
| 240
|
Apache-2.0
| 2023-09-14T21:08:33
| 2020-09-23T15:43:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,096
|
py
|
vision_models.py
|
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Classes for working with vision models."""
from vertexai.vision_models._vision_models import (
Image,
ImageGenerationModel,
ImageGenerationResponse,
ImageCaptioningModel,
ImageQnAModel,
GeneratedImage,
MultiModalEmbeddingModel,
MultiModalEmbeddingResponse,
)
__all__ = [
"Image",
"ImageGenerationModel",
"ImageGenerationResponse",
"ImageCaptioningModel",
"ImageQnAModel",
"GeneratedImage",
"MultiModalEmbeddingModel",
"MultiModalEmbeddingResponse",
]
|
0643991ff11cc3bee108130f44c59ed9fcf49d0b
|
e09924b3ad57b24849db1611d8435a86de8e6f54
|
/src/stacking/datasets.py
|
8fac361d1c0c7761a36460998d592ab814dbf7f7
|
[
"MIT"
] |
permissive
|
lRomul/argus-freesound
|
3ae9617c72cb3adc3d0c0f232a65795fb560c7b6
|
4faf8f192035b413e8946bda3555474cb9ad8237
|
refs/heads/master
| 2023-08-19T23:46:44.634984
| 2021-03-24T20:21:08
| 2021-03-24T20:21:08
| 182,380,511
| 332
| 63
|
MIT
| 2022-06-22T02:43:30
| 2019-04-20T08:24:56
|
Python
|
UTF-8
|
Python
| false
| false
| 2,533
|
py
|
datasets.py
|
import time
import torch
import random
import numpy as np
import pandas as pd
from torch.utils.data import Dataset
from src import config
def load_fname_probs(experiments, fold, fname):
prob_lst = []
for experiment in experiments:
npy_path = config.predictions_dir / experiment / f'fold_{fold}' / 'val' / (fname + '.npy')
prob = np.load(npy_path)
prob_lst.append(prob)
probs = np.concatenate(prob_lst, axis=1)
return probs
def get_out_of_folds_data(experiments, corrections=None):
train_folds_df = pd.read_csv(config.train_folds_path)
probs_lst = []
targets_lst = []
folds_lst = []
fname_lst = []
for i, row in train_folds_df.iterrows():
labels = row.labels
if corrections is not None:
if row.fname in corrections:
action = corrections[row.fname]
if action == 'remove':
print(f"Skip {row.fname}")
continue
else:
print(f"Replace labels {row.fname} from {labels} to {action}")
labels = action
folds_lst.append(row.fold)
probs = load_fname_probs(experiments, row.fold, row.fname)
probs_lst.append(probs)
target = torch.zeros(len(config.classes))
for label in labels.split(','):
target[config.class2index[label]] = 1.
targets_lst.append(target)
fname_lst.append(row.fname)
return probs_lst, targets_lst, folds_lst
class StackingDataset(Dataset):
def __init__(self, folds_data, folds,
transform=None, size=None):
super().__init__()
self.folds = folds
self.transform = transform
self.size = size
self.probs_lst = []
self.targets_lst = []
for prob, trg, fold in zip(*folds_data):
if fold in folds:
self.probs_lst.append(prob)
self.targets_lst.append(trg)
def __len__(self):
if self.size is None:
return len(self.probs_lst)
else:
return self.size
def __getitem__(self, idx):
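        # When a fixed epoch size is requested, draw a random sample using a
        # time-based seed so repeated passes see different items.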
if self.size is not None:
seed = int(time.time() * 1000.0) + idx
np.random.seed(seed % (2 ** 31))
idx = np.random.randint(len(self.probs_lst))
probs = self.probs_lst[idx].copy()
target = self.targets_lst[idx].clone()
if self.transform is not None:
probs = self.transform(probs)
return probs, target
|
287a57caad11111253c14b623f719e5a84f6c92b
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/find-all-anagrams-in-a-string.py
|
6175e718a195f6e10cd2dcdbb60b74763009bfbb
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 666
|
py
|
find-all-anagrams-in-a-string.py
|
# Time: O(n)
# Space: O(1)
class Solution(object):
def findAnagrams(self, s, p):
"""
:type s: str
:type p: str
:rtype: List[int]
"""
result = []
cnts = [0] * 26
for c in p:
cnts[ord(c) - ord('a')] += 1
left, right = 0, 0
while right < len(s):
cnts[ord(s[right]) - ord('a')] -= 1
while left <= right and cnts[ord(s[right]) - ord('a')] < 0:
cnts[ord(s[left]) - ord('a')] += 1
left += 1
if right - left + 1 == len(p):
result.append(left)
right += 1
return result
|
c6a02233bcbcaf3dad4312bf766412d3b4a4eeda
|
e31f84c20af7be8646f03faf22ac55ad041444a3
|
/tests/test_encoding/test_count_frequency_encoder.py
|
f961a2960fbdcc6ed77e1084cddb4fc38cfa928b
|
[
"BSD-3-Clause"
] |
permissive
|
feature-engine/feature_engine
|
564aa2f298bb1beb0606bd5d51261b4d1085a8df
|
3343305a01d1acfeff846b65d33a5686c6e8c84f
|
refs/heads/main
| 2023-08-07T09:19:24.315277
| 2023-06-08T06:27:45
| 2023-06-08T06:27:45
| 163,630,824
| 874
| 105
|
BSD-3-Clause
| 2023-09-13T14:02:23
| 2018-12-31T01:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 14,113
|
py
|
test_count_frequency_encoder.py
|
import warnings
import pandas as pd
import pytest
from numpy import nan
from sklearn.exceptions import NotFittedError
from feature_engine.encoding import CountFrequencyEncoder
# init parameters
@pytest.mark.parametrize("enc_method", ["arbitrary", False, 1])
def test_error_if_encoding_method_not_permitted_value(enc_method):
with pytest.raises(ValueError):
CountFrequencyEncoder(encoding_method=enc_method)
@pytest.mark.parametrize(
"errors", ["empanada", False, 1, ("raise", "ignore"), ["ignore"]]
)
def test_error_if_unseen_gets_not_permitted_value(errors):
with pytest.raises(ValueError):
CountFrequencyEncoder(unseen=errors)
@pytest.mark.parametrize(
"params", [("count", "raise", True), ("frequency", "ignore", False)]
)
def test_init_param_assignment(params):
CountFrequencyEncoder(
encoding_method=params[0],
missing_values=params[1],
ignore_format=params[2],
unseen=params[1],
)
# fit and transform
def test_encode_1_variable_with_counts(df_enc):
# test case 1: 1 variable, counts
encoder = CountFrequencyEncoder(encoding_method="count", variables=["var_A"])
X = encoder.fit_transform(df_enc)
# expected result
transf_df = df_enc.copy()
transf_df["var_A"] = [
6,
6,
6,
6,
6,
6,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
4,
4,
4,
4,
]
# init params
assert encoder.encoding_method == "count"
assert encoder.variables == ["var_A"]
# fit params
assert encoder.variables_ == ["var_A"]
assert encoder.encoder_dict_ == {"var_A": {"A": 6, "B": 10, "C": 4}}
assert encoder.n_features_in_ == 3
# transform params
pd.testing.assert_frame_equal(X, transf_df)
def test_automatically_select_variables_encode_with_frequency(df_enc):
# test case 2: automatically select variables, frequency
encoder = CountFrequencyEncoder(encoding_method="frequency", variables=None)
X = encoder.fit_transform(df_enc)
# expected output
transf_df = df_enc.copy()
transf_df["var_A"] = [
0.3,
0.3,
0.3,
0.3,
0.3,
0.3,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.2,
0.2,
0.2,
0.2,
]
transf_df["var_B"] = [
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.3,
0.3,
0.3,
0.3,
0.3,
0.3,
0.2,
0.2,
0.2,
0.2,
]
# init params
assert encoder.encoding_method == "frequency"
assert encoder.variables is None
# fit params
assert encoder.variables_ == ["var_A", "var_B"]
assert encoder.encoder_dict_ == {
"var_A": {"A": 0.3, "B": 0.5, "C": 0.2},
"var_B": {"A": 0.5, "B": 0.3, "C": 0.2},
}
assert encoder.n_features_in_ == 3
# transform params
pd.testing.assert_frame_equal(X, transf_df)
def test_encoding_when_nan_in_fit_df(df_enc):
df = df_enc.copy()
df.loc[len(df)] = [nan, nan, nan]
encoder = CountFrequencyEncoder(
encoding_method="frequency",
missing_values="ignore",
)
encoder.fit(df_enc)
X = encoder.transform(
pd.DataFrame({"var_A": ["A", nan], "var_B": ["A", nan], "target": [1, 0]})
)
# transform params
pd.testing.assert_frame_equal(
X,
pd.DataFrame({"var_A": [0.3, nan], "var_B": [0.5, nan], "target": [1, 0]}),
)
@pytest.mark.parametrize("enc_method", ["arbitrary", False, 1])
def test_error_if_encoding_method_not_recognized_in_fit(enc_method, df_enc):
enc = CountFrequencyEncoder()
enc.encoding_method = enc_method
with pytest.raises(ValueError) as record:
enc.fit(df_enc)
msg = (
"Unrecognized value for encoding_method. It should be 'count' or "
f"'frequency'. Got {enc_method} instead."
)
assert str(record.value) == msg
def test_warning_when_df_contains_unseen_categories(df_enc, df_enc_rare):
# dataset to be transformed contains categories not present in
# training dataset (unseen categories), unseen set to ignore.
msg = "During the encoding, NaN values were introduced in the feature(s) var_A."
# check for warning when unseen equals 'ignore'
encoder = CountFrequencyEncoder(unseen="ignore")
encoder.fit(df_enc)
with pytest.warns(UserWarning) as record:
encoder.transform(df_enc_rare)
# check that only one warning was raised
assert len(record) == 1
# check that the message matches
assert record[0].message.args[0] == msg
def test_error_when_df_contains_unseen_categories(df_enc, df_enc_rare):
# dataset to be transformed contains categories not present in
# training dataset (unseen categories), unseen set to raise.
msg = "During the encoding, NaN values were introduced in the feature(s) var_A."
encoder = CountFrequencyEncoder(unseen="raise")
encoder.fit(df_enc)
# check for exception when unseen equals 'raise'
with pytest.raises(ValueError) as record:
encoder.transform(df_enc_rare)
# check that the error message matches
assert str(record.value) == msg
# check for no error and no warning when unseen equals 'encode'
with warnings.catch_warnings():
warnings.simplefilter("error")
encoder = CountFrequencyEncoder(unseen="encode")
encoder.fit(df_enc)
encoder.transform(df_enc_rare)
def test_no_error_triggered_when_df_contains_unseen_categories_and_unseen_is_encode(
df_enc, df_enc_rare
):
# dataset to be transformed contains categories not present in
# training dataset (unseen categories).
# check for no error and no warning when unseen equals 'encode'
warnings.simplefilter("error")
encoder = CountFrequencyEncoder(unseen="encode")
encoder.fit(df_enc)
with warnings.catch_warnings():
encoder.transform(df_enc_rare)
@pytest.mark.parametrize("errors", ["raise", "ignore", "encode"])
def test_fit_raises_error_if_df_contains_na(errors, df_enc_na):
# test case 4: when dataset contains na, fit method
encoder = CountFrequencyEncoder(unseen=errors)
with pytest.raises(ValueError) as record:
encoder.fit(df_enc_na)
msg = (
"Some of the variables in the dataset contain NaN. Check and "
"remove those before using this transformer or set the parameter "
"`missing_values='ignore'` when initialising this transformer."
)
assert str(record.value) == msg
@pytest.mark.parametrize("errors", ["raise", "ignore", "encode"])
def test_transform_raises_error_if_df_contains_na(errors, df_enc, df_enc_na):
# test case 4: when dataset contains na, transform method
encoder = CountFrequencyEncoder(unseen=errors)
encoder.fit(df_enc)
with pytest.raises(ValueError) as record:
encoder.transform(df_enc_na)
msg = (
"Some of the variables in the dataset contain NaN. Check and "
"remove those before using this transformer or set the parameter "
"`missing_values='ignore'` when initialising this transformer."
)
assert str(record.value) == msg
def test_zero_encoding_for_new_categories():
df_fit = pd.DataFrame(
{"col1": ["a", "a", "b", "a", "c"], "col2": ["1", "2", "3", "1", "2"]}
)
df_transf = pd.DataFrame(
{"col1": ["a", "d", "b", "a", "c"], "col2": ["1", "2", "3", "1", "4"]}
)
encoder = CountFrequencyEncoder(unseen="encode").fit(df_fit)
result = encoder.transform(df_transf)
# check that no NaNs are added
assert pd.isnull(result).sum().sum() == 0
# check that the counts are correct for both new and old
expected_result = pd.DataFrame({"col1": [3, 0, 1, 3, 1], "col2": [2, 2, 1, 2, 0]})
pd.testing.assert_frame_equal(result, expected_result)
def test_zero_encoding_for_unseen_categories_if_unseen_is_encode():
df_fit = pd.DataFrame(
{"col1": ["a", "a", "b", "a", "c"], "col2": ["1", "2", "3", "1", "2"]}
)
df_transform = pd.DataFrame(
{"col1": ["a", "d", "b", "a", "c"], "col2": ["1", "2", "3", "1", "4"]}
)
# count encoding
encoder = CountFrequencyEncoder(unseen="encode").fit(df_fit)
result = encoder.transform(df_transform)
# check that no NaNs are added
assert pd.isnull(result).sum().sum() == 0
# check that the counts are correct
expected_result = pd.DataFrame({"col1": [3, 0, 1, 3, 1], "col2": [2, 2, 1, 2, 0]})
pd.testing.assert_frame_equal(result, expected_result)
# with frequency
encoder = CountFrequencyEncoder(encoding_method="frequency", unseen="encode").fit(
df_fit
)
result = encoder.transform(df_transform)
# check that no NaNs are added
assert pd.isnull(result).sum().sum() == 0
# check that the frequencies are correct
expected_result = pd.DataFrame(
{"col1": [0.6, 0, 0.2, 0.6, 0.2], "col2": [0.4, 0.4, 0.2, 0.4, 0]}
)
pd.testing.assert_frame_equal(result, expected_result)
def test_nan_encoding_for_new_categories_if_unseen_is_ignore():
df_fit = pd.DataFrame(
{"col1": ["a", "a", "b", "a", "c"], "col2": ["1", "2", "3", "1", "2"]}
)
df_transf = pd.DataFrame(
{"col1": ["a", "d", "b", "a", "c"], "col2": ["1", "2", "3", "1", "4"]}
)
encoder = CountFrequencyEncoder(unseen="ignore").fit(df_fit)
result = encoder.transform(df_transf)
    # check that NaN was introduced for each of the two unseen categories
assert pd.isnull(result).sum().sum() == 2
# check that the counts are correct for both new and old
expected_result = pd.DataFrame(
{"col1": [3, nan, 1, 3, 1], "col2": [2, 2, 1, 2, nan]}
)
pd.testing.assert_frame_equal(result, expected_result)
def test_ignore_variable_format_with_frequency(df_vartypes):
encoder = CountFrequencyEncoder(
encoding_method="frequency", variables=None, ignore_format=True
)
X = encoder.fit_transform(df_vartypes)
# expected output
transf_df = {
"Name": [0.25, 0.25, 0.25, 0.25],
"City": [0.25, 0.25, 0.25, 0.25],
"Age": [0.25, 0.25, 0.25, 0.25],
"Marks": [0.25, 0.25, 0.25, 0.25],
"dob": [0.25, 0.25, 0.25, 0.25],
}
transf_df = pd.DataFrame(transf_df)
# init params
assert encoder.encoding_method == "frequency"
assert encoder.variables is None
# fit params
assert encoder.variables_ == ["Name", "City", "Age", "Marks", "dob"]
assert encoder.n_features_in_ == 5
# transform params
pd.testing.assert_frame_equal(X, transf_df)
def test_column_names_are_numbers(df_numeric_columns):
encoder = CountFrequencyEncoder(
encoding_method="frequency", variables=[0, 1, 2, 3], ignore_format=True
)
X = encoder.fit_transform(df_numeric_columns)
# expected output
transf_df = {
0: [0.25, 0.25, 0.25, 0.25],
1: [0.25, 0.25, 0.25, 0.25],
2: [0.25, 0.25, 0.25, 0.25],
3: [0.25, 0.25, 0.25, 0.25],
4: pd.date_range("2020-02-24", periods=4, freq="T"),
}
transf_df = pd.DataFrame(transf_df)
# init params
assert encoder.encoding_method == "frequency"
assert encoder.variables == [0, 1, 2, 3]
# fit params
assert encoder.variables_ == [0, 1, 2, 3]
assert encoder.n_features_in_ == 5
# transform params
pd.testing.assert_frame_equal(X, transf_df)
def test_variables_cast_as_category(df_enc_category_dtypes):
encoder = CountFrequencyEncoder(encoding_method="count", variables=["var_A"])
X = encoder.fit_transform(df_enc_category_dtypes)
# expected result
transf_df = df_enc_category_dtypes.copy()
transf_df["var_A"] = [
6,
6,
6,
6,
6,
6,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
4,
4,
4,
4,
]
# transform params
pd.testing.assert_frame_equal(X, transf_df, check_dtype=False)
assert X["var_A"].dtypes == int
encoder = CountFrequencyEncoder(encoding_method="frequency", variables=["var_A"])
X = encoder.fit_transform(df_enc_category_dtypes)
assert X["var_A"].dtypes == float
def test_inverse_transform_when_no_unseen():
df = pd.DataFrame({"words": ["dog", "dog", "cat", "cat", "cat", "bird"]})
enc = CountFrequencyEncoder()
enc.fit(df)
dft = enc.transform(df)
pd.testing.assert_frame_equal(enc.inverse_transform(dft), df)
def test_inverse_transform_when_ignore_unseen():
df1 = pd.DataFrame({"words": ["dog", "dog", "cat", "cat", "cat", "bird"]})
df2 = pd.DataFrame({"words": ["dog", "dog", "cat", "cat", "cat", "frog"]})
df3 = pd.DataFrame({"words": ["dog", "dog", "cat", "cat", "cat", nan]})
enc = CountFrequencyEncoder(unseen="ignore")
enc.fit(df1)
dft = enc.transform(df2)
pd.testing.assert_frame_equal(enc.inverse_transform(dft), df3)
def test_inverse_transform_when_encode_unseen():
df1 = pd.DataFrame({"words": ["dog", "dog", "cat", "cat", "cat", "bird"]})
df2 = pd.DataFrame({"words": ["dog", "dog", "cat", "cat", "cat", "frog"]})
df3 = pd.DataFrame({"words": ["dog", "dog", "cat", "cat", "cat", nan]})
enc = CountFrequencyEncoder(unseen="encode")
enc.fit(df1)
dft = enc.transform(df2)
pd.testing.assert_frame_equal(enc.inverse_transform(dft), df3)
def test_inverse_transform_raises_non_fitted_error():
df1 = pd.DataFrame({"words": ["dog", "dog", "cat", "cat", "cat", "bird"]})
enc = CountFrequencyEncoder()
# Test when fit is not called prior to transform.
with pytest.raises(NotFittedError):
enc.inverse_transform(df1)
df1.loc[len(df1) - 1] = nan
with pytest.raises(ValueError):
enc.fit(df1)
# Test when fit is not called prior to transform.
with pytest.raises(NotFittedError):
enc.inverse_transform(df1)
|
6dc939d953a77b18094e36c7294e79ac68cd201e
|
123e2e28017973eefedaffb273cb3a5164f582c5
|
/junction/base/admin.py
|
66935741a4ab4c38ec00e735d3835e802a4cd667
|
[
"MIT"
] |
permissive
|
pythonindia/junction
|
ef4c0bf64f8c396edd2407f6d91444ab60a36b02
|
208d1757bf39c4727cf78b52cd2285e902eec84d
|
refs/heads/master
| 2023-08-17T09:30:50.961028
| 2023-08-10T06:44:34
| 2023-08-10T06:44:34
| 27,966,694
| 209
| 226
|
MIT
| 2023-08-10T06:44:35
| 2014-12-13T16:40:17
|
Python
|
UTF-8
|
Python
| false
| false
| 884
|
py
|
admin.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.contrib import admin
def save_model(self, request, obj, form, change):
""" Overriding this to update created_by & modified by users """
instance = form.save(commit=False)
if not instance.created_at and not instance.modified_at:
instance.created_by = request.user
instance.modified_by = request.user
instance.save()
form.save_m2m()
return instance
class TimeAuditAdmin(admin.ModelAdmin):
list_display = (
"created_at",
"modified_at",
)
class AuditAdmin(TimeAuditAdmin):
list_display = ("created_by", "modified_by",) + TimeAuditAdmin.list_display
exclude = (
"created_by",
"modified_by",
)
def save_model(self, request, obj, form, change):
save_model(self, request, obj, form, change)
|
1dd434dd22545c3c6e59a0de8e67929c8fec1759
|
d110546d747d7e3865ce5742d5fca09f404623c0
|
/tests/pytests/integration/modules/saltutil/test_modules.py
|
9d10189bb30db118c9bd3ec68a627f69cd3008e4
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
saltstack/salt
|
354fc86a7be1f69514b3dd3b2edb9e6f66844c1d
|
1ef90cbdc7203f97775edb7666db86a41eb9fc15
|
refs/heads/master
| 2023-07-19T20:56:20.210556
| 2023-06-29T23:12:28
| 2023-07-19T11:47:47
| 1,390,248
| 11,026
| 6,296
|
Apache-2.0
| 2023-09-14T20:45:37
| 2011-02-20T20:16:56
|
Python
|
UTF-8
|
Python
| false
| false
| 4,144
|
py
|
test_modules.py
|
"""
Integration tests for the saltutil module.
"""
import pytest
pytestmark = [
pytest.mark.windows_whitelisted,
]
@pytest.fixture(autouse=True)
def refresh_pillar(salt_call_cli, salt_minion, salt_sub_minion):
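    # Before each test: sync with an empty whitelist for execution modules so
    # none of the custom test modules are loaded; after the test, run a full
    # sync_all to restore them.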
whitelist = {
"modules": [],
}
ret = salt_call_cli.run("saltutil.sync_all", extmod_whitelist=whitelist)
assert ret.returncode == 0
assert ret.data
try:
yield
finally:
ret = salt_call_cli.run("saltutil.sync_all")
assert ret.returncode == 0
assert ret.data
@pytest.mark.slow_test
def test_sync_all(salt_call_cli):
"""
    Test syncing all module types
"""
expected_return = {
"engines": [],
"clouds": [],
"grains": [],
"beacons": [],
"utils": [],
"returners": [],
"modules": [
"modules.depends_versioned",
"modules.depends_versionless",
"modules.override_test",
"modules.runtests_decorators",
"modules.runtests_helpers",
"modules.salttest",
],
"renderers": [],
"log_handlers": [],
"matchers": [],
"states": [],
"sdb": [],
"proxymodules": [],
"executors": [],
"output": [],
"thorium": [],
"serializers": [],
}
ret = salt_call_cli.run("saltutil.sync_all")
assert ret.returncode == 0
assert ret.data
assert ret.data == expected_return
@pytest.mark.slow_test
def test_sync_all_whitelist(salt_call_cli):
"""
    Test syncing all module types with a whitelist
"""
expected_return = {
"engines": [],
"clouds": [],
"grains": [],
"beacons": [],
"utils": [],
"returners": [],
"modules": ["modules.salttest"],
"renderers": [],
"log_handlers": [],
"matchers": [],
"states": [],
"sdb": [],
"proxymodules": [],
"executors": [],
"output": [],
"thorium": [],
"serializers": [],
}
ret = salt_call_cli.run(
"saltutil.sync_all", extmod_whitelist={"modules": ["salttest"]}
)
assert ret.returncode == 0
assert ret.data
assert ret.data == expected_return
@pytest.mark.slow_test
def test_sync_all_blacklist(salt_call_cli):
"""
    Test syncing all module types with a blacklist
"""
expected_return = {
"engines": [],
"clouds": [],
"grains": [],
"beacons": [],
"utils": [],
"returners": [],
"modules": [
"modules.override_test",
"modules.runtests_helpers",
"modules.salttest",
],
"renderers": [],
"log_handlers": [],
"matchers": [],
"states": [],
"sdb": [],
"proxymodules": [],
"executors": [],
"output": [],
"thorium": [],
"serializers": [],
}
ret = salt_call_cli.run(
"saltutil.sync_all",
extmod_blacklist={
"modules": [
"runtests_decorators",
"depends_versioned",
"depends_versionless",
]
},
)
assert ret.returncode == 0
assert ret.data
assert ret.data == expected_return
@pytest.mark.slow_test
def test_sync_all_blacklist_and_whitelist(salt_call_cli):
"""
    Test syncing all module types with both a whitelist and a blacklist
"""
expected_return = {
"engines": [],
"clouds": [],
"grains": [],
"beacons": [],
"utils": [],
"returners": [],
"executors": [],
"modules": [],
"renderers": [],
"log_handlers": [],
"matchers": [],
"states": [],
"sdb": [],
"proxymodules": [],
"output": [],
"thorium": [],
"serializers": [],
}
ret = salt_call_cli.run(
"saltutil.sync_all",
extmod_whitelist={"modules": ["runtests_decorators"]},
extmod_blacklist={"modules": ["runtests_decorators"]},
)
assert ret.returncode == 0
assert ret.data
assert ret.data == expected_return
|
a166cf0a4de019176a0fd84fa392778167a05952
|
0f59e486ea9d7c96b8c3f7f92bf063fc8389f1e8
|
/PE/carve.py
|
7a4f8dc73a94dbc5d4937122da66069d56a35e33
|
[
"Apache-2.0"
] |
permissive
|
vivisect/vivisect
|
ac259918b6281d9431c32a0b2307c61f9cab0dec
|
b07e161cc28b19fdda0d047eefafed22c5b00f15
|
refs/heads/master
| 2023-08-25T09:02:00.526532
| 2023-07-26T03:07:07
| 2023-07-26T03:07:07
| 26,651,759
| 833
| 181
|
Apache-2.0
| 2023-09-07T03:43:53
| 2014-11-14T18:28:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,611
|
py
|
carve.py
|
import sys
import struct
import argparse
from io import BytesIO
from itertools import cycle
import PE
MAX_OFFSET_PE_AFTER_MZ = 0x200
def xorbytes(data, key):
return bytes([(x ^ y) for (x, y) in zip(data, cycle(key))])
def xorstatic(data, i):
return bytes([(c ^ i) for c in data])
mz_xor = [(xorstatic(b'MZ', i), xorstatic(b'PE', i), i) for i in range(256)]
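# Precompute the 'MZ' and 'PE' magics XORed with every possible single-byte
# key so carve() can also find PEs that were obfuscated with a static XOR.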
def carve(pbytes, offset=0):
'''
    Yield (offset, xor_key) tuples for PE headers embedded in pbytes
'''
pblen = len(pbytes)
todo = [(pbytes.find(mzx, offset), mzx, pex, i) for mzx, pex, i in mz_xor]
todo = [(off, mzx, pex, i) for (off, mzx, pex, i) in todo if off != -1]
while len(todo):
off, mzx, pex, i = todo.pop()
# The MZ header has one field we will check
# e_lfanew is at 0x3c
e_lfanew = off + 0x3c
if pblen < (e_lfanew + 4):
continue
newoff = struct.unpack('<I', xorstatic(pbytes[e_lfanew:e_lfanew + 4], i))[0]
# DEV: find the next possible slice *first* just in case this one fails
nextres = pbytes.find(mzx, off+1)
if nextres != -1:
todo.append((nextres, mzx, pex, i))
# PE header should occur soon after MZ
if newoff > MAX_OFFSET_PE_AFTER_MZ:
continue
peoff = off + newoff
if pblen < (peoff + 2):
continue
if pbytes[peoff:peoff + 2] == pex:
yield (off, i)
class CarvedPE(PE.PE):
def __init__(self, fbytes, offset, xkey):
self.carved_offset = offset
self.fbytes = fbytes
self.xorkey = xkey
PE.PE.__init__(self, BytesIO())
# ensure sections can be parsed
self.getSections()
def readAtOffset(self, offset, size):
offset += self.carved_offset
return xorbytes(self.fbytes[offset:offset+size], self.xorkey)
def getFileSize(self):
ret = 0
for sec in self.sections:
ret = max(ret, sec.PointerToRawData + sec.SizeOfRawData)
return ret
def setup():
desc = 'Output info about PEs embedded inside another PE'
ap = argparse.ArgumentParser('PE.carve', description=desc)
ap.add_argument('file', help='Path to PE file')
return ap
def main(argv):
opts = setup().parse_args(argv)
with open(opts.file, 'rb') as fd:
fbytes = fd.read()
for offset, i in carve(fbytes):
print('OFFSET: %d (xor: %d)' % (offset, i))
        p = CarvedPE(fbytes, offset, bytes([i]))  # pass the XOR key as bytes so xorbytes() works
print('SIZE: %d' % p.getFileSize())
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
9f7bf13e5a47f8975b07c0ee65ff108b22ed7d51
|
4d28185e7a78a569f9a449f39f183cac3024f711
|
/packages/Python/lldbsuite/test/lang/objc/foundation/TestObjCMethods.py
|
85d34c3d5006217b6512d92ea26df29ae4c17775
|
[
"NCSA",
"Apache-2.0",
"LLVM-exception"
] |
permissive
|
apple/swift-lldb
|
2789bf44f648609a1674ee520ac20b64c95de072
|
d74be846ef3e62de946df343e8c234bde93a8912
|
refs/heads/stable
| 2023-04-06T00:28:15.882479
| 2019-10-25T22:46:59
| 2019-10-25T22:46:59
| 44,838,862
| 780
| 291
|
Apache-2.0
| 2020-01-10T19:28:43
| 2015-10-23T21:13:18
|
C++
|
UTF-8
|
Python
| false
| false
| 11,513
|
py
|
TestObjCMethods.py
|
"""
Set breakpoints on objective-c class and instance methods in foundation.
Also lookup objective-c data types and evaluate expressions.
"""
from __future__ import print_function
import os
import os.path
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
file_index = 0
@skipUnlessDarwin
class FoundationTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.main_source = "main.m"
self.line = line_number(
self.main_source,
'// Set break point at this line.')
def test_break(self):
"""Test setting objc breakpoints using '_regexp-break' and 'breakpoint set'."""
self.build()
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Stop at +[NSString stringWithFormat:].
break_results = lldbutil.run_break_set_command(
self, "_regexp-break +[NSString stringWithFormat:]")
lldbutil.check_breakpoint_result(
self,
break_results,
symbol_name='+[NSString stringWithFormat:]',
num_locations=1)
# Stop at -[MyString initWithNSString:].
lldbutil.run_break_set_by_symbol(
self,
'-[MyString initWithNSString:]',
num_expected_locations=1,
sym_exact=True)
# Stop at the "description" selector.
lldbutil.run_break_set_by_selector(
self,
'description',
num_expected_locations=1,
module_name='a.out')
# Stop at -[NSAutoreleasePool release].
break_results = lldbutil.run_break_set_command(
self, "_regexp-break -[NSAutoreleasePool release]")
lldbutil.check_breakpoint_result(
self,
break_results,
symbol_name='-[NSAutoreleasePool release]',
num_locations=1)
self.runCmd("run", RUN_SUCCEEDED)
# First stop is +[NSString stringWithFormat:].
self.expect(
"thread backtrace",
"Stop at +[NSString stringWithFormat:]",
substrs=["Foundation`+[NSString stringWithFormat:]"])
self.runCmd("process continue")
# Second stop is still +[NSString stringWithFormat:].
self.expect(
"thread backtrace",
"Stop at +[NSString stringWithFormat:]",
substrs=["Foundation`+[NSString stringWithFormat:]"])
self.runCmd("process continue")
# Followed by a.out`-[MyString initWithNSString:].
self.expect(
"thread backtrace",
"Stop at a.out`-[MyString initWithNSString:]",
substrs=["a.out`-[MyString initWithNSString:]"])
self.runCmd("process continue")
# Followed by -[MyString description].
self.expect("thread backtrace", "Stop at -[MyString description]",
substrs=["a.out`-[MyString description]"])
self.runCmd("process continue")
# Followed by the same -[MyString description].
self.expect("thread backtrace", "Stop at -[MyString description]",
substrs=["a.out`-[MyString description]"])
self.runCmd("process continue")
# Followed by -[NSAutoreleasePool release].
self.expect("thread backtrace", "Stop at -[NSAutoreleasePool release]",
substrs=["Foundation`-[NSAutoreleasePool release]"])
# rdar://problem/8542091
# rdar://problem/8492646
def test_data_type_and_expr(self):
"""Lookup objective-c data types and evaluate expressions."""
self.build()
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Stop at -[MyString description].
lldbutil.run_break_set_by_symbol(
self,
'-[MyString description]',
num_expected_locations=1,
sym_exact=True)
# self.expect("breakpoint set -n '-[MyString description]", BREAKPOINT_CREATED,
# startstr = "Breakpoint created: 1: name = '-[MyString description]',
# locations = 1")
self.runCmd("run", RUN_SUCCEEDED)
# The backtrace should show we stop at -[MyString description].
self.expect("thread backtrace", "Stop at -[MyString description]",
substrs=["a.out`-[MyString description]"])
# Lookup objc data type MyString and evaluate some expressions.
self.expect("image lookup -t NSString", DATA_TYPES_DISPLAYED_CORRECTLY,
substrs=['name = "NSString"',
'compiler_type = "@interface NSString'])
self.expect("image lookup -t MyString", DATA_TYPES_DISPLAYED_CORRECTLY,
substrs=['name = "MyString"',
'compiler_type = "@interface MyString',
'NSString * str;',
'NSDate * date;'])
self.expect(
"frame variable --show-types --scope",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=["ARG: (MyString *) self"],
patterns=[
"ARG: \(.*\) _cmd",
"(objc_selector *)|(SEL)"])
# rdar://problem/8651752
# don't crash trying to ask clang how many children an empty record has
self.runCmd("frame variable *_cmd")
# rdar://problem/8492646
# test/foundation fails after updating to tot r115023
# self->str displays nothing as output
self.expect(
"frame variable --show-types self->str",
VARIABLES_DISPLAYED_CORRECTLY,
startstr="(NSString *) self->str")
# rdar://problem/8447030
# 'frame variable self->date' displays the wrong data member
self.expect(
"frame variable --show-types self->date",
VARIABLES_DISPLAYED_CORRECTLY,
startstr="(NSDate *) self->date")
# This should display the str and date member fields as well.
self.expect(
"frame variable --show-types *self",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=[
"(MyString) *self",
"(NSString *) str",
"(NSDate *) date"])
# isa should be accessible.
self.expect("expression self->isa", VARIABLES_DISPLAYED_CORRECTLY,
substrs=["(Class)"])
# This should fail expectedly.
self.expect(
"expression self->non_existent_member",
COMMAND_FAILED_AS_EXPECTED,
error=True,
startstr="error: 'MyString' does not have a member named 'non_existent_member'")
# Use expression parser.
self.runCmd("expression self->str")
self.runCmd("expression self->date")
# (lldb) expression self->str
# error: instance variable 'str' is protected
# error: 1 errors parsing expression
#
# (lldb) expression self->date
# error: instance variable 'date' is protected
# error: 1 errors parsing expression
#
self.runCmd("breakpoint delete 1")
lldbutil.run_break_set_by_file_and_line(
self, "main.m", self.line, num_expected_locations=1, loc_exact=True)
self.runCmd("process continue")
# rdar://problem/8542091
# test/foundation: expr -o -- my not working?
#
# Test new feature with r115115:
# Add "-o" option to "expression" which prints the object description
# if available.
self.expect(
"expression --object-description -- my",
"Object description displayed correctly",
patterns=["Hello from.*a.out.*with timestamp: "])
@add_test_categories(['pyapi'])
def test_print_ivars_correctly(self):
self.build()
# See: <rdar://problem/8717050> lldb needs to use the ObjC runtime symbols for ivar offsets
# Only fails for the ObjC 2.0 runtime.
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
break1 = target.BreakpointCreateByLocation(self.main_source, self.line)
self.assertTrue(break1, VALID_BREAKPOINT)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
# The stop reason of the thread should be breakpoint.
thread = process.GetThreadAtIndex(0)
if thread.GetStopReason() != lldb.eStopReasonBreakpoint:
from lldbsuite.test.lldbutil import stop_reason_to_str
self.fail(STOPPED_DUE_TO_BREAKPOINT_WITH_STOP_REASON_AS %
stop_reason_to_str(thread.GetStopReason()))
# Make sure we stopped at the first breakpoint.
cur_frame = thread.GetFrameAtIndex(0)
line_number = cur_frame.GetLineEntry().GetLine()
self.assertTrue(line_number == self.line, "Hit the first breakpoint.")
my_var = cur_frame.FindVariable("my")
self.assertTrue(my_var, "Made a variable object for my")
str_var = cur_frame.FindVariable("str")
self.assertTrue(str_var, "Made a variable object for str")
# Now make sure that the my->str == str:
my_str_var = my_var.GetChildMemberWithName("str")
self.assertTrue(my_str_var, "Found a str ivar in my")
str_value = int(str_var.GetValue(), 0)
my_str_value = int(my_str_var.GetValue(), 0)
self.assertTrue(
str_value == my_str_value,
"Got the correct value for my->str")
def test_expression_lookups_objc(self):
"""Test running an expression detect spurious debug info lookups (DWARF)."""
self.build()
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Stop at -[MyString initWithNSString:].
lldbutil.run_break_set_by_symbol(
self,
'-[MyString initWithNSString:]',
num_expected_locations=1,
sym_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
global file_index
# Log any DWARF lookups
        file_index += 1
logfile = os.path.join(
self.getBuildDir(),
"dwarf-lookups-" +
self.getArchitecture() +
"-" +
str(file_index) +
".txt")
self.runCmd("log enable -f %s dwarf lookups" % (logfile))
self.runCmd("expr self")
self.runCmd("log disable dwarf lookups")
def cleanup():
if os.path.exists(logfile):
os.unlink(logfile)
self.addTearDownHook(cleanup)
if os.path.exists(logfile):
f = open(logfile)
lines = f.readlines()
num_errors = 0
for line in lines:
if "$__lldb" in line:
if num_errors == 0:
print(
"error: found spurious name lookups when evaluating an expression:")
num_errors += 1
print(line, end='')
self.assertTrue(num_errors == 0, "Spurious lookups detected")
f.close()
|
08fb58100e361c550eafee69c1e61915aa3a302b
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/happy-number.py
|
33b2e31d3b74d153484452ee24d5f45b3199e109
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 443
|
py
|
happy-number.py
|
# Time: O(k), where k is the number of steps until the sequence reaches 1 or repeats
# Space: O(k)
class Solution(object):
# @param {integer} n
# @return {boolean}
def isHappy(self, n):
lookup = {}
while n != 1 and n not in lookup:
lookup[n] = True
n = self.nextNumber(n)
return n == 1
def nextNumber(self, n):
new = 0
for char in str(n):
new += int(char)**2
return new
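# Illustrative sanity check, not part of the original solution; it exercises the
# cycle-detection approach described above (19 is a known happy number, 2 is not).
if __name__ == "__main__":
    solver = Solution()
    print(solver.isHappy(19))  # expected: True  (19 -> 82 -> 68 -> 100 -> 1)
    print(solver.isHappy(2))   # expected: False (the sequence falls into a cycle)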
|
3f9b2e17b91ca3a91665d6ffcb9ba873793981bb
|
88efd76316e4184d76a5e0585d95fe734233942c
|
/yellowbrick/text/tsne.py
|
ead496eb4844db00304144c5cd60c71b7300383f
|
[
"Apache-2.0"
] |
permissive
|
DistrictDataLabs/yellowbrick
|
1ecd9f33e58f0d007569904401c204a6cdeb5661
|
f7a8e950bd31452ea2f5d402a1c5d519cd163fd5
|
refs/heads/develop
| 2023-08-03T12:25:26.511916
| 2023-07-05T18:14:28
| 2023-07-05T18:14:28
| 59,121,694
| 4,242
| 660
|
Apache-2.0
| 2023-07-15T17:50:31
| 2016-05-18T14:12:17
|
Python
|
UTF-8
|
Python
| false
| false
| 15,368
|
py
|
tsne.py
|
# yellowbrick.text.tsne
# Implements TSNE visualizations of documents in 2D space.
#
# Author: Benjamin Bengfort
# Author: Rebecca Bilbro
# Created: Mon Feb 20 06:33:29 2017 -0500
#
# Copyright (C) 2016 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: tsne.py [6aa9198] benjamin@bengfort.com $
"""
Implements TSNE visualizations of documents in 2D space.
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
from collections import defaultdict
from yellowbrick.draw import manual_legend
from yellowbrick.text.base import TextVisualizer
from yellowbrick.style.colors import resolve_colors
from yellowbrick.exceptions import YellowbrickValueError
from sklearn.manifold import TSNE
from sklearn.pipeline import Pipeline
from sklearn.decomposition import TruncatedSVD, PCA
##########################################################################
## Quick Methods
##########################################################################
def tsne(
X,
y=None,
ax=None,
decompose="svd",
decompose_by=50,
labels=None,
colors=None,
colormap=None,
alpha=0.7,
show=True,
**kwargs
):
"""
Display a projection of a vectorized corpus in two dimensions using TSNE,
a nonlinear dimensionality reduction method that is particularly well
suited to embedding in two or three dimensions for visualization as a
scatter plot. TSNE is widely used in text analysis to show clusters or
groups of documents or utterances and their relative proximities.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features representing the corpus of
vectorized documents to visualize with tsne.
y : ndarray or Series of length n
An optional array or series of target or class values for instances.
If this is specified, then the points will be colored according to
their class. Often cluster labels are passed in to color the documents
in cluster space, so this method is used both for classification and
clustering methods.
ax : matplotlib axes
The axes to plot the figure on.
decompose : string or None
A preliminary decomposition is often used prior to TSNE to make the
projection faster. Specify `"svd"` for sparse data or `"pca"` for
dense data. If decompose is None, the original data set will be used.
decompose_by : int
Specify the number of components for preliminary decomposition, by
default this is 50; the more components, the slower TSNE will be.
labels : list of strings
The names of the classes in the target, used to create a legend.
colors : list or tuple of colors
Specify the colors for each individual class
colormap : string or matplotlib cmap
Sequential colormap for continuous target
alpha : float, default: 0.7
Specify a transparency where 1 is completely opaque and 0 is completely
transparent. This property makes densely clustered points more visible.
show : bool, default: True
        If True, calls ``show()``, which in turn calls ``plt.show()``; however, you cannot
        call ``plt.savefig`` from this signature, nor ``clear_figure``. If False, simply
        calls ``finalize()``.
kwargs : dict
Pass any additional keyword arguments to the TSNE transformer.
Example
--------
>>> from yellowbrick.text.tsne import tsne
>>> from sklearn.feature_extraction.text import TfidfVectorizer
>>> from yellowbrick.datasets import load_hobbies
>>> corpus = load_hobbies()
>>> tfidf = TfidfVectorizer()
>>> X = tfidf.fit_transform(corpus.data)
>>> y = corpus.target
>>> tsne(X, y)
Returns
-------
visualizer: TSNEVisualizer
Returns the fitted, finalized visualizer
"""
# Instantiate the visualizer
visualizer = TSNEVisualizer(
ax=ax,
decompose=decompose,
decompose_by=decompose_by,
labels=labels,
colors=colors,
colormap=colormap,
alpha=alpha,
**kwargs
)
# Fit and transform the visualizer (calls draw)
visualizer.fit(X, y, **kwargs)
if show:
visualizer.show()
else:
visualizer.finalize()
# Return the visualizer object
return visualizer
##########################################################################
## TSNEVisualizer
##########################################################################
class TSNEVisualizer(TextVisualizer):
"""
Display a projection of a vectorized corpus in two dimensions using TSNE,
a nonlinear dimensionality reduction method that is particularly well
suited to embedding in two or three dimensions for visualization as a
scatter plot. TSNE is widely used in text analysis to show clusters or
groups of documents or utterances and their relative proximities.
TSNE will return a scatter plot of the vectorized corpus, such that each
point represents a document or utterance. The distance between two points
in the visual space is embedded using the probability distribution of
pairwise similarities in the higher dimensionality; thus TSNE shows
clusters of similar documents and the relationships between groups of
documents as a scatter plot.
TSNE can be used with either clustering or classification; by specifying
the ``classes`` argument, points will be colored based on their similar
traits. For example, by passing ``cluster.labels_`` as ``y`` in ``fit()``, all
points in the same cluster will be grouped together. This extends the
neighbor embedding with more information about similarity, and can allow
better interpretation of both clusters and classes.
For more, see https://lvdmaaten.github.io/tsne/
Parameters
----------
ax : matplotlib axes
The axes to plot the figure on.
decompose : string or None, default: ``'svd'``
A preliminary decomposition is often used prior to TSNE to make the
projection faster. Specify ``"svd"`` for sparse data or ``"pca"`` for
dense data. If None, the original data set will be used.
decompose_by : int, default: 50
Specify the number of components for preliminary decomposition, by
default this is 50; the more components, the slower TSNE will be.
labels : list of strings
The names of the classes in the target, used to create a legend.
Labels must match names of classes in sorted order.
colors : list or tuple of colors
Specify the colors for each individual class
colormap : string or matplotlib cmap
Sequential colormap for continuous target
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by np.random. The random state is applied to the preliminary
decomposition as well as tSNE.
alpha : float, default: 0.7
Specify a transparency where 1 is completely opaque and 0 is completely
transparent. This property makes densely clustered points more visible.
kwargs : dict
Pass any additional keyword arguments to the TSNE transformer.
"""
# NOTE: cannot be np.nan
NULL_CLASS = None
def __init__(
self,
ax=None,
decompose="svd",
decompose_by=50,
labels=None,
classes=None,
colors=None,
colormap=None,
random_state=None,
alpha=0.7,
**kwargs
):
# Visual Parameters
self.alpha = alpha
self.labels = labels
self.colors = colors
self.colormap = colormap
self.random_state = random_state
# Fetch TSNE kwargs from kwargs by popping only keys belonging to TSNE params
tsne_kwargs = {
key: kwargs.pop(key) for key in TSNE().get_params() if key in kwargs
}
self.transformer_ = self.make_transformer(decompose, decompose_by, tsne_kwargs)
# Call super at the end so that size and title are set correctly
super(TSNEVisualizer, self).__init__(ax=ax, **kwargs)
def make_transformer(self, decompose="svd", decompose_by=50, tsne_kwargs={}):
"""
Creates an internal transformer pipeline to project the data set into
        2D space using TSNE, applying a pre-decomposition technique ahead of
embedding if necessary. This method will reset the transformer on the
class, and can be used to explore different decompositions.
Parameters
----------
decompose : string or None, default: ``'svd'``
A preliminary decomposition is often used prior to TSNE to make
the projection faster. Specify ``"svd"`` for sparse data or ``"pca"``
for dense data. If decompose is None, the original data set will
be used.
decompose_by : int, default: 50
Specify the number of components for preliminary decomposition, by
default this is 50; the more components, the slower TSNE will be.
Returns
-------
transformer : Pipeline
Pipelined transformer for TSNE projections
"""
# TODO: detect decompose by inferring from sparse matrix or dense or
# If number of features > 50 etc.
decompositions = {"svd": TruncatedSVD, "pca": PCA}
if decompose and decompose.lower() not in decompositions:
raise YellowbrickValueError(
"'{}' is not a valid decomposition, use {}, or None".format(
decompose, ", ".join(decompositions.keys())
)
)
# Create the pipeline steps
steps = []
# Add the pre-decomposition
if decompose:
klass = decompositions[decompose]
steps.append(
(
decompose,
klass(n_components=decompose_by, random_state=self.random_state),
)
)
# Add the TSNE manifold
steps.append(
(
"tsne",
TSNE(n_components=2, random_state=self.random_state, **tsne_kwargs),
)
)
# return the pipeline
return Pipeline(steps)
def fit(self, X, y=None, **kwargs):
"""
The fit method is the primary drawing input for the TSNE projection
since the visualization requires both X and an optional y value. The
fit method expects an array of numeric vectors, so text documents must
be vectorized before passing them to this method.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features representing the corpus of
vectorized documents to visualize with tsne.
y : ndarray or Series of length n
An optional array or series of target or class values for
instances. If this is specified, then the points will be colored
according to their class. Often cluster labels are passed in to
color the documents in cluster space, so this method is used both
for classification and clustering methods.
kwargs : dict
Pass generic arguments to the drawing method
Returns
-------
self : instance
Returns the instance of the transformer/visualizer
"""
# Store the classes we observed in y
if y is not None:
self.classes_ = np.unique(y)
elif y is None and self.labels is not None:
self.classes_ = np.array([self.labels[0]])
else:
self.classes_ = np.array([self.NULL_CLASS])
# Fit our internal transformer and transform the data.
vecs = self.transformer_.fit_transform(X)
self.n_instances_ = vecs.shape[0]
# Draw the vectors
self.draw(vecs, y, **kwargs)
# Fit always returns self.
return self
def draw(self, points, target=None, **kwargs):
"""
Called from the fit method, this method draws the TSNE scatter plot,
from a set of decomposed points in 2 dimensions. This method also
accepts a third dimension, target, which is used to specify the colors
of each of the points. If the target is not specified, then the points
are plotted as a single cloud to show similar documents.
"""
# Resolve the labels with the classes
labels = self.labels if self.labels is not None else self.classes_
if len(labels) != len(self.classes_):
raise YellowbrickValueError(
(
"number of supplied labels ({}) does not "
"match the number of classes ({})"
).format(len(labels), len(self.classes_))
)
# Create the color mapping for the labels.
self.color_values_ = resolve_colors(
n_colors=len(labels), colormap=self.colormap, colors=self.colors
)
colors = dict(zip(labels, self.color_values_))
# Transform labels into a map of class to label
labels = dict(zip(self.classes_, labels))
# Expand the points into vectors of x and y for scatter plotting,
# assigning them to their label if the label has been passed in.
# Additionally, filter classes not specified directly by the user.
series = defaultdict(lambda: {"x": [], "y": []})
if target is not None:
for t, point in zip(target, points):
label = labels[t]
series[label]["x"].append(point[0])
series[label]["y"].append(point[1])
else:
label = self.classes_[0]
for x, y in points:
series[label]["x"].append(x)
series[label]["y"].append(y)
# Plot the points
for label, points in series.items():
self.ax.scatter(
points["x"], points["y"], c=colors[label], alpha=self.alpha, label=label
)
return self.ax
def finalize(self, **kwargs):
"""
Finalize the drawing by adding a title and legend, and removing the
        axes objects that do not convey information about TSNE.
"""
self.set_title("TSNE Projection of {} Documents".format(self.n_instances_))
# Remove the ticks
self.ax.set_yticks([])
self.ax.set_xticks([])
# Add the legend outside of the figure box.
if not all(self.classes_ == np.array([self.NULL_CLASS])):
box = self.ax.get_position()
self.ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
manual_legend(
self,
self.classes_,
self.color_values_,
loc="center left",
bbox_to_anchor=(1, 0.5),
)
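# Illustrative usage sketch of the class API, mirroring the quick-method example in
# the ``tsne`` docstring above; TfidfVectorizer and load_hobbies come from that
# example and are not otherwise used by this module.
if __name__ == "__main__":
    from sklearn.feature_extraction.text import TfidfVectorizer
    from yellowbrick.datasets import load_hobbies
    corpus = load_hobbies()
    docs = TfidfVectorizer().fit_transform(corpus.data)
    # Color points by their class labels; passing cluster labels as y works the same way.
    viz = TSNEVisualizer(decompose="svd", decompose_by=50, random_state=42)
    viz.fit(docs, corpus.target)
    viz.show()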
|
7d0bde9bc893f8d136199a87b5fda534f6c35989
|
167c6226bc77c5daaedab007dfdad4377f588ef4
|
/python/ql/test/query-tests/Security/lib/Crypto/Cipher/__init__.py
|
ab22f65be6e35f2e7d085463d3f4b178f8bfd414
|
[
"MIT",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-other-copyleft",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"Python-2.0"
] |
permissive
|
github/codeql
|
1eebb449a34f774db9e881b52cb8f7a1b1a53612
|
d109637e2d7ab3b819812eb960c05cb31d9d2168
|
refs/heads/main
| 2023-08-20T11:32:39.162059
| 2023-08-18T14:33:32
| 2023-08-18T14:33:32
| 143,040,428
| 5,987
| 1,363
|
MIT
| 2023-09-14T19:36:50
| 2018-07-31T16:35:51
|
CodeQL
|
UTF-8
|
Python
| false
| false
| 26
|
py
|
__init__.py
|
__all__ = ['AES', 'ARC4']
|
86e7760f2e492ace158254aebc90731322adce90
|
45412e6175a50a2cf468cee53c5eeda28fea108d
|
/topaz/modules/topaz.py
|
7cf5f34375b4b008d6ec35c64a277a913d7757f0
|
[
"BSD-3-Clause"
] |
permissive
|
topazproject/topaz
|
fa03fdfc1977e7a3411eadf748475668bbd1c34a
|
059eac0ac884d677c3539e156e0ac528723d6238
|
refs/heads/master
| 2023-03-09T10:08:54.025001
| 2022-06-18T11:47:49
| 2022-06-18T11:47:49
| 3,966,078
| 271
| 28
|
BSD-3-Clause
| 2022-06-18T11:47:50
| 2012-04-08T18:34:50
|
Python
|
UTF-8
|
Python
| false
| false
| 3,480
|
py
|
topaz.py
|
from __future__ import absolute_import
import sys
from rpython.rlib.rarithmetic import intmask
from rpython.rlib.rtermios import tcsetattr, tcgetattr, all_constants
from topaz.error import error_for_oserror
from topaz.module import ModuleDef
from topaz.objects.classobject import W_ClassObject
class Topaz(object):
moduledef = ModuleDef("Topaz")
@moduledef.setup_module
def setup_module(space, w_mod):
space.set_const(w_mod, "FIXNUM_MAX", space.newint(sys.maxint))
w_termioconsts = space.newmodule("TermIOConstants", None)
space.set_const(w_mod, "TermIOConstants", w_termioconsts)
for name, value in all_constants.iteritems():
space.set_const(w_termioconsts, name, space.newint(value))
@moduledef.function("intmask")
def method_intmask(self, space, w_int):
if space.is_kind_of(w_int, space.w_fixnum):
return w_int
elif space.is_kind_of(w_int, space.w_bignum):
bigint = space.bigint_w(w_int)
return space.newint(intmask(bigint.uintmask()))
@moduledef.function("convert_type", method="symbol")
def method_convert_type(self, space, w_obj, w_type, method):
if not isinstance(w_type, W_ClassObject):
raise space.error(
space.w_TypeError, "type argument must be a class")
return space.convert_type(w_obj, w_type, method)
@moduledef.function("try_convert_type", method="symbol")
def method_try_convert_type(self, space, w_obj, w_type, method):
if not isinstance(w_type, W_ClassObject):
raise space.error(
space.w_TypeError, "type argument must be a class")
return space.convert_type(w_obj, w_type, method, raise_error=False)
@moduledef.function("compare")
def method_compare(self, space, w_a, w_b, block=None):
return space.compare(w_a, w_b, block)
@moduledef.function("infect", taint="bool", untrust="bool", freeze="bool")
def method_infect(self, space, w_dest, w_src, taint=True, untrust=True,
freeze=False):
space.infect(
w_dest, w_src, taint=taint, untrust=untrust, freeze=freeze)
return self
@moduledef.function("tcsetattr", fd="int", when="int", mode_w="array")
def method_tcsetattr(self, space, fd, when, mode_w):
cc = [space.str_w(w_char) for w_char in space.listview(mode_w[6])]
mode = (
space.int_w(mode_w[0]), # iflag
space.int_w(mode_w[1]), # oflag
space.int_w(mode_w[2]), # cflag
space.int_w(mode_w[3]), # lflag
space.int_w(mode_w[4]), # ispeed
space.int_w(mode_w[5]), # ospeed
cc
)
try:
tcsetattr(fd, when, mode)
except OSError as e:
raise error_for_oserror(space, e)
return self
@moduledef.function("tcgetattr", fd="int")
def method_tcgetattr(self, space, fd):
try:
mode = tcgetattr(fd)
except OSError as e:
raise error_for_oserror(space, e)
mode_w = [
space.newint(mode[0]), # iflag
space.newint(mode[1]), # oflag
space.newint(mode[2]), # cflag
space.newint(mode[3]), # lflag
space.newint(mode[4]), # ispeed
space.newint(mode[5]), # ospeed
space.newarray([space.newstr_fromstr(cc) for cc in mode[6]])
]
return space.newarray(mode_w)
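# For illustration only (not part of the Topaz module): CPython's termios exposes the
# same 7-element mode structure -- iflag, oflag, cflag, lflag, ispeed, ospeed, cc --
# that method_tcgetattr unpacks and method_tcsetattr rebuilds above.
if __name__ == "__main__":
    import termios
    try:
        iflag, oflag, cflag, lflag, ispeed, ospeed, cc = termios.tcgetattr(sys.stdin.fileno())
        print(iflag, oflag, cflag, lflag, ispeed, ospeed, len(cc))
    except termios.error:
        pass  # stdin is not a terminal (e.g. piped input), so there is nothing to inspect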
|
71c39444d29087b1bbc58556d3d0e7fedfe9f3da
|
234c46d1249c9209f268417a19018afc12e378b4
|
/tests/data/image_loader_test.py
|
22ae32b1c948e344b3922cd1a58f756204357643
|
[
"Apache-2.0"
] |
permissive
|
allenai/allennlp
|
1f4bcddcb6f5ce60c7ef03a9a3cd6a38bdb987cf
|
80fb6061e568cb9d6ab5d45b661e86eb61b92c82
|
refs/heads/main
| 2023-07-07T11:43:33.781690
| 2022-11-22T00:42:46
| 2022-11-22T00:42:46
| 91,356,408
| 12,257
| 2,712
|
Apache-2.0
| 2022-11-22T00:42:47
| 2017-05-15T15:52:41
|
Python
|
UTF-8
|
Python
| false
| false
| 3,241
|
py
|
image_loader_test.py
|
import pytest
import torch
import torchvision
from allennlp.common.testing import AllenNlpTestCase, multi_device
from allennlp.data.image_loader import TorchImageLoader
class TorchImageLoaderTest(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.image_fixture_path = str(
self.FIXTURES_ROOT / "data" / "images" / "COCO_train2014_000000458752.jpg"
)
torchvision.set_image_backend("accimage")
# Create a few small images of different sizes from the fixture.
image = torchvision.io.read_image(self.image_fixture_path)
assert image.shape == (3, 480, 640)
image1 = image[:, 0:7, 0:15]
image2 = image[:, 0:9, 0:12]
torchvision.io.write_jpeg(image1, str(self.TEST_DIR / "image1.jpg"))
torchvision.io.write_jpeg(image2, str(self.TEST_DIR / "image2.jpg"))
@multi_device
@pytest.mark.parametrize(
"loader_params",
[
{"size_divisibility": 0, "pad_value": 0.0},
{"size_divisibility": 1, "pad_value": 0.0},
{"size_divisibility": 4, "pad_value": 0.0},
],
ids=str,
)
def test_basic_load(self, device, loader_params):
loader = TorchImageLoader(resize=False, normalize=False, device=device, **loader_params)
torch_device = torch.device(device)
images, sizes = loader([self.TEST_DIR / "image1.jpg", self.TEST_DIR / "image2.jpg"])
assert images.device == torch_device
assert sizes.device == torch_device
assert images.shape[0] == 2
assert images.shape[1] == 3
assert sizes.shape == (2, 2)
assert list(sizes[0]) == [7, 15]
assert list(sizes[1]) == [9, 12]
if loader.size_divisibility <= 1:
assert images.shape[2] == 9
assert images.shape[3] == 15
else:
assert images.shape[2] >= 9
assert images.shape[3] >= 15
assert (images.shape[2] / loader.size_divisibility) % 1 == 0
image, size = loader(self.TEST_DIR / "image1.jpg")
assert image.device == torch_device
assert size.device == torch_device
assert len(image.shape) == 3
assert list(size) == [7, 15]
@multi_device
def test_resize_and_normalize(self, device):
loader = TorchImageLoader(resize=True, normalize=True, device=device)
torch_device = torch.device(device)
image, size = loader(self.image_fixture_path)
assert image.device == torch_device
assert size.device == torch_device
assert image.shape[1] == 800
def test_resize_and_normalize_matches_generalized_rcnn_transform(self):
loader = TorchImageLoader(resize=True, normalize=True, size_divisibility=32)
transform = torchvision.models.detection.transform.GeneralizedRCNNTransform(
loader.min_size, loader.max_size, loader.pixel_mean, loader.pixel_std
)
loaded_image, _ = loader([self.image_fixture_path])
raw_image, _ = TorchImageLoader(resize=False, normalize=False)(self.image_fixture_path)
transformed_raw_image, _ = transform([raw_image])
assert loaded_image.shape == transformed_raw_image.tensors.shape
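# Minimal usage sketch outside the test harness; "example.jpg" is a hypothetical
# path, and the printed shapes depend on the input image, as exercised by the tests above.
if __name__ == "__main__":
    loader = TorchImageLoader(resize=True, normalize=True, size_divisibility=32)
    image, size = loader("example.jpg")  # a single path returns one (image, size) pair
    print(image.shape, list(size))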
|
d68eb5313aa011ab642b08dbcb940461acade8d8
|
ee87c715e5d937b0380ddb87d56e9ebc4877a02b
|
/sklearn/metrics/_pairwise_distances_reduction/_dispatcher.py
|
937a6fd08326045394a05c6587c70e5034323f90
|
[
"BSD-3-Clause"
] |
permissive
|
scikit-learn/scikit-learn
|
27a2196f3173e0f32f7a5c5d652b70a6c57c7644
|
061f8777b48e5491b0c57bb8e0bc7067c103079d
|
refs/heads/main
| 2023-08-18T15:32:59.764468
| 2023-08-18T14:39:08
| 2023-08-18T14:39:08
| 843,222
| 58,456
| 29,777
|
BSD-3-Clause
| 2023-09-14T19:08:34
| 2010-08-17T09:43:38
|
Python
|
UTF-8
|
Python
| false
| false
| 22,377
|
py
|
_dispatcher.py
|
from abc import abstractmethod
from typing import List
import numpy as np
from scipy.sparse import issparse
from ... import get_config
from .._dist_metrics import (
BOOL_METRICS,
METRIC_MAPPING64,
DistanceMetric,
)
from ._argkmin import (
ArgKmin32,
ArgKmin64,
)
from ._argkmin_classmode import (
ArgKminClassMode32,
ArgKminClassMode64,
)
from ._base import _sqeuclidean_row_norms32, _sqeuclidean_row_norms64
from ._radius_neighbors import (
RadiusNeighbors32,
RadiusNeighbors64,
)
def sqeuclidean_row_norms(X, num_threads):
"""Compute the squared euclidean norm of the rows of X in parallel.
Parameters
----------
X : ndarray or CSR matrix of shape (n_samples, n_features)
Input data. Must be c-contiguous.
num_threads : int
The number of OpenMP threads to use.
Returns
-------
sqeuclidean_row_norms : ndarray of shape (n_samples,)
Arrays containing the squared euclidean norm of each row of X.
"""
if X.dtype == np.float64:
return np.asarray(_sqeuclidean_row_norms64(X, num_threads))
if X.dtype == np.float32:
return np.asarray(_sqeuclidean_row_norms32(X, num_threads))
raise ValueError(
"Only float64 or float32 datasets are supported at this time, "
f"got: X.dtype={X.dtype}."
)
class BaseDistancesReductionDispatcher:
"""Abstract base dispatcher for pairwise distance computation & reduction.
Each dispatcher extending the base :class:`BaseDistancesReductionDispatcher`
dispatcher must implement the :meth:`compute` classmethod.
"""
@classmethod
def valid_metrics(cls) -> List[str]:
excluded = {
# PyFunc cannot be supported because it necessitates interacting with
# the CPython interpreter to call user defined functions.
"pyfunc",
"mahalanobis", # is numerically unstable
# In order to support discrete distance metrics, we need to have a
# stable simultaneous sort which preserves the order of the indices
            # because there are generally many occurrences of a given distance
            # value in this case.
# TODO: implement a stable simultaneous_sort.
"hamming",
*BOOL_METRICS,
}
return sorted(({"sqeuclidean"} | set(METRIC_MAPPING64.keys())) - excluded)
@classmethod
def is_usable_for(cls, X, Y, metric) -> bool:
"""Return True if the dispatcher can be used for the
given parameters.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples_X, n_features)
Input data.
Y : {ndarray, sparse matrix} of shape (n_samples_Y, n_features)
Input data.
metric : str, default='euclidean'
The distance metric to use.
For a list of available metrics, see the documentation of
:class:`~sklearn.metrics.DistanceMetric`.
Returns
-------
True if the dispatcher can be used, else False.
"""
def is_numpy_c_ordered(X):
return hasattr(X, "flags") and getattr(X.flags, "c_contiguous", False)
def is_valid_sparse_matrix(X):
return (
issparse(X)
and X.format == "csr"
and
# TODO: support CSR matrices without non-zeros elements
X.nnz > 0
and
# TODO: support CSR matrices with int64 indices and indptr
# See: https://github.com/scikit-learn/scikit-learn/issues/23653
X.indices.dtype == X.indptr.dtype == np.int32
)
is_usable = (
get_config().get("enable_cython_pairwise_dist", True)
and (is_numpy_c_ordered(X) or is_valid_sparse_matrix(X))
and (is_numpy_c_ordered(Y) or is_valid_sparse_matrix(Y))
and X.dtype == Y.dtype
and X.dtype in (np.float32, np.float64)
and (metric in cls.valid_metrics() or isinstance(metric, DistanceMetric))
)
return is_usable
@classmethod
@abstractmethod
def compute(
cls,
X,
Y,
**kwargs,
):
"""Compute the reduction.
Parameters
----------
X : ndarray or CSR matrix of shape (n_samples_X, n_features)
Input data.
Y : ndarray or CSR matrix of shape (n_samples_Y, n_features)
Input data.
**kwargs : additional parameters for the reduction
Notes
-----
This method is an abstract class method: it has to be implemented
for all subclasses.
"""
class ArgKmin(BaseDistancesReductionDispatcher):
"""Compute the argkmin of row vectors of X on the ones of Y.
    For each row vector of X, computes the indices of the k rows of Y with
    the smallest distances.
ArgKmin is typically used to perform
bruteforce k-nearest neighbors queries.
This class is not meant to be instantiated, one should only use
its :meth:`compute` classmethod which handles allocation and
deallocation consistently.
"""
@classmethod
def compute(
cls,
X,
Y,
k,
metric="euclidean",
chunk_size=None,
metric_kwargs=None,
strategy=None,
return_distance=False,
):
"""Compute the argkmin reduction.
Parameters
----------
X : ndarray or CSR matrix of shape (n_samples_X, n_features)
Input data.
Y : ndarray or CSR matrix of shape (n_samples_Y, n_features)
Input data.
k : int
The k for the argkmin reduction.
metric : str, default='euclidean'
The distance metric to use for argkmin.
For a list of available metrics, see the documentation of
:class:`~sklearn.metrics.DistanceMetric`.
chunk_size : int, default=None,
The number of vectors per chunk. If None (default) looks-up in
scikit-learn configuration for `pairwise_dist_chunk_size`,
and use 256 if it is not set.
metric_kwargs : dict, default=None
Keyword arguments to pass to specified metric function.
strategy : str, {'auto', 'parallel_on_X', 'parallel_on_Y'}, default=None
            The chunking strategy defining which dataset parallelization is made on.
            For both strategies the computations happen with two nested loops,
            respectively on chunks of X and chunks of Y.
            Strategies differ on which loop (outer or inner) is made to run
in parallel with the Cython `prange` construct:
- 'parallel_on_X' dispatches chunks of X uniformly on threads.
Each thread then iterates on all the chunks of Y. This strategy is
embarrassingly parallel and comes with no datastructures
synchronisation.
- 'parallel_on_Y' dispatches chunks of Y uniformly on threads.
Each thread processes all the chunks of X in turn. This strategy is
a sequence of embarrassingly parallel subtasks (the inner loop on Y
chunks) with intermediate datastructures synchronisation at each
iteration of the sequential outer loop on X chunks.
- 'auto' relies on a simple heuristic to choose between
'parallel_on_X' and 'parallel_on_Y': when `X.shape[0]` is large enough,
'parallel_on_X' is usually the most efficient strategy.
When `X.shape[0]` is small but `Y.shape[0]` is large, 'parallel_on_Y'
brings more opportunity for parallelism and is therefore more efficient
- None (default) looks-up in scikit-learn configuration for
`pairwise_dist_parallel_strategy`, and use 'auto' if it is not set.
return_distance : boolean, default=False
Return distances between each X vector and its
argkmin if set to True.
Returns
-------
If return_distance=False:
- argkmin_indices : ndarray of shape (n_samples_X, k)
Indices of the argkmin for each vector in X.
If return_distance=True:
- argkmin_distances : ndarray of shape (n_samples_X, k)
Distances to the argkmin for each vector in X.
- argkmin_indices : ndarray of shape (n_samples_X, k)
Indices of the argkmin for each vector in X.
Notes
-----
This classmethod inspects the arguments values to dispatch to the
dtype-specialized implementation of :class:`ArgKmin`.
This allows decoupling the API entirely from the implementation details
whilst maintaining RAII: all temporarily allocated datastructures necessary
for the concrete implementation are therefore freed when this classmethod
returns.
"""
if X.dtype == Y.dtype == np.float64:
return ArgKmin64.compute(
X=X,
Y=Y,
k=k,
metric=metric,
chunk_size=chunk_size,
metric_kwargs=metric_kwargs,
strategy=strategy,
return_distance=return_distance,
)
if X.dtype == Y.dtype == np.float32:
return ArgKmin32.compute(
X=X,
Y=Y,
k=k,
metric=metric,
chunk_size=chunk_size,
metric_kwargs=metric_kwargs,
strategy=strategy,
return_distance=return_distance,
)
raise ValueError(
"Only float64 or float32 datasets pairs are supported at this time, "
f"got: X.dtype={X.dtype} and Y.dtype={Y.dtype}."
)
class RadiusNeighbors(BaseDistancesReductionDispatcher):
"""Compute radius-based neighbors for two sets of vectors.
For each row-vector X[i] of the queries X, find all the indices j of
row-vectors in Y such that:
dist(X[i], Y[j]) <= radius
The distance function `dist` depends on the values of the `metric`
and `metric_kwargs` parameters.
This class is not meant to be instantiated, one should only use
its :meth:`compute` classmethod which handles allocation and
deallocation consistently.
"""
@classmethod
def compute(
cls,
X,
Y,
radius,
metric="euclidean",
chunk_size=None,
metric_kwargs=None,
strategy=None,
return_distance=False,
sort_results=False,
):
"""Return the results of the reduction for the given arguments.
Parameters
----------
X : ndarray or CSR matrix of shape (n_samples_X, n_features)
Input data.
Y : ndarray or CSR matrix of shape (n_samples_Y, n_features)
Input data.
radius : float
The radius defining the neighborhood.
metric : str, default='euclidean'
The distance metric to use.
For a list of available metrics, see the documentation of
:class:`~sklearn.metrics.DistanceMetric`.
chunk_size : int, default=None,
The number of vectors per chunk. If None (default) looks-up in
scikit-learn configuration for `pairwise_dist_chunk_size`,
and use 256 if it is not set.
metric_kwargs : dict, default=None
Keyword arguments to pass to specified metric function.
strategy : str, {'auto', 'parallel_on_X', 'parallel_on_Y'}, default=None
            The chunking strategy defining which dataset parallelization is made on.
            For both strategies the computations happen with two nested loops,
            respectively on chunks of X and chunks of Y.
            Strategies differ on which loop (outer or inner) is made to run
in parallel with the Cython `prange` construct:
- 'parallel_on_X' dispatches chunks of X uniformly on threads.
Each thread then iterates on all the chunks of Y. This strategy is
embarrassingly parallel and comes with no datastructures
synchronisation.
- 'parallel_on_Y' dispatches chunks of Y uniformly on threads.
Each thread processes all the chunks of X in turn. This strategy is
a sequence of embarrassingly parallel subtasks (the inner loop on Y
chunks) with intermediate datastructures synchronisation at each
iteration of the sequential outer loop on X chunks.
- 'auto' relies on a simple heuristic to choose between
'parallel_on_X' and 'parallel_on_Y': when `X.shape[0]` is large enough,
'parallel_on_X' is usually the most efficient strategy.
When `X.shape[0]` is small but `Y.shape[0]` is large, 'parallel_on_Y'
brings more opportunity for parallelism and is therefore more efficient
despite the synchronization step at each iteration of the outer loop
on chunks of `X`.
- None (default) looks-up in scikit-learn configuration for
`pairwise_dist_parallel_strategy`, and use 'auto' if it is not set.
return_distance : boolean, default=False
Return distances between each X vector and its neighbors if set to True.
sort_results : boolean, default=False
Sort results with respect to distances between each X vector and its
neighbors if set to True.
Returns
-------
If return_distance=False:
- neighbors_indices : ndarray of n_samples_X ndarray
Indices of the neighbors for each vector in X.
If return_distance=True:
- neighbors_indices : ndarray of n_samples_X ndarray
Indices of the neighbors for each vector in X.
- neighbors_distances : ndarray of n_samples_X ndarray
Distances to the neighbors for each vector in X.
Notes
-----
This classmethod inspects the arguments values to dispatch to the
dtype-specialized implementation of :class:`RadiusNeighbors`.
This allows decoupling the API entirely from the implementation details
whilst maintaining RAII: all temporarily allocated datastructures necessary
for the concrete implementation are therefore freed when this classmethod
returns.
"""
if X.dtype == Y.dtype == np.float64:
return RadiusNeighbors64.compute(
X=X,
Y=Y,
radius=radius,
metric=metric,
chunk_size=chunk_size,
metric_kwargs=metric_kwargs,
strategy=strategy,
sort_results=sort_results,
return_distance=return_distance,
)
if X.dtype == Y.dtype == np.float32:
return RadiusNeighbors32.compute(
X=X,
Y=Y,
radius=radius,
metric=metric,
chunk_size=chunk_size,
metric_kwargs=metric_kwargs,
strategy=strategy,
sort_results=sort_results,
return_distance=return_distance,
)
raise ValueError(
"Only float64 or float32 datasets pairs are supported at this time, "
f"got: X.dtype={X.dtype} and Y.dtype={Y.dtype}."
)
class ArgKminClassMode(BaseDistancesReductionDispatcher):
"""Compute the argkmin of row vectors of X on the ones of Y with labels.
    For each row vector of X, computes the indices of the k rows of Y with
    the smallest distances, and computes the weighted mode of their labels.
ArgKminClassMode is typically used to perform bruteforce k-nearest neighbors
queries when the weighted mode of the labels for the k-nearest neighbors
are required, such as in `predict` methods.
This class is not meant to be instantiated, one should only use
its :meth:`compute` classmethod which handles allocation and
deallocation consistently.
"""
@classmethod
def valid_metrics(cls) -> List[str]:
excluded = {
# Euclidean is technically usable for ArgKminClassMode
# but its current implementation would not be competitive.
# TODO: implement Euclidean specialization using GEMM.
"euclidean",
"sqeuclidean",
}
return list(set(BaseDistancesReductionDispatcher.valid_metrics()) - excluded)
@classmethod
def compute(
cls,
X,
Y,
k,
weights,
Y_labels,
unique_Y_labels,
metric="euclidean",
chunk_size=None,
metric_kwargs=None,
strategy=None,
):
"""Compute the argkmin reduction.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
The input array to be labelled.
Y : ndarray of shape (n_samples_Y, n_features)
The input array whose class membership are provided through the
`Y_labels` parameter.
k : int
The number of nearest neighbors to consider.
weights : ndarray
The weights applied over the `Y_labels` of `Y` when computing the
weighted mode of the labels.
Y_labels : ndarray
An array containing the index of the class membership of the
associated samples in `Y`. This is used in labeling `X`.
unique_Y_labels : ndarray
An array containing all unique indices contained in the
corresponding `Y_labels` array.
metric : str, default='euclidean'
The distance metric to use. For a list of available metrics, see
the documentation of :class:`~sklearn.metrics.DistanceMetric`.
Currently does not support `'precomputed'`.
chunk_size : int, default=None,
The number of vectors per chunk. If None (default) looks-up in
scikit-learn configuration for `pairwise_dist_chunk_size`,
and use 256 if it is not set.
metric_kwargs : dict, default=None
Keyword arguments to pass to specified metric function.
strategy : str, {'auto', 'parallel_on_X', 'parallel_on_Y'}, default=None
            The chunking strategy defining which dataset parallelization is made on.
            For both strategies the computations happen with two nested loops,
            respectively on chunks of X and chunks of Y.
            Strategies differ on which loop (outer or inner) is made to run
in parallel with the Cython `prange` construct:
- 'parallel_on_X' dispatches chunks of X uniformly on threads.
Each thread then iterates on all the chunks of Y. This strategy is
embarrassingly parallel and comes with no datastructures
synchronisation.
- 'parallel_on_Y' dispatches chunks of Y uniformly on threads.
Each thread processes all the chunks of X in turn. This strategy is
a sequence of embarrassingly parallel subtasks (the inner loop on Y
chunks) with intermediate datastructures synchronisation at each
iteration of the sequential outer loop on X chunks.
- 'auto' relies on a simple heuristic to choose between
'parallel_on_X' and 'parallel_on_Y': when `X.shape[0]` is large enough,
'parallel_on_X' is usually the most efficient strategy.
When `X.shape[0]` is small but `Y.shape[0]` is large, 'parallel_on_Y'
brings more opportunity for parallelism and is therefore more efficient
despite the synchronization step at each iteration of the outer loop
on chunks of `X`.
- None (default) looks-up in scikit-learn configuration for
`pairwise_dist_parallel_strategy`, and use 'auto' if it is not set.
Returns
-------
probabilities : ndarray of shape (n_samples_X, n_classes)
An array containing the class probabilities for each sample.
Notes
-----
This classmethod is responsible for introspecting the arguments
values to dispatch to the most appropriate implementation of
:class:`PairwiseDistancesArgKmin`.
This allows decoupling the API entirely from the implementation details
whilst maintaining RAII: all temporarily allocated datastructures necessary
for the concrete implementation are therefore freed when this classmethod
returns.
"""
if weights not in {"uniform", "distance"}:
raise ValueError(
"Only the 'uniform' or 'distance' weights options are supported"
f" at this time. Got: {weights=}."
)
if X.dtype == Y.dtype == np.float64:
return ArgKminClassMode64.compute(
X=X,
Y=Y,
k=k,
weights=weights,
Y_labels=np.array(Y_labels, dtype=np.intp),
unique_Y_labels=np.array(unique_Y_labels, dtype=np.intp),
metric=metric,
chunk_size=chunk_size,
metric_kwargs=metric_kwargs,
strategy=strategy,
)
if X.dtype == Y.dtype == np.float32:
return ArgKminClassMode32.compute(
X=X,
Y=Y,
k=k,
weights=weights,
Y_labels=np.array(Y_labels, dtype=np.intp),
unique_Y_labels=np.array(unique_Y_labels, dtype=np.intp),
metric=metric,
chunk_size=chunk_size,
metric_kwargs=metric_kwargs,
strategy=strategy,
)
raise ValueError(
"Only float64 or float32 datasets pairs are supported at this time, "
f"got: X.dtype={X.dtype} and Y.dtype={Y.dtype}."
)
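# Illustrative usage sketch (not part of the module): the random arrays below are
# placeholders, and the (distances, indices) return order follows the
# ArgKmin.compute docstring above when return_distance=True.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X = np.ascontiguousarray(rng.random((100, 8)))
    Y = np.ascontiguousarray(rng.random((500, 8)))
    if ArgKmin.is_usable_for(X, Y, metric="euclidean"):
        distances, indices = ArgKmin.compute(
            X, Y, k=3, metric="euclidean", return_distance=True
        )
        print(indices.shape)  # one row of k neighbor indices per sample in X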
|
798fe99f411f63141d6f79b216f752faed9f7878
|
ec9fd70a945d9ad6c7018ed96ca71a65c76bb471
|
/onshape_to_robot/onshape_api/client.py
|
37f7d33e935eba72b939118046e508d3bc956739
|
[
"MIT"
] |
permissive
|
Rhoban/onshape-to-robot
|
0c252e37c09988d878bdaed2ffd42e460bd48411
|
f4716a23046bada1c90e3e1acd81686a6d1f2593
|
refs/heads/master
| 2023-08-17T23:22:54.592494
| 2023-03-14T15:52:19
| 2023-03-14T15:52:19
| 176,774,675
| 175
| 40
|
MIT
| 2023-09-05T23:31:58
| 2019-03-20T16:30:53
|
Python
|
UTF-8
|
Python
| false
| false
| 12,095
|
py
|
client.py
|
'''
client
======
Convenience functions for working with the Onshape API
'''
from .onshape import Onshape
import mimetypes
import random
import string
import os
import json
import hashlib
from pathlib import Path
def escape_slash(s):
return s.replace('/', '%2f')
class Client():
'''
Defines methods for testing the Onshape API. Comes with several methods:
- Create a document
- Delete a document
- Get a list of documents
Attributes:
- stack (str, default='https://cad.onshape.com'): Base URL
- logging (bool, default=True): Turn logging on or off
'''
def __init__(self, stack='https://cad.onshape.com', logging=True, creds='./config.json'):
'''
Instantiates a new Onshape client.
Args:
- stack (str, default='https://cad.onshape.com'): Base URL
- logging (bool, default=True): Turn logging on or off
'''
self._metadata_cache = {}
self._massproperties_cache = {}
self._stack = stack
self._api = Onshape(stack=stack, logging=logging, creds=creds)
self.useCollisionsConfigurations = True
@staticmethod
def get_cache_path() -> Path:
"""Return the path to the user cache."""
path = Path.home() / ".cache" / "onshape-to-robot"
path.mkdir(parents=True, exist_ok=True)
return path
def new_document(self, name='Test Document', owner_type=0, public=False):
'''
Create a new document.
Args:
- name (str, default='Test Document'): The doc name
- owner_type (int, default=0): 0 for user, 1 for company, 2 for team
- public (bool, default=False): Whether or not to make doc public
Returns:
- requests.Response: Onshape response data
'''
payload = {
'name': name,
'ownerType': owner_type,
'isPublic': public
}
return self._api.request('post', '/api/documents', body=payload)
def rename_document(self, did, name):
'''
Renames the specified document.
Args:
- did (str): Document ID
- name (str): New document name
Returns:
- requests.Response: Onshape response data
'''
payload = {
'name': name
}
return self._api.request('post', '/api/documents/' + did, body=payload)
def del_document(self, did):
'''
Delete the specified document.
Args:
- did (str): Document ID
Returns:
- requests.Response: Onshape response data
'''
return self._api.request('delete', '/api/documents/' + did)
def get_document(self, did):
'''
Get details for a specified document.
Args:
- did (str): Document ID
Returns:
- requests.Response: Onshape response data
'''
return self._api.request('get', '/api/documents/' + did)
def cache_get(self, method, key, callback, isString = False):
if type(key) == tuple:
key = '_'.join(list(key))
fileName = method+'__'+key
dirName = self.get_cache_path()
m = hashlib.sha1()
m.update(fileName.encode('utf-8'))
fileName = m.hexdigest()
fileName = dirName / fileName
if os.path.exists(fileName):
with open(fileName, "rb") as stream:
result = stream.read()
else:
result = callback().content
with open(fileName, 'wb') as stream:
stream.write(result)
if isString and type(result) == bytes:
result = result.decode('utf-8')
return result
def list_documents(self):
'''
Get list of documents for current user.
Returns:
- requests.Response: Onshape response data
'''
return self._api.request('get', '/api/documents')
def list_elements(self, did, wid, type='w'):
'''
Get the list of elements in a given document
'''
return self._api.request('get', '/api/documents/d/'+did+'/'+type+'/'+wid+'/elements')
def create_assembly(self, did, wid, name='My Assembly'):
'''
Creates a new assembly element in the specified document / workspace.
Args:
- did (str): Document ID
- wid (str): Workspace ID
- name (str, default='My Assembly')
Returns:
- requests.Response: Onshape response data
'''
payload = {
'name': name
}
return self._api.request('post', '/api/assemblies/d/' + did + '/w/' + wid, body=payload)
def get_assembly(self, did, wid, eid, type='w', configuration='default'):
return self._api.request('get', '/api/assemblies/d/'+did+'/'+type+'/'+wid+'/e/'+eid, query={'includeMateFeatures': 'true', 'includeMateConnectors': 'true', 'includeNonSolids': 'true', 'configuration': configuration}).json()
def get_features(self, did, wid, eid, type='w'):
'''
Gets the feature list for specified document / workspace / part studio.
Args:
- did (str): Document ID
- wid (str): Workspace ID
- eid (str): Element ID
Returns:
- requests.Response: Onshape response data
'''
return self._api.request('get', '/api/assemblies/d/' + did + '/'+type+'/' + wid + '/e/' + eid + '/features').json()
def get_assembly_features(self, did, wid, eid):
'''
Gets the feature list for specified document / workspace / part studio.
Args:
- did (str): Document ID
- wid (str): Workspace ID
- eid (str): Element ID
Returns:
- requests.Response: Onshape response data
'''
return self._api.request('get', '/api/assemblies/d/' + did + '/w/' + wid + '/e/' + eid + '/features')
def get_partstudio_tessellatededges(self, did, wid, eid):
'''
Gets the tessellation of the edges of all parts in a part studio.
Args:
- did (str): Document ID
- wid (str): Workspace ID
- eid (str): Element ID
Returns:
- requests.Response: Onshape response data
'''
return self._api.request('get', '/api/partstudios/d/' + did + '/w/' + wid + '/e/' + eid + '/tessellatededges')
def upload_blob(self, did, wid, filepath='./blob.json'):
'''
Uploads a file to a new blob element in the specified doc.
Args:
- did (str): Document ID
- wid (str): Workspace ID
- filepath (str, default='./blob.json'): Blob element location
Returns:
- requests.Response: Onshape response data
'''
chars = string.ascii_letters + string.digits
boundary_key = ''.join(random.choice(chars) for i in range(8))
mimetype = mimetypes.guess_type(filepath)[0]
encoded_filename = os.path.basename(filepath)
file_content_length = str(os.path.getsize(filepath))
with open(filepath, "r", encoding="utf-8") as stream:
blob = stream.read()
req_headers = {
'Content-Type': 'multipart/form-data; boundary="%s"' % boundary_key
}
# build request body
payload = '--' + boundary_key + '\r\nContent-Disposition: form-data; name="encodedFilename"\r\n\r\n' + encoded_filename + '\r\n'
payload += '--' + boundary_key + '\r\nContent-Disposition: form-data; name="fileContentLength"\r\n\r\n' + file_content_length + '\r\n'
payload += '--' + boundary_key + '\r\nContent-Disposition: form-data; name="file"; filename="' + encoded_filename + '"\r\n'
payload += 'Content-Type: ' + mimetype + '\r\n\r\n'
payload += blob
payload += '\r\n--' + boundary_key + '--'
return self._api.request('post', '/api/blobelements/d/' + did + '/w/' + wid, headers=req_headers, body=payload)
def part_studio_stl(self, did, wid, eid):
'''
Exports STL export from a part studio
Args:
- did (str): Document ID
- wid (str): Workspace ID
- eid (str): Element ID
Returns:
- requests.Response: Onshape response data
'''
req_headers = {
'Accept': '*/*'
}
return self._api.request('get', '/api/partstudios/d/' + did + '/w/' + wid + '/e/' + eid + '/stl', headers=req_headers)
def hash_partid(self, data):
m = hashlib.sha1()
m.update(data.encode('utf-8'))
return m.hexdigest()
def get_sketches(self, did, mid, eid, configuration):
def invoke():
return self._api.request('get', '/api/partstudios/d/' + did + '/m/' + mid + '/e/' + eid + '/sketches',
query={'includeGeometry': 'true', 'configuration': configuration})
return json.loads(self.cache_get('sketches', (did, mid, eid, configuration), invoke))
def get_parts(self, did, mid, eid, configuration):
def invoke():
return self._api.request('get', '/api/parts/d/' + did + '/m/' + mid + '/e/' + eid, query=
{'configuration': configuration})
return json.loads(self.cache_get('parts_list', (did, mid, eid, configuration), invoke))
def find_new_partid(self, did, mid, eid, partid, configuration_before, configuration):
before = self.get_parts(did, mid, eid, configuration_before)
name = None
for entry in before:
if entry['partId'] == partid:
name = entry['name']
if name is not None:
after = self.get_parts(did, mid, eid, configuration)
for entry in after:
if entry['name'] == name:
return entry['partId']
else:
print("OnShape ERROR: Can't find new partid for "+str(partid))
return partid
def part_studio_stl_m(self, did, mid, eid, partid, configuration = 'default'):
if self.useCollisionsConfigurations:
configuration_before = configuration
parts = configuration.split(';')
partIdChanged = False
result = ''
for k, part in enumerate(parts):
kv = part.split('=')
if len(kv) == 2:
if kv[0] == 'collisions':
kv[1] = 'true'
partIdChanged = True
parts[k] = '='.join(kv)
configuration = ';'.join(parts)
if partIdChanged:
partid = self.find_new_partid(did, mid, eid, partid, configuration_before, configuration)
def invoke():
req_headers = {
'Accept': '*/*'
}
return self._api.request('get', '/api/parts/d/' + did + '/m/' + mid + '/e/' + eid + '/partid/'+escape_slash(partid)+'/stl', query={'mode': 'binary', 'units': 'meter', 'configuration': configuration}, headers=req_headers)
return self.cache_get('part_stl', (did, mid, eid, self.hash_partid(partid), configuration), invoke)
def part_get_metadata(self, did, mid, eid, partid, configuration = 'default'):
def invoke():
return self._api.request('get', '/api/metadata/d/' + did + '/m/' + mid + '/e/' + eid + '/p/'+escape_slash(partid), query={'configuration': configuration})
return json.loads(self.cache_get('metadata', (did, mid, eid, self.hash_partid(partid), configuration), invoke, True))
def part_mass_properties(self, did, mid, eid, partid, configuration = 'default'):
def invoke():
return self._api.request('get', '/api/parts/d/' + did + '/m/' + mid + '/e/' + eid + '/partid/'+escape_slash(partid)+'/massproperties', query={'configuration': configuration, 'useMassPropertyOverrides': True})
return json.loads(self.cache_get('massproperties', (did, mid, eid, self.hash_partid(partid), configuration), invoke, True))
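# Illustrative usage sketch (not part of the original module). It assumes a valid
# ./config.json with Onshape API keys, that Onshape.request returns a
# requests.Response (as the .json()/.content usage above suggests), and that the
# documents endpoint returns an 'items' list with 'name'/'id' fields.
if __name__ == '__main__':
    client = Client(stack='https://cad.onshape.com', logging=False, creds='./config.json')
    response = client.list_documents()  # GET /api/documents
    print(response.status_code)
    for doc in response.json().get('items', []):
        print(doc.get('name'), doc.get('id'))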
|
dd574a9c8ec92fac0c5b584c569015bd44f6aa46
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-framework-Cocoa/PyObjCTest/test_nsjsonserialization.py
|
da59cc80518031a52cac618cac221976972a73a7
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,730
|
py
|
test_nsjsonserialization.py
|
import Foundation
from PyObjCTools.TestSupport import TestCase, min_os_level
class TestNSJSONSerialization(TestCase):
def test_enum_types(self):
self.assertIsEnumType(Foundation.NSJSONReadingOptions)
self.assertIsEnumType(Foundation.NSJSONWritingOptions)
@min_os_level("10.7")
def testConstants10_7(self):
self.assertEqual(Foundation.NSJSONReadingMutableContainers, (1 << 0))
self.assertEqual(Foundation.NSJSONReadingMutableLeaves, (1 << 1))
self.assertEqual(Foundation.NSJSONReadingAllowFragments, (1 << 2))
self.assertEqual(Foundation.NSJSONReadingFragmentsAllowed, (1 << 2))
self.assertEqual(Foundation.NSJSONReadingJSON5Allowed, (1 << 3))
self.assertEqual(Foundation.NSJSONReadingTopLevelDictionaryAssumed, (1 << 4))
self.assertEqual(Foundation.NSJSONWritingPrettyPrinted, (1 << 0))
self.assertEqual(Foundation.NSJSONWritingSortedKeys, (1 << 1))
self.assertEqual(Foundation.NSJSONWritingFragmentsAllowed, (1 << 2))
self.assertEqual(Foundation.NSJSONWritingWithoutEscapingSlashes, (1 << 3))
@min_os_level("10.7")
def testMethod10_7(self):
self.assertResultIsBOOL(Foundation.NSJSONSerialization.isValidJSONObject_)
self.assertArgIsOut(
Foundation.NSJSONSerialization.dataWithJSONObject_options_error_, 2
)
self.assertArgIsOut(
Foundation.NSJSONSerialization.JSONObjectWithData_options_error_, 2
)
self.assertArgIsOut(
Foundation.NSJSONSerialization.writeJSONObject_toStream_options_error_, 3
)
self.assertArgIsOut(
Foundation.NSJSONSerialization.JSONObjectWithStream_options_error_, 2
)
|
c954a05343bfd4e6a605db9d5c25f50ea0200107
|
5095200e9ca55cd3a37af34ed44448c02e2a1bb5
|
/modules/image/Image_editing/super_resolution/dcscn/processor.py
|
04ac460e5397339fa03d21e5d85bfd2d0bb58d10
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PaddleHub
|
8712603ef486c45e83eb0bc5725b0b3ed3ddbbde
|
b402610a6f0b382a978e82473b541ea1fc6cf09a
|
refs/heads/develop
| 2023-07-24T06:03:13.172978
| 2023-03-28T11:49:55
| 2023-03-28T11:49:55
| 162,672,577
| 12,914
| 2,239
|
Apache-2.0
| 2023-07-06T21:38:19
| 2018-12-21T06:00:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,546
|
py
|
processor.py
|
# -*- coding:utf-8 -*-
import os
import time
import base64
import cv2
import numpy as np
__all__ = ['cv2_to_base64', 'base64_to_cv2', 'postprocess']
def cv2_to_base64(image):
data = cv2.imencode('.jpg', image)[1]
return base64.b64encode(data.tostring()).decode('utf8')
def base64_to_cv2(b64str):
data = base64.b64decode(b64str.encode('utf8'))
data = np.fromstring(data, np.uint8)
data = cv2.imdecode(data, cv2.IMREAD_COLOR)
return data
def postprocess(data_out, org_im, org_im_shape, org_im_path, output_dir, visualization):
"""
Postprocess output of network. one image at a time.
Args:
data_out (numpy.ndarray): output of network.
org_im (numpy.ndarray): original image.
        org_im_shape (list): shape of original image.
        org_im_path (str): path of original image.
output_dir (str): output directory to store image.
visualization (bool): whether to save image or not.
Returns:
result (dict): The data of processed image.
"""
result = dict()
for sr in data_out:
sr = np.squeeze(sr, 0)
sr = np.clip(sr * 255, 0, 255)
sr = sr.astype(np.uint8)
shape = sr.shape
if visualization:
org_im = cv2.cvtColor(org_im, cv2.COLOR_BGR2YUV)
uv = cv2.resize(org_im[..., 1:], (shape[1], shape[0]), interpolation=cv2.INTER_CUBIC)
combine_im = cv2.cvtColor(np.concatenate((sr, uv), axis=2), cv2.COLOR_YUV2BGR)
check_dir(output_dir)
save_im_path = get_save_image_name(org_im, org_im_path, output_dir)
cv2.imwrite(save_im_path, combine_im)
print("save image at: ", save_im_path)
result['save_path'] = save_im_path
result['data'] = sr
else:
result['data'] = sr
return result
def check_dir(dir_path):
if not os.path.exists(dir_path):
os.makedirs(dir_path)
elif os.path.isfile(dir_path):
os.remove(dir_path)
os.makedirs(dir_path)
def get_save_image_name(org_im, org_im_path, output_dir):
"""
Get save image name from source image path.
"""
    # name prefix of original image
org_im_name = os.path.split(org_im_path)[-1]
im_prefix = os.path.splitext(org_im_name)[0]
ext = '.png'
# save image path
save_im_path = os.path.join(output_dir, im_prefix + ext)
if os.path.exists(save_im_path):
save_im_path = os.path.join(output_dir, im_prefix + 'time={}'.format(int(time.time())) + ext)
return save_im_path
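# Hedged usage sketch (not part of the module): 'sr_batch' stands in for the DCSCN
# network output (one array per image, each with a leading singleton batch axis) and
# 'test.jpg' is a hypothetical input path; both are assumptions for illustration only.
#
#   org_im = cv2.imread('test.jpg')
#   result = postprocess(
#       data_out=sr_batch,
#       org_im=org_im,
#       org_im_shape=list(org_im.shape),
#       org_im_path='test.jpg',
#       output_dir='dcscn_output',
#       visualization=True)
#   # With visualization=True, result['save_path'] points at the written image and
#   # result['data'] holds the upscaled luminance channel as a uint8 array.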
|
ec4665989bef7c098b518981e4909a9b28eb6138
|
81b450ffc4dcbb09272e24d938e8bf6ee911f5c9
|
/examples/deadline/All-In-AWS-Infrastructure-Basic/python/package/lib/storage_tier.py
|
22600a7c2095e4cb2c8f0f64bf9d52ee904006a1
|
[
"SSPL-1.0",
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
aws/aws-rfdk
|
2e3968ced5f64b77ca5f801477cb4fdb2c66f9a0
|
a875bc0804c74479e717aa3846c5d290b2ec416a
|
refs/heads/mainline
| 2023-09-01T02:43:20.983370
| 2023-08-18T19:05:30
| 2023-08-18T19:05:30
| 274,453,874
| 102
| 40
|
Apache-2.0
| 2023-09-12T14:31:35
| 2020-06-23T16:20:36
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 18,706
|
py
|
storage_tier.py
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
from dataclasses import dataclass
from typing import Optional
from aws_cdk import (
Duration,
RemovalPolicy,
Size,
Stack,
StackProps
)
from aws_cdk.aws_cloudwatch import (
ComparisonOperator,
Metric,
TreatMissingData
)
from aws_cdk.aws_cloudwatch_actions import (
SnsAction
)
from aws_cdk.aws_docdb import (
BackupProps,
DatabaseCluster,
Login
)
from aws_cdk.aws_ec2 import (
InstanceType,
IVpc,
SubnetSelection
)
from aws_cdk.aws_efs import (
AccessPoint,
Acl,
FileSystem,
PosixUser
)
from aws_cdk.aws_iam import (
ServicePrincipal
)
from aws_cdk.aws_kms import (
Key
)
from aws_cdk.aws_route53 import (
IPrivateHostedZone
)
from aws_cdk.aws_sns import (
Topic
)
from aws_cdk.aws_sns_subscriptions import (
EmailSubscription
)
from aws_rfdk import (
MongoDbUsers,
MongoDbX509User,
DistinguishedName,
MongoDbInstance,
MongoDbApplicationProps,
MongoDbPostInstallSetup,
MongoDbSsplLicenseAcceptance,
MongoDbVersion,
MountableEfs,
PadEfsStorage,
X509CertificatePem,
X509CertificatePkcs12
)
from aws_rfdk.deadline import (
DatabaseConnection
)
from constructs import (
Construct
)
from . import subnets
@dataclass
class StorageTierProps(StackProps):
"""
Properties for StorageTier
"""
# The VPC to deploy resources into.
vpc: IVpc
# Email address to send alerts to when CloudWatch Alarms breach. If not specified, no alarms or alerts will be
# deployed.
alarm_email: Optional[str]
class StorageTier(Stack):
"""
The storage tier of the render farm.
This stack contains all constructs that persist data which would be useful to keep between deployments.
    There should be little to no "business-logic" constructs in this stack.
"""
def __init__(self, scope: Construct, stack_id: str, *, props: StorageTierProps, **kwargs):
"""
Initializes a new instance of StorageTier
:param scope: The scope of this construct.
:param stack_id: The ID of this construct.
:param props: The properties for the storage tier.
:param kwargs: Any kwargs that need to be passed on to the parent class.
"""
super().__init__(scope, stack_id, **kwargs)
# The file-system to use (e.g. to install Deadline Repository onto).
file_system = FileSystem(
self,
'EfsFileSystem',
vpc=props.vpc,
vpc_subnets=SubnetSelection(
subnet_group_name=subnets.INFRASTRUCTURE.name
),
encrypted=True,
# TODO - Evaluate this removal policy for your own needs. This is set to DESTROY to
# cleanly remove everything when this stack is destroyed. If you would like to ensure
# that your data is not accidentally deleted, you should modify this value.
removal_policy=RemovalPolicy.DESTROY
)
# Create an EFS access point that is used to grant the Repository and RenderQueue with write access to the
# Deadline Repository directory in the EFS file-system.
access_point = AccessPoint(
self,
'AccessPoint',
file_system=file_system,
# The AccessPoint will create the directory (denoted by the path property below) if it doesn't exist with
# the owning UID/GID set as specified here. These should be set up to grant read and write access to the
            # UID/GID configured in the "posix_user" property below.
create_acl=Acl(
owner_uid='10000',
owner_gid='10000',
permissions='750',
),
# When you mount the EFS via the access point, the mount will be rooted at this path in the EFS file-system
path='/DeadlineRepository',
# TODO - When you mount the EFS via the access point, all file-system operations will be performed using
# these UID/GID values instead of those from the user on the system where the EFS is mounted. If you intend
# to use the same EFS file-system for other purposes (e.g. render assets, plug-in storage), you may want to
# evaluate the UID/GID permissions based on your requirements.
posix_user=PosixUser(
uid='10000',
gid='10000'
)
)
self.mountable_file_system = MountableEfs(
self,
filesystem=file_system,
access_point=access_point,
# We have enable_local_file_caching set to True on the RenderQueue in the
# Service Tier. EFS requires the 'fsc' mount option to take advantage of
# that.
extra_mount_options=['fsc']
)
# The database to connect Deadline to.
self.database: Optional[DatabaseConnection] = None
# The Amazon EFS filesystem deployed above has been deployed in bursting throughput
# mode. This means that it can burst throughput up to 100 MiB/s (with reads counting as
# 1/3 of their actual throughput for this purpose). However, the baseline throughput of the EFS
# is 50 KiB/s per 1 GiB stored in the filesystem and exceeding this throughput consumes burst credits.
# An EFS starts with a large amount of burst credits, and regains credits when throughput is below
# the baseline throughput threshold.
#
        # The Deadline Repository is approximately 1 GiB in size, resulting in 50 KiB/s baseline throughput, which is
# not sufficient for the operation of Deadline.
#
# The following:
# 1) Sets up a series of AWS CloudWatch Alarms that will send you an email to alert you to take action
# to increase the data stored in the filesystem when the burst credits have decreased below certain thresholds.
# If you run out of burst credits on the filesystem, then Deadline will start timing-out on requests and your
# render farm may become unstable.
# 2) Uses RFDK's PadEfsStorage construct to add data to the EFS for the purpose of increasing the amount
# of stored data to increase the baseline throughput.
#
# See: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html
# for more information on AWS CloudWatch Alarms.
# See: https://docs.aws.amazon.com/efs/latest/ug/performance.html#throughput-modes
# for more information on Amazon EFS throughput modes.
if props.alarm_email:
self.add_low_efs_burst_credit_alarms(file_system, props.alarm_email)
# Add padding files to the filesystem to increase baseline throughput. We add files to the filesystem to
# increase this baseline throughput, while retaining the ability to burst throughput. See RFDK's PadEfsStorage
# documentation for additional details.
pad_access_point = AccessPoint(
self,
'PaddingAccessPoint',
file_system=file_system,
path='/RFDK_PaddingFiles',
# TODO - We set the padding files to be owned by root (uid/gid = 0) by default. You may wish to change this.
create_acl=Acl(
owner_gid='0',
owner_uid='0',
permissions='700',
),
posix_user=PosixUser(
uid='0',
gid='0',
),
)
PadEfsStorage(
self,
'PadEfsStorage',
vpc=props.vpc,
vpc_subnets=SubnetSelection(
subnet_group_name=subnets.INFRASTRUCTURE.name
),
access_point=pad_access_point,
desired_padding=Size.gibibytes(40), # Provides 2 MiB/s of baseline throughput. Costs $12/month.
)
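        # Hedged back-of-the-envelope check of the figure quoted above (assumption:
        # bursting-mode EFS earns roughly 50 KiB/s of baseline throughput per GiB
        # stored, as described in the comments earlier in this method). With the
        # 40 GiB of padding requested from PadEfsStorage:
        #   40 GiB * 50 KiB/s per GiB = 2,000 KiB/s, i.e. roughly 2 MiB/s of baseline
        # throughput. Scale desired_padding up if your repository workload needs more
        # sustained throughput.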
def add_low_efs_burst_credit_alarms(self, filesystem: FileSystem, email_address: str) -> None:
'''
Set up CloudWatch Alarms that will warn when the given filesystem's burst credits are below
four different thresholds. We send an email to the given address when an Alarm breaches.
'''
# Set up the SNS Topic that will send the emails.
# ====================
        # 1) KMS key to use to encrypt events within the SNS Topic. The Key is optional.
key = Key(
self,
'SNSEncryptionKey',
description='Used to encrypt the SNS Topic for sending EFS Burst Credit alerts',
enable_key_rotation=True,
removal_policy=RemovalPolicy.DESTROY
)
key.grant(ServicePrincipal('cloudwatch.amazonaws.com'), 'kms:Decrypt', 'kms:GenerateDataKey')
# 2) SNS Topic that will be alerted by CloudWatch and will send the email in response.
sns_topic = Topic(
self,
'BurstAlertEmailTopic',
master_key=key
)
sns_topic.grant_publish(ServicePrincipal('cloudwatch.amazonaws.com'))
sns_topic.add_subscription(EmailSubscription(email_address))
alarm_action = SnsAction(sns_topic)
# Set up the CloudWatch Alarm(s) and have them trigger SNS events when breached.
# ======================
# 1) CDK helper to define the CloudWatch Metric that we're interested in.
burst_credits_metric = Metric(
metric_name='BurstCreditBalance',
namespace='AWS/EFS',
dimensions_map={
"FileSystemId": filesystem.file_system_id
},
# One 99-th percentile data point sample every hour
period=Duration.hours(1),
statistic='p99'
)
# 2) Create the alarms
thresholds = [
{
"id": 'CAUTION-EfsBurstCredits',
"name": f"CAUTION Burst Credits - {filesystem.file_system_id}",
"threshold": int(2.00 * 2**40),
"message": f"CAUTION. 2 TiB Threshold Breached: EFS {filesystem.file_system_id} is depleting burst credits. Add data to the EFS to increase baseline throughput.",
# Alarm after 6 datapoints below threshold. We have 1 datapoint every hour. So, we alarm if below threshold for 6hrs
"datapoints": 6
},
{
"id": 'WARNING-EfsBurstCredits',
"name": f"WARNING Burst Credits - {filesystem.file_system_id}",
"threshold": int(1.25 * 2**40),
"message": f"WARNING. 1.25 TiB Threshold Breached: EFS {filesystem.file_system_id} is depleting burst credits. Add data to the EFS to increase baseline throughput.",
# Alarm after 6 datapoints below threshold. We have 1 datapoint every hour. So, we alarm if below threshold for 6hrs
"datapoints": 6
},
{
"id": 'ALERT-EfsBurstCredits',
"name": f"ALERT Burst Credits - {filesystem.file_system_id}",
"threshold": int(0.50 * 2**40),
"message": f"ALERT! 500 GiB Threshold Breached: EFS {filesystem.file_system_id} is running out of burst credits. Add data to the EFS to increase baseline throughput or else the Render Farm may cease operation.",
# Alarm after 6 datapoints below threshold. We have 1 datapoint every hour. So, we alarm if below threshold for 6hrs
"datapoints": 6
},
{
"id": 'EMERGENCY-EfsBurstCredits',
"name": f"EMERGENCY Burst Credits - {filesystem.file_system_id}",
"threshold": int(0.10 * 2**40),
"message": f"EMERGENCY! 100 GiB Threshold Breached: EFS {filesystem.file_system_id} is running out of burst credits. Add data to the EFS to increase baseline throughput or else the Render Farm will cease operation.",
# Alarm after 2 datapoints below threshold. We have 1 datapoint every hour. So, we alarm if below threshold for 2hrs
"datapoints": 2
},
]
for config in thresholds:
alarm = burst_credits_metric.create_alarm(
self,
config['id'],
alarm_name=config['name'],
actions_enabled=True,
alarm_description=config['message'],
treat_missing_data=TreatMissingData.NOT_BREACHING,
threshold=config['threshold'],
comparison_operator=ComparisonOperator.LESS_THAN_THRESHOLD,
evaluation_periods=config['datapoints']
)
alarm.add_alarm_action(alarm_action)
@dataclass
class StorageTierDocDBProps(StorageTierProps):
"""
Properties for StorageTierDocDB.
"""
# The InstanceType for DocDB.
database_instance_type: InstanceType
class StorageTierDocDB(StorageTier):
"""
An implementation of StorageTier that is backed by DocumentDB.
"""
def __init__(self, scope: Construct, stack_id: str, *, props: StorageTierDocDBProps, **kwargs):
"""
Initializes a new instance of StorageTier
:param scope: The scope of this construct.
:param stack_id: the ID of this construct.
:param props: The properties for the storage tier.
:param kwargs: Any kwargs that need to be passed on to the parent class.
"""
super().__init__(scope, stack_id, props=props, **kwargs)
doc_db = DatabaseCluster(
self,
'DocDBCluster',
vpc=props.vpc,
vpc_subnets=SubnetSelection(
subnet_group_name=subnets.INFRASTRUCTURE.name
),
instance_type=props.database_instance_type,
# TODO - For cost considerations this example only uses 1 Database instance.
# It is recommended that when creating your render farm you use at least 2 instances for redundancy.
instances=1,
master_user=Login(username='adminuser'),
engine_version='3.6.0',
backup=BackupProps(
# We recommend setting the retention of your backups to 15 days
# for security reasons. The default retention is just one day.
# Please note that changing this value will affect cost.
retention=Duration.days(15)
),
# TODO - Evaluate this removal policy for your own needs. This is set to DESTROY to
# cleanly remove everything when this stack is destroyed. If you would like to ensure
# that your data is not accidentally deleted, you should modify this value.
removal_policy=RemovalPolicy.DESTROY
)
self.database = DatabaseConnection.for_doc_db(
database=doc_db,
login=doc_db.secret
)
@dataclass
class StorageTierMongoDBProps(StorageTierProps):
"""
Properties for StorageTierMongoDB
"""
# The InstanceType for MongoDB.
database_instance_type: InstanceType
# Self-signed root CA to sign server certificate with.
root_ca: X509CertificatePem
# Internal DNS zone for the VPC.
dns_zone: IPrivateHostedZone
# Whether the SSPL license is accepted or not.
accept_sspl_license: MongoDbSsplLicenseAcceptance
# The name of the EC2 keypair to associate with the MongoDB instance.
key_pair_name: Optional[str]
class StorageTierMongoDB(StorageTier):
"""
An implementation of StorageTier that is backed by MongoDB.
"""
def __init__(self, scope: Construct, stack_id: str, *, props: StorageTierMongoDBProps, **kwargs):
"""
Initialize a new instance of StorageTierMongoDB
:param scope: The scope of this construct.
:param stack_id: The ID of this construct.
:param props: The properties for this construct.
:param kwargs: Any kwargs that need to be passed on to the parent class.
"""
super().__init__(scope, stack_id, props=props, **kwargs)
server_cert = X509CertificatePem(
self,
'MongoCert',
subject=DistinguishedName(
cn=f'mongo.{props.dns_zone.zone_name}',
o='RFDK-Sample',
ou='MongoServer'
),
signing_certificate=props.root_ca
)
client_cert = X509CertificatePem(
self,
'DeadlineMongoCert',
subject=DistinguishedName(
cn='SampleUser',
o='RFDK-Sample',
ou='MongoClient'
),
signing_certificate=props.root_ca
)
client_pkcs12 = X509CertificatePkcs12(
self,
'DeadlineMongoPkcs12',
source_certificate=client_cert
)
availability_zone = props.vpc.availability_zones[0]
mongo_vpc_subnet = SubnetSelection(
subnet_group_name=subnets.INFRASTRUCTURE.name,
availability_zones=[availability_zone]
)
mongo_db = MongoDbInstance(
self,
'MongoDb',
vpc=props.vpc,
vpc_subnets=mongo_vpc_subnet,
key_name=props.key_pair_name,
instance_type=props.database_instance_type,
mongo_db=MongoDbApplicationProps(
user_sspl_acceptance=props.accept_sspl_license,
version=MongoDbVersion.COMMUNITY_3_6,
hostname='mongo',
dns_zone=props.dns_zone,
server_certificate=server_cert
)
)
_mongo_db_post_install_setup = MongoDbPostInstallSetup(
self,
'MongoDbPostInstall',
vpc=props.vpc,
vpc_subnets=mongo_vpc_subnet,
mongo_db=mongo_db,
users=MongoDbUsers(
x509_auth_users=[
MongoDbX509User(
certificate=client_cert.cert,
roles=json.dumps([
{
'role': 'readWriteAnyDatabase',
'db': 'admin'
},
{
'role': 'clusterMonitor',
'db': 'admin'
}
])
)
]
)
)
self.database = DatabaseConnection.for_mongo_db_instance(
database=mongo_db,
client_certificate=client_pkcs12
)
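# Hedged wiring sketch (not part of this module): a CDK app would typically
# instantiate one of the concrete tiers roughly as follows, where 'app', 'my_vpc',
# and the remaining property values are assumptions supplied elsewhere in the
# example application:
#
#   StorageTierDocDB(
#       app, 'StorageTier',
#       props=StorageTierDocDBProps(
#           vpc=my_vpc,
#           database_instance_type=InstanceType.of(InstanceClass.R5, InstanceSize.LARGE),
#           alarm_email='admin@example.com',
#       ),
#   )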
|