| column | dtype | stats (min to max) |
|---|---|---|
| blob_id | string | lengths 40 to 40 |
| directory_id | string | lengths 40 to 40 |
| path | string | lengths 4 to 721 |
| content_id | string | lengths 40 to 40 |
| detected_licenses | list | lengths 0 to 57 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5 to 91 |
| snapshot_id | string | lengths 40 to 40 |
| revision_id | string | lengths 40 to 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 to 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| github_id | int64 | 426 to 681M |
| star_events_count | int64 | 101 to 243k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns] (nullable) | 2012-06-28 18:51:49 to 2023-09-14 21:59:16 |
| gha_created_at | timestamp[ns] (nullable) | 2008-02-11 22:55:26 to 2023-08-10 11:14:58 |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 to 10.2M |
| extension | string | 115 classes |
| filename | string | lengths 3 to 113 |
| content | string | lengths 6 to 10.2M |
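Each example row below lists its cells in the column order above, separated by `|` markers. As a quick orientation, here is a minimal sketch of streaming rows with this schema through the `datasets` library; the dataset id is a hypothetical placeholder, since this card does not name the repository:

```python
from datasets import load_dataset

# Hypothetical dataset id; substitute the repository this card belongs to.
ds = load_dataset("some-org/source-code-files", split="train", streaming=True)

# Inspect a couple of rows without downloading the full dataset.
for row in ds.take(2):
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
```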
55188ab2b54ddddd6385389a4369c974fd6e31f7
|
ae31542273a142210a1ff30fb76ed9d45d38eba9
|
/gpMgmt/bin/gppylib/commands/test/unit/test_unit_base.py
|
6de0f145edf40b5e71bb4537cc30fd6a5fab27bb
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"PostgreSQL",
"OpenSSL",
"LicenseRef-scancode-stream-benchmark",
"ISC",
"LicenseRef-scancode-openssl",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-ssleay-windows",
"BSD-2-Clause",
"Python-2.0"
] |
permissive
|
greenplum-db/gpdb
|
8334837bceb2d5d51a684500793d11b190117c6a
|
2c0f8f0fb24a2d7a7da114dc80f5f5a2712fca50
|
refs/heads/main
| 2023-08-22T02:03:03.806269
| 2023-08-21T22:59:53
| 2023-08-22T01:17:10
| 44,781,140
| 6,417
| 2,082
|
Apache-2.0
| 2023-09-14T20:33:42
| 2015-10-23T00:25:17
|
C
|
UTF-8
|
Python
| false
| false
| 2,814
|
py
|
test_unit_base.py
|
#!/usr/bin/env python3
#
# Copyright (c) Greenplum Inc 2012. All Rights Reserved.
#
import unittest
from mock import call, Mock, patch, ANY
from gppylib.commands.base import Command, WorkerPool, RemoteExecutionContext, GPHOME, LocalExecutionContext
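# These tests pin down how each ExecutionContext rewrites a Command's
# cmdStr before execution: LocalExecutionContext prefixes any propagated
# environment variables as `VAR=value && ...`, while RemoteExecutionContext
# additionally wraps the command in an ssh invocation that first sources
# greenplum_path.sh from GPHOME (or a caller-supplied gphome) on the
# target host, and retries when ssh connection setup fails.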
class WorkerPoolTestCase(unittest.TestCase):
def tearDown(self):
Command.propagate_env_map.clear()
def test_RemoteExecutionContext_uses_default_gphome(self):
self.subject = RemoteExecutionContext("myhost", "my_stdin")
cmd = Command("dummy name", "echo 'foo'")
self.subject.execute(cmd)
self.assertIn(". %s/greenplum_path.sh;" % GPHOME, cmd.cmdStr)
def test_RemoteExecutionContext_uses_provided_gphome_when_set(self):
self.subject = RemoteExecutionContext(targetHost="myhost", stdin="my_stdin", gphome="other/gphome")
cmd = Command("dummy name", "echo 'foo'")
self.subject.execute(cmd)
self.assertIn(". other/gphome/greenplum_path.sh;", cmd.cmdStr)
def test_LocalExecutionContext_uses_no_environment(self):
self.subject = LocalExecutionContext(None)
cmd = Command('test', cmdStr='ls /tmp')
self.subject.execute(cmd)
self.assertEqual("ls /tmp", cmd.cmdStr)
def test_LocalExecutionContext_uses_ampersand(self):
self.subject = LocalExecutionContext(None)
cmd = Command('test', cmdStr='ls /tmp')
cmd.propagate_env_map['foo'] = 1
self.subject.execute(cmd)
self.assertEqual("foo=1 && ls /tmp", cmd.cmdStr)
def test_LocalExecutionContext_uses_ampersand_multiple(self):
self.subject = LocalExecutionContext(None)
cmd = Command('test', cmdStr='ls /tmp')
cmd.propagate_env_map['foo'] = 1
cmd.propagate_env_map['bar'] = 1
self.subject.execute(cmd)
self.assertEqual("bar=1 && foo=1 && ls /tmp", cmd.cmdStr)
def test_RemoteExecutionContext_uses_ampersand_multiple(self):
self.subject = RemoteExecutionContext('localhost', None, 'gphome')
cmd = Command('test', cmdStr='ls /tmp')
cmd.propagate_env_map['foo'] = 1
cmd.propagate_env_map['bar'] = 1
self.subject.execute(cmd)
self.assertEqual("bar=1 && foo=1 && ssh -o StrictHostKeyChecking=no -o ServerAliveInterval=60 localhost "
"\". gphome/greenplum_path.sh; bar=1 && foo=1 && ls /tmp\"", cmd.cmdStr)
@patch('gppylib.commands.base.Command.get_stderr',
return_value="ssh_exchange_identification: Connection closed by remote host")
def test_RemoteExecutionContext_failed_and_retry(self, mock):
self.subject = RemoteExecutionContext('localhost', None, 'gphome')
cmd = Command('test', cmdStr='ls /tmp')
self.subject.execute(cmd)
self.assertEqual(Command.get_stderr.call_count, 11)
|
efd117eb8a84a1dc413cbdf0652b434997fe5cc2
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/cv/PAMTRI/infer/sdk/eval_mt.py
|
183d26317694b71b86caa4c945fcec89fa21ed5c
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 15,054
|
py
|
eval_mt.py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
''' eval MultiTaskNet '''
import csv
import ast
import argparse
import os.path as osp
import cv2
import numpy as np
from utils.inference import SdkInfer, infer_test
from utils.transforms import read_image_color, read_image_grayscale, segs, \
Compose_Keypt, to_tensor, normalize_mt_input, Resize_Keypt
def eval_market1501(distmat, q_vids, g_vids, q_camids, g_camids, max_rank=50):
"""Evaluation with Market1501 metrics
Key: for each query identity, its gallery images from the same camera view are discarded.
"""
num_q, num_g = distmat.shape
if num_g < max_rank:
max_rank = num_g
print(f"Note: number of gallery samples is quite small, got {num_g}")
indices = np.argsort(distmat, axis=1)
matches = (g_vids[indices] == q_vids[:, np.newaxis]).astype(np.int32)
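# matches[i, j] == 1 iff the j-th closest gallery item (by distance) has
# the same vehicle id as query i; each row is that query's ranked hit list.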
# compute cmc curve for each query
all_cmc = []
all_ap = []
num_valid_q = 0. # number of valid query
for q_idx in range(num_q):
# get query vid and camid
q_vid = q_vids[q_idx]
q_camid = q_camids[q_idx]
# remove gallery samples that have the same vid and camid with query
order = indices[q_idx]
remove = (g_vids[order] == q_vid) & (g_camids[order] == q_camid)
keep = np.invert(remove)
# compute cmc curve
# binary vector, positions with value 1 are correct matches
orig_cmc = matches[q_idx][keep]
if not np.any(orig_cmc):
# this condition is true when query identity does not appear in gallery
continue
cmc = orig_cmc.cumsum()
cmc[cmc > 1] = 1
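# Clipping the cumulative sum at 1 turns the hit list into the CMC
# indicator vector: entry k is 1 iff a correct match occurs within the
# top k+1 ranked gallery items.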
all_cmc.append(cmc[:max_rank])
num_valid_q += 1.
# compute average precision
# https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Average_precision
num_rel = orig_cmc.sum()
tmp_cmc = orig_cmc.cumsum()
tmp_cmc = [x / (i+1.) for i, x in enumerate(tmp_cmc)]
tmp_cmc = np.asarray(tmp_cmc) * orig_cmc
ap_ = tmp_cmc.sum() / num_rel
all_ap.append(ap_)
assert num_valid_q > 0, "Error: all query identities do not appear in gallery"
all_cmc = np.array(all_cmc, dtype=np.float32)
all_cmc = all_cmc.sum(0) / num_valid_q
map_ = np.mean(all_ap)
return all_cmc, map_
def get_mtn_dataset(label_path, imgs_path, desc='query', relabel=True):
''' get img and target value set '''
dataset = []
vid_container = set()
vcolor_container = set()
vtype_container = set()
vcolor2label = {}
vtype2label = {}
with open(osp.join(label_path, f'label_{desc}.csv'), encoding='utf-8') as label_file:
reader = csv.reader(label_file, delimiter=',')
for row in reader:
vid = int(row[1])
vid_container.add(vid)
vcolor = int(row[2])
vcolor_container.add(vcolor)
vtype = int(row[3])
vtype_container.add(vtype)
vkeypt = []
for k in range(36):
vkeypt.extend(
[float(row[4+3*k]), float(row[5+3*k]), float(row[6+3*k])])
# synthetic data do not have camera ID
camid = -1
camidx = row[0].find('c')
if camidx >= 0:
camid = int(row[0][camidx+1:camidx+4])
dataset.append([osp.join(imgs_path, f'image_{desc}', row[0]),
vid, camid, vcolor, vtype, vkeypt,
osp.join(
imgs_path, f'heatmap_{desc}', row[0][:-4]),
osp.join(imgs_path, f'segment_{desc}', row[0][:-4])])
if relabel:
vid2label = {vid: label for label, vid in enumerate(vid_container)}
vcolor2label = {vcolor: label for label,
vcolor in enumerate(vcolor_container)}
vtype2label = {vtype: label for label,
vtype in enumerate(vtype_container)}
if desc == 'train':
for v_ in dataset:
v_[1] = vid2label[v_[1]]
v_[3] = vcolor2label[v_[3]]
v_[4] = vtype2label[v_[4]]
return dataset, vcolor2label, vtype2label
transform = Compose_Keypt([
Resize_Keypt((256, 256))
])
def get_mtn_dataitem(dataset, index, heatmapaware=True, segmentaware=True, keyptaware=True):
''' get input data '''
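# Samples are assembled channel-wise: 3 RGB channels, optionally 36
# keypoint heatmaps, and optionally one segment mask per keypoint group
# in `segs` (zeroed when any keypoint in the group falls below the 0.5
# confidence threshold), before resizing, tensor conversion and
# normalisation.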
img_chnls = []
img_path, vid, camid, vcolor, vtype, vkeypt, heatmap_dir_path, segment_dir_path = dataset[
index]
img_orig = read_image_color(img_path)
height_orig, width_orig, _ = img_orig.shape
img_b, img_g, img_r = cv2.split(img_orig)
img_chnls.extend([img_r, img_g, img_b])
if heatmapaware:
for h in range(36):
heatmap_path = osp.join(heatmap_dir_path, "%02d.jpg" % h)
heatmap = read_image_grayscale(heatmap_path)
heatmap = cv2.resize(heatmap, dsize=(width_orig, height_orig))
img_chnls.append(heatmap)
if segmentaware:
for s in range(len(segs)):
segment_flag = True
for k in segs[s]:
# conf_thld = 0.5
if vkeypt[k * 3+2] < 0.5:
segment_flag = False
break
if segment_flag:
segment_path = osp.join(segment_dir_path, "%02d.jpg" % s)
segment = read_image_grayscale(segment_path)
segment = cv2.resize(segment, dsize=(width_orig, height_orig))
else:
segment = np.zeros((height_orig, width_orig), np.uint8)
img_chnls.append(segment)
# assert transform is not None
img = np.stack(img_chnls, axis=2)
img = np.array(img, np.float32)
img = transform(img, vkeypt)
img = to_tensor(img)
img = normalize_mt_input(img, heatmapaware, segmentaware)
vkeypt = np.array(vkeypt, np.float32)
# normalize keypt
if keyptaware:
for k in range(vkeypt.size):
if k % 3 == 0:
vkeypt[k] = (vkeypt[k] / float(256)) - 0.5
elif k % 3 == 1:
vkeypt[k] = (vkeypt[k] / float(256)) - 0.5
elif k % 3 == 2:
vkeypt[k] -= 0.5
return img, vid, camid, vcolor, vtype, vkeypt
def eval_multitasknet(imgs_path, label_path, pipline_path, keyptaware=True, multitask=True,
return_distmat=True, batchsize=1, heatmapaware=True, segmentaware=True,
data_from_pn=True):
''' start eval '''
stream = SdkInfer(pipline_path)
stream.init_stream()
mtn_query_dataset, vcolor2label, vtype2label = get_mtn_dataset(
label_path, imgs_path, 'query')
mtn_gallary_dataset, _, _ = get_mtn_dataset(
label_path, imgs_path, 'test', False)
qf = []
q_vids = []
q_camids = []
q_vcolors = []
q_vtypes = []
pred_q_vcolors = []
pred_q_vtypes = []
imgs, vids, camids, vcolors, vtypes, vkeypts = [], [], [], [], [], []
for i in range(len(mtn_query_dataset)):
if not data_from_pn:
img, vid, camid, vcolor, vtype, vkeypt = get_mtn_dataitem(mtn_query_dataset, i,
heatmapaware=heatmapaware,
segmentaware=segmentaware,
keyptaware=keyptaware)
imgs.append(img)
else:
img_path, vid, camid, vcolor, vtype, vkeypt, _, _ = mtn_query_dataset[i]
vids.append(vid)
camids.append(camid)
vcolors.append(vcolor)
vtypes.append(vtype)
vkeypts.append(vkeypt)
if (i + 1) % batchsize != 0:
continue
if not data_from_pn:
_ = stream.send_package_buf(b'MultiTaskNet0', np.array(imgs), 0)
_ = stream.send_package_buf(b'MultiTaskNet0', np.array(vkeypts), 1)
if keyptaware and multitask:
_, output_vcolors, output_vtypes, features = stream.get_result(
b'MultiTaskNet0', 0)
else:
if keyptaware and multitask:
_, output_vcolors, output_vtypes, features = infer_test(
stream, img_path, heatmapaware=heatmapaware, segmentaware=segmentaware)
qf.append(features)
q_vids.extend(vids)
q_camids.extend(camids)
if multitask:
q_vcolors.extend(vcolors)
q_vtypes.extend(vtypes)
pred_q_vcolors.extend(output_vcolors)
pred_q_vtypes.extend(output_vtypes)
imgs, vids, camids, vcolors, vtypes, vkeypts = [], [], [], [], [], []
qf = np.array(qf, dtype=np.float32)
qf.shape = (len(qf), 1024)
# qf = cat(qf) # (1664, 1024)
q_vids = np.asarray(q_vids)
q_camids = np.asarray(q_camids)
if multitask:
q_vcolors = np.asarray(q_vcolors)
q_vtypes = np.asarray(q_vtypes)
pred_q_vcolors = np.asarray(pred_q_vcolors)
pred_q_vtypes = np.asarray(pred_q_vtypes)
print(
f"Extracted features for query set, obtained {qf.shape[0]}-by-{qf.shape[1]} matrix")
gf = []
g_vids = []
g_camids = []
g_vcolors = []
g_vtypes = []
pred_g_vcolors = []
pred_g_vtypes = []
for i in range(len(mtn_gallary_dataset)):
if not data_from_pn:
img, vid, camid, vcolor, vtype, vkeypt = get_mtn_dataitem(mtn_gallary_dataset, i,
heatmapaware=heatmapaware,
segmentaware=segmentaware,
keyptaware=keyptaware)
imgs.append(img)
else:
img_path, vid, camid, vcolor, vtype, vkeypt, _, _ = mtn_gallary_dataset[i]
vids.append(vid)
camids.append(camid)
vcolors.append(vcolor)
vtypes.append(vtype)
vkeypts.append(vkeypt)
if (i + 1) % batchsize != 0:
continue
if not data_from_pn:
_ = stream.send_package_buf(b'MultiTaskNet0', np.array(imgs), 0)
_ = stream.send_package_buf(b'MultiTaskNet0', np.array(vkeypts), 1)
if keyptaware and multitask:
_, output_vcolors, output_vtypes, features = stream.get_result(
b'MultiTaskNet0', 0)
else:
if keyptaware and multitask:
_, output_vcolors, output_vtypes, features = infer_test(
stream, img_path, heatmapaware=heatmapaware, segmentaware=segmentaware)
gf.append(features) # (32, 1024)
g_vids.extend(vids)
g_camids.extend(camids)
if multitask:
g_vcolors.extend(vcolors)
g_vtypes.extend(vtypes)
pred_g_vcolors.extend(output_vcolors)
pred_g_vtypes.extend(output_vtypes)
_, vids, camids, vcolors, vtypes, vkeypts = [], [], [], [], [], []
stream.destroy()
gf = np.array(gf, dtype=np.float32)
gf.shape = (len(gf), 1024)
g_vids = np.asarray(g_vids)
g_camids = np.asarray(g_camids)
if multitask:
g_vcolors = np.asarray(g_vcolors)
g_vtypes = np.asarray(g_vtypes)
pred_g_vcolors = np.asarray(pred_g_vcolors)
pred_g_vtypes = np.asarray(pred_g_vtypes)
print(
f"Extracted features for gallery set, obtained {gf.shape[0]}-by-{gf.shape[1]} matrix")
m, n = qf.shape[0], gf.shape[0]
qf_distmat = qf**2
qf_distmat = np.expand_dims(np.sum(qf_distmat, 1), axis=1)
qf_distmat = np.broadcast_to(qf_distmat, (m, n))
gf_distmat = gf**2
gf_distmat = np.expand_dims(np.sum(gf_distmat, 1), axis=1)
gf_distmat = np.broadcast_to(gf_distmat, (n, m))
gf_distmat = gf_distmat.T
distmat = qf_distmat + gf_distmat
distmat = distmat - 2 * np.matmul(qf, gf.T)
print("Computing CMC and mAP")
cmc, map_ = eval_market1501(distmat, q_vids, g_vids, q_camids, g_camids, 50)
print("Results ----------")
print(f"mAP: {map_:8.2%}")
print("CMC curve")
for r in range(1, 51):
print(f"Rank-{r:<3}: {cmc[r-1]:8.2%}")
print("------------------")
if multitask:
print("Compute attribute classification accuracy")
for q in range(q_vcolors.size):
q_vcolors[q] = vcolor2label[q_vcolors[q]]
for g in range(g_vcolors.size):
g_vcolors[g] = vcolor2label[g_vcolors[g]]
q_vcolor_errors = np.argmax(pred_q_vcolors, axis=1) - q_vcolors
g_vcolor_errors = np.argmax(pred_g_vcolors, axis=1) - g_vcolors
vcolor_error_num = np.count_nonzero(
q_vcolor_errors) + np.count_nonzero(g_vcolor_errors)
vcolor_accuracy = 1.0 - \
(float(vcolor_error_num) /
float(distmat.shape[0] + distmat.shape[1]))
print(
f"Color classification accuracy: {vcolor_accuracy:8.2%}")
for q_ in range(q_vtypes.size):
q_vtypes[q_] = vtype2label[q_vtypes[q_]]
for g_ in range(g_vtypes.size):
g_vtypes[g_] = vtype2label[g_vtypes[g_]]
q_vtype_errors = np.argmax(pred_q_vtypes, axis=1) - q_vtypes
g_vtype_errors = np.argmax(pred_g_vtypes, axis=1) - g_vtypes
vtype_error_num = np.count_nonzero(
q_vtype_errors) + np.count_nonzero(g_vtype_errors)
vtype_accuracy = 1.0 - (float(vtype_error_num) /
float(distmat.shape[0] + distmat.shape[1]))
print(f"Type classification accuracy: {vtype_accuracy:8.2%}")
print("------------------")
if return_distmat:
return distmat
return cmc[0]
parser = argparse.ArgumentParser(description='Eval MultiTaskNet')
parser.add_argument('--img_path', type=str, default='../data/MultiTaskNet/veri')
parser.add_argument('--label_path', type=str, default='../data/MultiTaskNet/veri')
parser.add_argument('--pipline_path', type=str, default='../pipline/pamtri.pipline')
parser.add_argument('--segmentaware', type=ast.literal_eval, default=True)
parser.add_argument('--heatmapaware', type=ast.literal_eval, default=False)
args = parser.parse_args()
if __name__ == '__main__':
eval_multitasknet(args.img_path, args.label_path, args.pipline_path,
segmentaware=args.segmentaware, heatmapaware=args.heatmapaware)
|
c26d15be5b2bfad8ee3890ffc2733f31f238e36b
|
c43b5835b4499f4e6d6fa4efda9546dc67ae0767
|
/sfepy/examples/diffusion/laplace_iga_interactive.py
|
10b0a27cf3b685e95d4422282d684604421525be
|
[
"BSD-3-Clause"
] |
permissive
|
sfepy/sfepy
|
4b74e7839b5e7b5e8d90e19ab6e90a068fe33df4
|
0c2d1690e764b601b2687be1e4261b82207ca366
|
refs/heads/master
| 2023-09-04T22:07:28.041123
| 2023-08-28T14:47:50
| 2023-08-28T14:47:50
| 802,525
| 651
| 188
|
BSD-3-Clause
| 2023-09-12T07:28:19
| 2010-07-28T09:14:41
|
Python
|
UTF-8
|
Python
| false
| false
| 9,736
|
py
|
laplace_iga_interactive.py
|
#!/usr/bin/env python
r"""
Laplace equation with Dirichlet boundary conditions solved in a single patch
NURBS domain using the isogeometric analysis (IGA) approach, using commands
for interactive use.
This script allows the creation of a customisable NURBS surface using igakit
built-in CAD routines, which is then saved in custom HDF5-based files with
.iga extension.
Notes
-----
The ``create_patch`` function creates a NURBS-patch of the area between two
coplanar nested circles using igakit CAD built-in routines. The created patch
is not connected in the orthoradial direction. This is a problem when the
disconnected boundary is not perpendicular to the line connecting the two
centres of the circles, as the solution then exhibits a discontinuity along
this line. A workaround for this issue is to enforce perpendicularity by
changing the start angle in function ``igakit.cad.circle`` (see the code down
below for the actual trick). The discontinuity disappears.
Usage Examples
--------------
Default options, storing results in this file's parent directory::
python3 sfepy/examples/diffusion/laplace_iga_interactive.py
Command line options for tweaking the geometry of the NURBS-patch & more::
python3 sfepy/examples/diffusion/laplace_iga_interactive.py --R1=0.7 --C2=0.1,0.1 --viewpatch
View the results using::
sfepy-view concentric_circles.vtk
"""
from argparse import RawDescriptionHelpFormatter, ArgumentParser
import os
import sys
sys.path.append('.')
import numpy as nm
from sfepy import data_dir
from sfepy.base.ioutils import ensure_path
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Integral, Equation, Equations,
Problem)
from sfepy.discrete.iga.domain import IGDomain
from sfepy.discrete.common.fields import Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
def create_patch(R1, R2, C1, C2, order=2, viewpatch=False):
"""
Create a single 2d NURBS-patch of the area between two coplanar nested
circles using igakit.
Parameters
----------
R1 : float
Radius of the inner circle.
R2 : float
Radius of the outer circle.
C1 : list of two floats
Coordinates of the center of the inner circle given as [x1, y1].
C2 : list of two floats
Coordinates of the center of the outer circle given as [x2, y2].
order : int, optional
Degree of the NURBS basis functions. The default is 2.
viewpatch : bool, optional
When set to True, display the NURBS patch. The default is False.
Returns
-------
None.
"""
from sfepy.discrete.iga.domain_generators import create_from_igakit
import sfepy.discrete.iga.io as io
from igakit.cad import circle, ruled
from igakit.plot import plt as iplt
from numpy import pi
# Assert the inner circle is inside the outer one
inter_centers = nm.sqrt((C2[0]-C1[0])**2 + (C2[1]-C1[1])**2)
assert R2>R1, "Outer circle should have a larger radius than the inner one"
assert inter_centers<R2-R1, "Circles are not nested"
# Geometry Creation
centers_direction = [C2[0]-C1[0], C2[1]-C1[1]]
if centers_direction[0]==0 and centers_direction[1]==0:
start_angle = 0.0
else:
start_angle = nm.arctan2(centers_direction[1], centers_direction[0])
c1 = circle(radius=R1, center=C1, angle=(start_angle, start_angle + 2*pi))
c2 = circle(radius=R2, center=C2, angle=(start_angle, start_angle + 2*pi))
srf = ruled(c1,c2).transpose() # make the radial direction first
# Refinement
insert_U = insert_uniformly(srf.knots[0], 6)
insert_V = insert_uniformly(srf.knots[1], 6)
srf.refine(0, insert_U).refine(1, insert_V)
# Setting the NURBS-surface degree
srf.elevate(0, order-srf.degree[0] if order-srf.degree[0] > 0 else 0)
srf.elevate(1, order-srf.degree[1] if order-srf.degree[1] > 0 else 0)
# Sfepy .iga file creation
nurbs, bmesh, regions = create_from_igakit(srf, verbose=True)
# Save .iga file in sfepy/meshes/iga
filename_domain = data_dir + '/meshes/iga/concentric_circles.iga'
io.write_iga_data(filename_domain, None, nurbs.knots, nurbs.degrees,
nurbs.cps, nurbs.weights, nurbs.cs, nurbs.conn,
bmesh.cps, bmesh.weights, bmesh.conn, regions)
if viewpatch:
iplt.use('matplotlib')
iplt.figure()
iplt.plot(srf)
iplt.show()
def insert_uniformly(U, n):
"""
Find knots to uniformly add to U.
[Code from igakit/demo/venturi.py file]
Given a knot vector U and the number of uniform spans desired,
find the knots which need to be inserted.
Parameters
----------
U : numpy.ndarray
Original knot vector for a C^p-1 space.
n : int
Target number of uniformly-spaced knot spans.
Returns
-------
Knots to be inserted into U
"""
U0 = U
dU=(U.max()-U.min())/float(n) # target dU in knot vector
idone=0
while idone == 0:
# Add knots in middle of spans which are too large
Uadd=[]
for i in range(len(U)-1):
if U[i+1]-U[i] > dU:
Uadd.append(0.5*(U[i+1]+U[i]))
# Now we add these knots (once only, assumes C^(p-1))
if len(Uadd) > 0:
U = nm.sort(nm.concatenate([U,nm.asarray(Uadd)]))
else:
idone=1
# And now a little Laplacian smoothing
for num_iterations in range(5):
for i in range(len(U)-2):
if abs(U0[U0.searchsorted(U[i+1])]-U[i+1]) > 1.0e-14:
U[i+1] = 0.5*(U[i]+U[i+2])
return nm.setdiff1d(U,U0)
helps = {
'output_dir' :
'output directory',
'R1' :
'Inner circle radius [default: %(default)s]',
'R2' :
'Outer circle radius [default: %(default)s]',
'C1' :
'centre of the inner circle [default: %(default)s]',
'C2' :
'centre of the outer circle [default: %(default)s]',
'order' :
'field approximation order [default: %(default)s]',
'viewpatch' :
'generate a plot of the NURBS-patch',
}
def main():
parser = ArgumentParser(description=__doc__.rstrip(),
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('-o', '--output-dir', default='.',
help=helps['output_dir'])
parser.add_argument('--R1', metavar='R1',
action='store', dest='R1',
default='0.5', help=helps['R1'])
parser.add_argument('--R2', metavar='R2',
action='store', dest='R2',
default='1.0', help=helps['R2'])
parser.add_argument('--C1', metavar='C1',
action='store', dest='C1',
default='0.0,0.0', help=helps['C1'])
parser.add_argument('--C2', metavar='C2',
action='store', dest='C2',
default='0.0,0.0', help=helps['C2'])
parser.add_argument('--order', metavar='int', type=int,
action='store', dest='order',
default=2, help=helps['order'])
parser.add_argument('-v', '--viewpatch',
action='store_true', dest='viewpatch',
default=False, help=helps['viewpatch'])
options = parser.parse_args()
# Creation of the NURBS-patch with igakit
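# Note: the radius/centre options arrive as strings and are turned into
# numbers/tuples with eval() below; for untrusted input,
# ast.literal_eval would be the safer parser.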
R1 = eval(options.R1)
R2 = eval(options.R2)
C1 = list(eval(options.C1))
C2 = list(eval(options.C2))
order = options.order
viewpatch = options.viewpatch
create_patch(R1, R2, C1, C2, order=order, viewpatch=viewpatch)
# Setting a Domain instance
filename_domain = data_dir + '/meshes/iga/concentric_circles.iga'
domain = IGDomain.from_file(filename_domain)
# Sub-domains
omega = domain.create_region('Omega', 'all')
Gamma_out = domain.create_region('Gamma_out', 'vertices of set xi01',
kind='facet')
Gamma_in = domain.create_region('Gamma_in', 'vertices of set xi00',
kind='facet')
# Field (featuring order elevation)
order_increase = order - domain.nurbs.degrees[0]
order_increase *= int(order_increase>0)
field = Field.from_args('fu', nm.float64, 'scalar', omega,
approx_order='iga', space='H1',
poly_space_base='iga')
# Variables
u = FieldVariable('u', 'unknown', field) # unknown function
v = FieldVariable('v', 'test', field, primary_var_name='u') # test function
# Integral
integral = Integral('i', order=2*field.approx_order)
# Term
t = Term.new('dw_laplace( v, u )', integral, omega, v=v, u=u)
# Equation
eq = Equation('laplace', t)
eqs = Equations([eq])
# Boundary Conditions
u_in = EssentialBC('u_in', Gamma_in, {'u.all' : 7.0})
u_out = EssentialBC('u_out', Gamma_out, {'u.all' : 3.0})
# solvers
ls = ScipyDirect({})
nls_status = IndexedStruct()
nls = Newton({}, lin_solver=ls, status=nls_status)
# problem instance
pb = Problem('potential', equations=eqs, active_only=True)
# Set boundary conditions
pb.set_bcs(ebcs=Conditions([u_in, u_out]))
# solving
pb.set_solver(nls)
status = IndexedStruct()
state = pb.solve(status=status, save_results=True, verbose=True)
# Saving the results to a classic VTK file
filename = os.path.join(options.output_dir, 'concentric_circles.vtk')
ensure_path(filename)
pb.save_state(filename, state)
if __name__ == '__main__':
main()
|
1ba24fc22f9067cb34f11da38da871a768426056
|
503bc00f0a403c225073502a3db5da3e864346f0
|
/btrfs-heatmap
|
a4a4825e2f795121ba2dd579e3f122b35c6a577a
|
[
"MIT"
] |
permissive
|
knorrie/btrfs-heatmap
|
5a605a9d7c26d444053803cc27f6f02e8f3946a4
|
381fad6a0e38d8273fab1f9a7f90eb28007811e9
|
refs/heads/master
| 2021-07-03T08:55:01.584728
| 2020-10-14T17:12:40
| 2020-10-14T17:35:11
| 51,668,428
| 176
| 7
|
MIT
| 2022-02-22T17:28:15
| 2016-02-13T22:17:21
|
Python
|
UTF-8
|
Python
| false
| false
| 25,307
|
btrfs-heatmap
|
#!/usr/bin/python3
#
# Copyright (C) 2016 Hans van Kranenburg <hans@knorrie.org>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import argparse
import btrfs
import errno
import os
import struct
import sys
import types
import zlib
class HeatmapError(Exception):
pass
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--order",
type=int,
help="Hilbert curve order (default: automatically chosen)",
)
parser.add_argument(
"--size",
type=int,
help="Image size (default: 10). Height/width is 2^size",
)
parser.add_argument(
"--sort",
choices=['physical', 'virtual'],
default='physical',
help="Show disk usage sorted on dev_extent (physical) or chunk/stripe (virtual)"
)
parser.add_argument(
"--blockgroup",
type=int,
help="Instead of a filesystem overview, show extents in a block group",
)
parser.add_argument(
"-v",
"--verbose",
action="count",
help="increase debug output verbosity (-v, -vv, -vvv, etc)",
)
parser.add_argument(
"-q",
"--quiet",
action="count",
help="decrease debug output verbosity (-q, -qq, -qqq, etc)",
)
parser.add_argument(
"-o",
"--output",
dest="output",
help="Output png file name or directory (default: filename automatically chosen)",
)
parser.add_argument(
"--curve",
choices=['hilbert', 'linear', 'snake'],
default='hilbert',
help="Space filling curve type or alternative. Default is hilbert.",
)
parser.add_argument(
"mountpoint",
help="Btrfs filesystem mountpoint",
)
return parser.parse_args()
struct_color = struct.Struct('!BBB')
black = (0x00, 0x00, 0x00)
white = (0xff, 0xff, 0xff)
p_red = (0xca, 0x53, 0x5c)
fuchsia = (0xde, 0x5d, 0x94)
curry = (0xf9, 0xe1, 0x7e)
clover = (0x6e, 0xa6, 0x34)
moss = (0x81, 0x88, 0x3c)
bluebell = (0xaa, 0xcc, 0xeb)
pool = (0x8f, 0xdd, 0xea)
beet = (0x9d, 0x54, 0x9c)
aubergine = (0x6a, 0x5a, 0x7f)
plum = (0xdb, 0xc9, 0xea)
slate = (0x75, 0x77, 0x7b)
chocolate = (0x6f, 0x5e, 0x55)
red = (0xff, 0x00, 0x33)
blue = (0x00, 0x00, 0xff)
blue_white = (0x99, 0xcc, 0xff) # for mixed bg
dev_extent_colors = {
btrfs.BLOCK_GROUP_DATA: white,
btrfs.BLOCK_GROUP_METADATA: blue,
btrfs.BLOCK_GROUP_SYSTEM: red,
btrfs.BLOCK_GROUP_DATA | btrfs.BLOCK_GROUP_METADATA: blue_white,
}
metadata_extent_colors = {
btrfs.ctree.ROOT_TREE_OBJECTID: p_red,
btrfs.ctree.EXTENT_TREE_OBJECTID: beet,
btrfs.ctree.CHUNK_TREE_OBJECTID: moss,
btrfs.ctree.DEV_TREE_OBJECTID: aubergine,
btrfs.ctree.FS_TREE_OBJECTID: bluebell,
btrfs.ctree.CSUM_TREE_OBJECTID: clover,
btrfs.ctree.QUOTA_TREE_OBJECTID: fuchsia,
btrfs.ctree.UUID_TREE_OBJECTID: chocolate,
btrfs.ctree.FREE_SPACE_TREE_OBJECTID: plum,
btrfs.ctree.DATA_RELOC_TREE_OBJECTID: slate,
}
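# Each curve generator below yields (y, x, linear) positions that visit
# every cell of a 2^order x 2^order grid exactly once. The Hilbert
# variant recursively expands a U-shaped base motif so that consecutive
# linear offsets stay spatially adjacent, which is what makes contiguous
# byte ranges appear as compact blobs in the rendered image.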
def hilbert(order):
U = (-1, 0)
R = (0, 1)
D = (1, 0)
L = (0, -1)
URDR = (U, R, D, R)
RULU = (R, U, L, U)
URDD = (U, R, D, D)
LDRR = (L, D, R, R)
RULL = (R, U, L, L)
DLUU = (D, L, U, U)
LDRD = (L, D, R, D)
DLUL = (D, L, U, L)
inception = {
URDR: (RULU, URDR, URDD, LDRR),
RULU: (URDR, RULU, RULL, DLUU),
URDD: (RULU, URDR, URDD, LDRD),
LDRR: (DLUL, LDRD, LDRR, URDR),
RULL: (URDR, RULU, RULL, DLUL),
DLUU: (LDRD, DLUL, DLUU, RULU),
LDRD: (DLUL, LDRD, LDRR, URDD),
DLUL: (LDRD, DLUL, DLUU, RULL)
}
pos = [(2 ** order) - 1, 0, 0] # y, x, linear
def walk(steps, level):
if level > 1:
for substeps in inception[steps]:
for subpos in walk(substeps, level - 1):
yield subpos
else:
for step in steps:
yield pos
pos[0] += step[0] # y
pos[1] += step[1] # x
pos[2] += 1 # linear
return walk(URDR, order)
def linear(order):
edge_len = 2 ** order
l = 0
for y in range(0, edge_len):
for x in range(0, edge_len):
yield (y, x, l)
l += 1
def snake(order):
edge_len = 2 ** order
l = 0
for y in range(0, edge_len, 2):
for x in range(0, edge_len):
yield (y, x, l)
l += 1
y += 1
for x in range(edge_len - 1, -1, -1):
yield (y, x, l)
l += 1
curves = {
'hilbert': hilbert,
'linear': linear,
'snake': snake,
}
class Grid(object):
def __init__(self, order, size, total_bytes, default_granularity, verbose,
min_brightness=None, curve=None):
self.order, self.size = choose_order_size(order, size, total_bytes, default_granularity)
self.verbose = verbose
if curve is None:
curve = 'hilbert'
self.curve = curves.get(curve)(self.order)
self._pixel_mix = []
self._pixel_dirty = False
self._next_pixel()
self.height = 2 ** self.order
self.width = 2 ** self.order
self.num_steps = (2 ** self.order) ** 2
self.total_bytes = total_bytes
self.bytes_per_pixel = total_bytes / self.num_steps
self._color_cache = {}
self._add_color_cache(black)
self._grid = [[self._color_cache[black]
for x in range(self.width)]
for y in range(self.height)]
self._finished = False
if min_brightness is None:
self._min_brightness = 0.1
else:
if min_brightness < 0 or min_brightness > 1:
raise ValueError("min_brightness out of range (need >= 0 and <= 1)")
self._min_brightness = min_brightness
if self.verbose >= 0:
print("grid curve {} order {} size {} height {} width {} total_bytes {} "
"bytes_per_pixel {}".format(curve, self.order, self.size,
self.height, self.width, total_bytes,
self.bytes_per_pixel, self.num_steps))
def _next_pixel(self):
if self._pixel_dirty is True:
self._finish_pixel()
self.y, self.x, self.linear = next(self.curve)
def _add_to_pixel_mix(self, color, used_pct, pixel_pct):
self._pixel_mix.append((color, used_pct, pixel_pct))
self._pixel_dirty = True
def _pixel_mix_to_rgbytes(self):
R_composite = sum(color[0] * pixel_pct for color, _, pixel_pct in self._pixel_mix)
G_composite = sum(color[1] * pixel_pct for color, _, pixel_pct in self._pixel_mix)
B_composite = sum(color[2] * pixel_pct for color, _, pixel_pct in self._pixel_mix)
weighted_usage = sum(used_pct * pixel_pct
for _, used_pct, pixel_pct in self._pixel_mix)
weighted_usage_min_bright = self._min_brightness + \
weighted_usage * (1 - self._min_brightness)
RGB = (
int(round(R_composite * weighted_usage_min_bright)),
int(round(G_composite * weighted_usage_min_bright)),
int(round(B_composite * weighted_usage_min_bright)),
)
if RGB in self._color_cache:
return self._color_cache[RGB]
return self._add_color_cache(RGB)
def _add_color_cache(self, color):
rgbytes = struct_color.pack(*color)
self._color_cache[color] = rgbytes
return rgbytes
def _set_pixel(self, rgbytes):
self._grid[self.y][self.x] = rgbytes
def _finish_pixel(self):
rgbytes = self._pixel_mix_to_rgbytes()
self._set_pixel(rgbytes)
if self.verbose >= 3:
print(" pixel y {} x{} linear {} rgb #{:02x}{:02x}{:02x}".format(
self.y, self.x, self.linear, *[byte for byte in rgbytes]))
self._pixel_mix = []
self._pixel_dirty = False
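# fill() paints the byte range [first_byte, first_byte + length) along
# the curve. Interior pixels belong to this range alone and get their
# colour set directly; the first and last pixels may be shared with
# neighbouring ranges, so only the covered fraction is mixed in and the
# pixel is finalised once the walk moves past it.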
def fill(self, first_byte, length, used_pct, color=white):
if self._finished is True:
raise Exception("Cannot change grid any more after retrieving the result once!")
first_pixel = int(first_byte / self.bytes_per_pixel)
last_pixel = int((first_byte + length - 1) / self.bytes_per_pixel)
while self.linear < first_pixel:
self._next_pixel()
if first_pixel == last_pixel:
pct_of_pixel = length / self.bytes_per_pixel
if self.verbose >= 2:
print(" in_pixel {0} {1:.2f}%".format(first_pixel, pct_of_pixel * 100))
self._add_to_pixel_mix(color, used_pct, pct_of_pixel)
else:
pct_of_first_pixel = \
(self.bytes_per_pixel - (first_byte % self.bytes_per_pixel)) / self.bytes_per_pixel
pct_of_last_pixel = \
((first_byte + length) % self.bytes_per_pixel) / self.bytes_per_pixel
if pct_of_last_pixel == 0:
pct_of_last_pixel = 1
if self.verbose >= 2:
print(" first_pixel {0} {1:.2f}% last_pixel {2} {3:.2f}%".format(
first_pixel, pct_of_first_pixel * 100, last_pixel, pct_of_last_pixel * 100))
# add our part of the first pixel, may be shared with previous fill
self._add_to_pixel_mix(color, used_pct, pct_of_first_pixel)
# all intermediate pixels are ours, set brightness directly
if self.linear < last_pixel - 1:
self._next_pixel()
self._add_to_pixel_mix(color, used_pct, pixel_pct=1)
rgbytes = self._pixel_mix_to_rgbytes()
self._set_pixel(rgbytes)
if self.verbose >= 3:
print(" pixel range linear {} to {} rgb #{:02x}{:02x}{:02x}".format(
self.linear, last_pixel - 1, *[byte for byte in rgbytes]))
while self.linear < last_pixel - 1:
self._next_pixel()
self._set_pixel(rgbytes)
self._next_pixel()
# add our part of the last pixel, may be shared with next fill
self._add_to_pixel_mix(color, used_pct, pct_of_last_pixel)
def write_png(self, pngfile):
if self.verbose >= 0:
print("pngfile {}".format(pngfile))
if self._finished is False:
if self._pixel_dirty is True:
self._finish_pixel()
self._finished = True
if self.size > self.order:
scale = 2 ** (self.size - self.order)
rows = ((pix for pix in row for _ in range(scale))
for row in self._grid for _ in range(scale))
_write_png(pngfile, self.width * scale, self.height * scale, rows)
else:
_write_png(pngfile, self.width, self.height, self._grid)
def walk_chunks(fs, devices=None, order=None, size=None,
default_granularity=33554432, verbose=0, min_brightness=None, curve=None):
if devices is None:
devices = list(fs.devices())
devids = None
if verbose >= 0:
print("scope chunks")
else:
if isinstance(devices, types.GeneratorType):
devices = list(devices)
devids = [device.devid for device in devices]
if verbose >= 0:
print("scope chunk stripes on devices {}".format(' '.join(map(str, devids))))
total_bytes = sum(device.total_bytes for device in devices)
grid = Grid(order, size, total_bytes, default_granularity, verbose, min_brightness, curve)
byte_offset = 0
for chunk in fs.chunks():
if devids is None:
stripes = chunk.stripes
else:
stripes = [stripe for stripe in chunk.stripes if stripe.devid in devids]
if len(stripes) == 0:
continue
try:
block_group = fs.block_group(chunk.vaddr, chunk.length)
except btrfs.ctree.ItemNotFoundError:
continue
used_pct = block_group.used / block_group.length
length = btrfs.volumes.chunk_to_dev_extent_length(chunk) * len(stripes)
if verbose >= 1:
print(block_group)
print(chunk)
print("allocated physical space for chunk at {}: {}".format(
chunk.vaddr, btrfs.utils.pretty_size(length)))
if verbose >= 2:
for stripe in stripes:
print(" {}".format(stripe))
if block_group.flags in dev_extent_colors:
color = dev_extent_colors[block_group.flags]
else:
color = dev_extent_colors[block_group.flags & btrfs.BLOCK_GROUP_TYPE_MASK]
grid.fill(byte_offset, length, used_pct, color)
byte_offset += length
return grid
def walk_dev_extents(fs, devices=None, order=None, size=None,
default_granularity=33554432, verbose=0, min_brightness=None, curve=None):
if devices is None:
devices = list(fs.devices())
dev_extents = fs.dev_extents()
else:
if isinstance(devices, types.GeneratorType):
devices = list(devices)
dev_extents = (dev_extent
for device in devices
for dev_extent in fs.dev_extents(device.devid, device.devid))
if verbose >= 0:
print("scope device {}".format(' '.join([str(device.devid) for device in devices])))
total_bytes = 0
device_grid_offset = {}
for device in devices:
device_grid_offset[device.devid] = total_bytes
total_bytes += device.total_bytes
grid = Grid(order, size, total_bytes, default_granularity, verbose, min_brightness, curve)
block_group_cache = {}
for dev_extent in dev_extents:
if dev_extent.vaddr in block_group_cache:
block_group = block_group_cache[dev_extent.vaddr]
else:
try:
block_group = fs.block_group(dev_extent.vaddr)
except IndexError:
continue
if block_group.flags & btrfs.BLOCK_GROUP_PROFILE_MASK != 0:
block_group_cache[dev_extent.vaddr] = block_group
used_pct = block_group.used / block_group.length
if verbose >= 1:
print("dev_extent devid {0} paddr {1} length {2} pend {3} type {4} "
"used_pct {5:.2f}".format(dev_extent.devid, dev_extent.paddr, dev_extent.length,
dev_extent.paddr + dev_extent.length - 1,
btrfs.utils.block_group_flags_str(block_group.flags),
used_pct * 100))
first_byte = device_grid_offset[dev_extent.devid] + dev_extent.paddr
if block_group.flags in dev_extent_colors:
color = dev_extent_colors[block_group.flags]
else:
color = dev_extent_colors[block_group.flags & btrfs.BLOCK_GROUP_TYPE_MASK]
grid.fill(first_byte, dev_extent.length, used_pct, color)
return grid
def _get_metadata_root(extent):
if extent.refs > 1:
return btrfs.ctree.FS_TREE_OBJECTID
if len(extent.shared_block_refs) > 0:
return btrfs.ctree.FS_TREE_OBJECTID
root = extent.tree_block_refs[0].root
if root >= btrfs.ctree.FIRST_FREE_OBJECTID and root <= btrfs.ctree.LAST_FREE_OBJECTID:
return btrfs.ctree.FS_TREE_OBJECTID
return root
def walk_extents(fs, block_groups, order=None, size=None, default_granularity=None, verbose=0,
curve=None):
if isinstance(block_groups, types.GeneratorType):
block_groups = list(block_groups)
fs_info = fs.fs_info()
nodesize = fs_info.nodesize
if default_granularity is None:
default_granularity = fs_info.sectorsize
if verbose >= 0:
print("scope block_group {}".format(' '.join([str(b.vaddr) for b in block_groups])))
total_bytes = 0
block_group_grid_offset = {}
for block_group in block_groups:
block_group_grid_offset[block_group] = total_bytes - block_group.vaddr
total_bytes += block_group.length
grid = Grid(order, size, total_bytes, default_granularity, verbose, curve=curve)
tree = btrfs.ctree.EXTENT_TREE_OBJECTID
for block_group in block_groups:
if verbose > 0:
print(block_group)
if block_group.flags & btrfs.BLOCK_GROUP_TYPE_MASK == btrfs.BLOCK_GROUP_DATA:
# Only DATA, so also not DATA|METADATA (mixed). In this case we
# take a shortcut. Since we know that all extents are data extents,
# which get their usual white color, we don't need to load the
# actual extent objects.
min_key = btrfs.ctree.Key(block_group.vaddr, 0, 0)
max_key = btrfs.ctree.Key(block_group.vaddr + block_group.length, 0, 0) - 1
for header, _ in btrfs.ioctl.search_v2(fs.fd, tree, min_key, max_key, buf_size=65536):
if header.type == btrfs.ctree.EXTENT_ITEM_KEY:
length = header.offset
first_byte = block_group_grid_offset[block_group] + header.objectid
if verbose >= 1:
print("extent vaddr {0} first_byte {1} type {2} length {3}".format(
header.objectid, first_byte,
btrfs.ctree.key_type_str(header.type), length))
grid.fill(first_byte, length, 1, white)
else:
# The block group is METADATA or DATA|METADATA or SYSTEM (chunk
# tree metadata). We load all extent info to figure out which
# btree root metadata extents belong to.
min_vaddr = block_group.vaddr
max_vaddr = block_group.vaddr + block_group.length - 1
for extent in fs.extents(min_vaddr, max_vaddr,
load_data_refs=True, load_metadata_refs=True):
if isinstance(extent, btrfs.ctree.ExtentItem):
length = extent.length
if extent.flags & btrfs.ctree.EXTENT_FLAG_DATA:
color = white
elif extent.flags & btrfs.ctree.EXTENT_FLAG_TREE_BLOCK:
color = metadata_extent_colors.get(_get_metadata_root(extent), white)
else:
raise Exception("BUG: expected either DATA or TREE_BLOCK flag, but got "
"{}".format(btrfs.utils.extent_flags_str(extent.flags)))
elif isinstance(extent, btrfs.ctree.MetaDataItem):
length = nodesize
color = metadata_extent_colors.get(_get_metadata_root(extent), white)
first_byte = block_group_grid_offset[block_group] + extent.vaddr
if verbose >= 1:
print("extent vaddr {0} first_byte {1} type {2} length {3}".format(
extent.vaddr, first_byte,
btrfs.ctree.key_type_str(extent.key.type), length))
grid.fill(first_byte, length, 1, color)
return grid
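# Choose the curve order so that one pixel covers roughly
# default_granularity bytes: 4^order pixels must span total_bytes, hence
# order ~ log2(sqrt(total_bytes / granularity)), capped at 10 (a
# 1024x1024 grid) unless an explicit order was requested.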
def choose_order_size(order=None, size=None, total_bytes=None, default_granularity=None):
order_was_none = order is None
if order_was_none:
import math
order = min(10, int(math.ceil(math.log(math.sqrt(total_bytes/default_granularity), 2))))
if size is None:
if order > 10:
size = order
else:
size = 10
if size < order:
if order_was_none:
order = size
else:
raise HeatmapError("size ({}) cannot be smaller than order ({})".format(size, order))
return order, size
def generate_png_file_name(output=None, parts=None):
if output is not None and os.path.isdir(output):
output_dir = output
output_file = None
else:
output_dir = None
output_file = output
if output_file is None:
if parts is None:
parts = []
else:
parts.append('at')
import time
parts.append(str(int(time.time())))
output_file = '_'.join([str(part) for part in parts]) + '.png'
if output_dir is None:
return output_file
return os.path.join(output_dir, output_file)
class StdoutWriter:
def __init__(self):
self.pos = 0
self.bytelist = []
def write(self, data):
self.bytelist[self.pos:self.pos+len(data)] = data
self.pos += len(data)
def tell(self):
return self.pos
def seek(self, pos):
self.pos = pos
def close(self):
sys.stdout.buffer.write(bytes(self.bytelist))
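# Minimal hand-rolled PNG encoder. A PNG is the 8-byte signature
# followed by chunks, each laid out as a 4-byte big-endian length, a
# 4-byte type, the data, and a CRC32 over type + data. The IDAT length
# is unknown until the zlib stream is flushed, so a zero placeholder is
# written first and patched afterwards via seek(); StdoutWriter above
# exists so that this backpatching also works when streaming to stdout.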
def _write_png(pngfile, width, height, rows, color_type=2):
struct_len = struct_crc = struct.Struct('!I')
if pngfile == '-':
out = StdoutWriter()
else:
out = open(pngfile, 'wb')
out.write(b'\x89PNG\r\n\x1a\n')
# IHDR
out.write(struct_len.pack(13))
ihdr = struct.Struct('!4s2I5B').pack(b'IHDR', width, height, 8, color_type, 0, 0, 0)
out.write(ihdr)
out.write(struct_crc.pack(zlib.crc32(ihdr) & 0xffffffff))
# IDAT
length_pos = out.tell()
out.write(b'\x00\x00\x00\x00IDAT')
crc = zlib.crc32(b'IDAT')
datalen = 0
compress = zlib.compressobj()
for row in rows:
for uncompressed in (b'\x00', b''.join(row)):
compressed = compress.compress(uncompressed)
if len(compressed) > 0:
crc = zlib.crc32(compressed, crc)
datalen += len(compressed)
out.write(compressed)
compressed = compress.flush()
if len(compressed) > 0:
crc = zlib.crc32(compressed, crc)
datalen += len(compressed)
out.write(compressed)
out.write(struct_crc.pack(crc & 0xffffffff))
# IEND
out.write(b'\x00\x00\x00\x00IEND\xae\x42\x60\x82')
# Go back and write length of the IDAT
out.seek(length_pos)
out.write(struct_len.pack(datalen))
out.close()
def main():
args = parse_args()
path = args.mountpoint
verbose = 0
if args.verbose is not None:
verbose += args.verbose
if args.quiet is not None:
verbose -= args.quiet
if args.output == '-':
verbose = -1
try:
with btrfs.FileSystem(path) as fs:
filename_parts = ['fsid', fs.fsid]
if args.curve != 'hilbert':
filename_parts.append(args.curve)
bg_vaddr = args.blockgroup
if bg_vaddr is None:
if args.sort == 'physical':
grid = walk_dev_extents(fs, order=args.order, size=args.size,
verbose=verbose, curve=args.curve)
elif args.sort == 'virtual':
filename_parts.append('chunks')
grid = walk_chunks(fs, order=args.order, size=args.size,
verbose=verbose, curve=args.curve)
else:
raise HeatmapError("Invalid sort option {}".format(args.sort))
else:
try:
block_group = fs.block_group(bg_vaddr)
except IndexError:
raise HeatmapError("No block group at vaddr {}!".format(bg_vaddr))
grid = walk_extents(fs, [block_group], order=args.order, size=args.size,
verbose=verbose, curve=args.curve)
filename_parts.extend(['blockgroup', block_group.vaddr])
except OSError as e:
if e.errno == errno.EPERM:
raise HeatmapError("Insufficient permissions to use the btrfs kernel API. "
"Hint: Try running the script as root user.".format(e))
elif e.errno == errno.ENOTTY:
raise HeatmapError("Unable to retrieve data. Hint: Not a btrfs file system?")
raise
try:
filename = generate_png_file_name(args.output, filename_parts)
grid.write_png(filename)
except Exception as e:
raise HeatmapError("Unable to write output file {}: {}".format(filename, e))
if __name__ == '__main__':
try:
main()
except HeatmapError as e:
print("Error: {0}".format(e), file=sys.stderr)
sys.exit(1)
|
|
e937c992b7b81c5ade130b8d3e0ea46ce08a3e7c
|
0aaba744749d86e0ed7b0cb088707857de0e6d82
|
/mlos_bench/mlos_bench/tests/optimizers/__init__.py
|
509ecbd842b0f0ec31123c8c1efbca8bd784fd78
|
[
"MIT"
] |
permissive
|
microsoft/MLOS
|
66968fd0e00df9a3d819ce5287dc057585b2bd8c
|
0db80043dad256d77dc4c2b4fc54aa0b0aa2597f
|
refs/heads/main
| 2023-08-19T02:10:54.290106
| 2023-08-18T20:29:48
| 2023-08-18T20:29:48
| 253,620,591
| 109
| 50
|
MIT
| 2023-09-14T18:52:13
| 2020-04-06T21:33:40
|
Python
|
UTF-8
|
Python
| false
| false
| 178
|
py
|
__init__.py
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tests for mlos_bench.optimizers.
Used to make mypy happy about multiple conftest.py modules.
"""
|
4bb61b2e0dbe2973484d4cc690a9fba261920f92
|
25783871609cc18715e2cf454f9c91fcbaef3798
|
/owasp-top10-2021-apps/a8/amarelo-designs/app/app.py
|
92e24231e65157bf21333d3914d03d12f3a096d5
|
[
"BSD-3-Clause"
] |
permissive
|
globocom/secDevLabs
|
da65414122cca79845c9305ecf0e4865d92782d8
|
13a98dd853a1538d4971fed073bff23e42de88ec
|
refs/heads/master
| 2023-09-04T14:04:48.963314
| 2023-07-10T19:30:33
| 2023-07-10T19:30:33
| 159,996,500
| 830
| 413
|
BSD-3-Clause
| 2023-07-10T19:30:36
| 2018-12-01T23:26:50
|
PHP
|
UTF-8
|
Python
| false
| false
| 1,298
|
py
|
app.py
|
# coding: utf-8
from flask import Flask, request, make_response, render_template, redirect, flash
import uuid
import pickle
import base64
app = Flask(__name__)
@app.route("/")
def ola():
return render_template('index.html')
@app.route("/admin", methods=['GET','POST'])
def login():
if request.method == 'POST':
username = request.values.get('username')
password = request.values.get('password')
if username == "admin" and password == "admin":
token = str(uuid.uuid4().hex)
cookie = { "username":username, "admin":True, "sessionId":token }
pickle_resultado = pickle.dumps(cookie)
encodedSessionCookie = base64.b64encode(pickle_resultado)
resp = make_response(redirect("/user"))
resp.set_cookie("sessionId", encodedSessionCookie)
return resp
else:
return redirect("/admin")
else:
return render_template('admin.html')
@app.route("/user", methods=['GET'])
def userInfo():
cookie = request.cookies.get("sessionId")
if cookie == None:
return "Não Autorizado!"
cookie = pickle.loads(base64.b64decode(cookie))
return render_template('user.html')
if __name__ == '__main__':
app.run(debug=True,host='0.0.0.0')
|
bd2337b3a2c16a7cfb9ce13858b3d1f784e59bf3
|
4bcc9806152542ab43fc2cf47c499424f200896c
|
/tensorflow/python/kernel_tests/distributions/normal_test.py
|
e2ad1956a3ec1a1bb2227331db66f85283e86cab
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
tensorflow/tensorflow
|
906276dbafcc70a941026aa5dc50425ef71ee282
|
a7f3934a67900720af3d3b15389551483bee50b8
|
refs/heads/master
| 2023-08-25T04:24:41.611870
| 2023-08-25T04:06:24
| 2023-08-25T04:14:08
| 45,717,250
| 208,740
| 109,943
|
Apache-2.0
| 2023-09-14T20:55:50
| 2015-11-07T01:19:20
|
C++
|
UTF-8
|
Python
| false
| false
| 21,729
|
py
|
normal_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for initializers."""
import importlib
import math
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
stats = try_import("scipy.stats")
class NormalTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(123)
def assertAllFinite(self, tensor):
is_finite = np.isfinite(self.evaluate(tensor))
all_true = np.ones_like(is_finite, dtype=np.bool_)
self.assertAllEqual(all_true, is_finite)
def _testParamShapes(self, sample_shape, expected):
param_shapes = normal_lib.Normal.param_shapes(sample_shape)
mu_shape, sigma_shape = param_shapes["loc"], param_shapes["scale"]
self.assertAllEqual(expected, self.evaluate(mu_shape))
self.assertAllEqual(expected, self.evaluate(sigma_shape))
mu = array_ops.zeros(mu_shape)
sigma = array_ops.ones(sigma_shape)
self.assertAllEqual(
expected,
self.evaluate(array_ops.shape(normal_lib.Normal(mu, sigma).sample())))
def _testParamStaticShapes(self, sample_shape, expected):
param_shapes = normal_lib.Normal.param_static_shapes(sample_shape)
mu_shape, sigma_shape = param_shapes["loc"], param_shapes["scale"]
self.assertEqual(expected, mu_shape)
self.assertEqual(expected, sigma_shape)
@test_util.run_in_graph_and_eager_modes
def testSampleLikeArgsGetDistDType(self):
dist = normal_lib.Normal(0., 1.)
self.assertEqual(dtypes.float32, dist.dtype)
for method in ("log_prob", "prob", "log_cdf", "cdf",
"log_survival_function", "survival_function", "quantile"):
self.assertEqual(dtypes.float32, getattr(dist, method)(1).dtype)
@test_util.run_in_graph_and_eager_modes
def testParamShapes(self):
sample_shape = [10, 3, 4]
self._testParamShapes(sample_shape, sample_shape)
self._testParamShapes(constant_op.constant(sample_shape), sample_shape)
@test_util.run_in_graph_and_eager_modes
def testParamStaticShapes(self):
sample_shape = [10, 3, 4]
self._testParamStaticShapes(sample_shape, sample_shape)
self._testParamStaticShapes(
tensor_shape.TensorShape(sample_shape), sample_shape)
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testNormalWithSoftplusScale(self):
mu = array_ops.zeros((10, 3))
rho = array_ops.ones((10, 3)) * -2.
normal = normal_lib.NormalWithSoftplusScale(loc=mu, scale=rho)
self.assertAllEqual(self.evaluate(mu), self.evaluate(normal.loc))
self.assertAllEqual(
self.evaluate(nn_ops.softplus(rho)), self.evaluate(normal.scale))
@test_util.run_in_graph_and_eager_modes
def testNormalLogPDF(self):
batch_size = 6
mu = constant_op.constant([3.0] * batch_size)
sigma = constant_op.constant([math.sqrt(10.0)] * batch_size)
x = np.array([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0], dtype=np.float32)
normal = normal_lib.Normal(loc=mu, scale=sigma)
log_pdf = normal.log_prob(x)
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), log_pdf.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()),
self.evaluate(log_pdf).shape)
self.assertAllEqual(normal.batch_shape, log_pdf.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(log_pdf).shape)
pdf = normal.prob(x)
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), pdf.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()),
self.evaluate(pdf).shape)
self.assertAllEqual(normal.batch_shape, pdf.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(pdf).shape)
if not stats:
return
expected_log_pdf = stats.norm(self.evaluate(mu),
self.evaluate(sigma)).logpdf(x)
self.assertAllClose(expected_log_pdf, self.evaluate(log_pdf))
self.assertAllClose(np.exp(expected_log_pdf), self.evaluate(pdf))
@test_util.run_in_graph_and_eager_modes
def testNormalLogPDFMultidimensional(self):
batch_size = 6
mu = constant_op.constant([[3.0, -3.0]] * batch_size)
sigma = constant_op.constant(
[[math.sqrt(10.0), math.sqrt(15.0)]] * batch_size)
x = np.array([[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]], dtype=np.float32).T
normal = normal_lib.Normal(loc=mu, scale=sigma)
log_pdf = normal.log_prob(x)
log_pdf_values = self.evaluate(log_pdf)
self.assertEqual(log_pdf.get_shape(), (6, 2))
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), log_pdf.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()),
self.evaluate(log_pdf).shape)
self.assertAllEqual(normal.batch_shape, log_pdf.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(log_pdf).shape)
pdf = normal.prob(x)
pdf_values = self.evaluate(pdf)
self.assertEqual(pdf.get_shape(), (6, 2))
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), pdf.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), pdf_values.shape)
self.assertAllEqual(normal.batch_shape, pdf.get_shape())
self.assertAllEqual(normal.batch_shape, pdf_values.shape)
if not stats:
return
expected_log_pdf = stats.norm(self.evaluate(mu),
self.evaluate(sigma)).logpdf(x)
self.assertAllClose(expected_log_pdf, log_pdf_values)
self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
@test_util.run_in_graph_and_eager_modes
def testNormalCDF(self):
batch_size = 50
mu = self._rng.randn(batch_size)
sigma = self._rng.rand(batch_size) + 1.0
x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
normal = normal_lib.Normal(loc=mu, scale=sigma)
cdf = normal.cdf(x)
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), cdf.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()),
self.evaluate(cdf).shape)
self.assertAllEqual(normal.batch_shape, cdf.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(cdf).shape)
if not stats:
return
expected_cdf = stats.norm(mu, sigma).cdf(x)
self.assertAllClose(expected_cdf, self.evaluate(cdf), atol=0)
@test_util.run_in_graph_and_eager_modes
def testNormalSurvivalFunction(self):
batch_size = 50
mu = self._rng.randn(batch_size)
sigma = self._rng.rand(batch_size) + 1.0
x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
normal = normal_lib.Normal(loc=mu, scale=sigma)
sf = normal.survival_function(x)
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), sf.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()),
self.evaluate(sf).shape)
self.assertAllEqual(normal.batch_shape, sf.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(sf).shape)
if not stats:
return
expected_sf = stats.norm(mu, sigma).sf(x)
self.assertAllClose(expected_sf, self.evaluate(sf), atol=0)
@test_util.run_in_graph_and_eager_modes
def testNormalLogCDF(self):
batch_size = 50
mu = self._rng.randn(batch_size)
sigma = self._rng.rand(batch_size) + 1.0
x = np.linspace(-100.0, 10.0, batch_size).astype(np.float64)
normal = normal_lib.Normal(loc=mu, scale=sigma)
cdf = normal.log_cdf(x)
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), cdf.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()),
self.evaluate(cdf).shape)
self.assertAllEqual(normal.batch_shape, cdf.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(cdf).shape)
if not stats:
return
expected_cdf = stats.norm(mu, sigma).logcdf(x)
self.assertAllClose(expected_cdf, self.evaluate(cdf), atol=0, rtol=1e-3)
def testFiniteGradientAtDifficultPoints(self):
for dtype in [np.float32, np.float64]:
g = ops.Graph()
with g.as_default():
mu = variables.Variable(dtype(0.0))
sigma = variables.Variable(dtype(1.0))
dist = normal_lib.Normal(loc=mu, scale=sigma)
x = np.array([-100., -20., -5., 0., 5., 20., 100.]).astype(dtype)
for func in [
dist.cdf, dist.log_cdf, dist.survival_function,
dist.log_survival_function, dist.log_prob, dist.prob
]:
value = func(x)
grads = gradients_impl.gradients(value, [mu, sigma])
with self.session(graph=g):
self.evaluate(variables.global_variables_initializer())
self.assertAllFinite(value)
self.assertAllFinite(grads[0])
self.assertAllFinite(grads[1])
@test_util.run_in_graph_and_eager_modes
def testNormalLogSurvivalFunction(self):
batch_size = 50
mu = self._rng.randn(batch_size)
sigma = self._rng.rand(batch_size) + 1.0
x = np.linspace(-10.0, 100.0, batch_size).astype(np.float64)
normal = normal_lib.Normal(loc=mu, scale=sigma)
sf = normal.log_survival_function(x)
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), sf.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()),
self.evaluate(sf).shape)
self.assertAllEqual(normal.batch_shape, sf.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(sf).shape)
if not stats:
return
expected_sf = stats.norm(mu, sigma).logsf(x)
self.assertAllClose(expected_sf, self.evaluate(sf), atol=0, rtol=1e-5)
@test_util.run_in_graph_and_eager_modes
def testNormalEntropyWithScalarInputs(self):
# Scipy.stats.norm cannot deal with the shapes in the other test.
mu_v = 2.34
sigma_v = 4.56
normal = normal_lib.Normal(loc=mu_v, scale=sigma_v)
entropy = normal.entropy()
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), entropy.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()),
self.evaluate(entropy).shape)
self.assertAllEqual(normal.batch_shape, entropy.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(entropy).shape)
# scipy.stats.norm cannot deal with these shapes.
if not stats:
return
expected_entropy = stats.norm(mu_v, sigma_v).entropy()
self.assertAllClose(expected_entropy, self.evaluate(entropy))
@test_util.run_in_graph_and_eager_modes
def testNormalEntropy(self):
mu_v = np.array([1.0, 1.0, 1.0])
sigma_v = np.array([[1.0, 2.0, 3.0]]).T
normal = normal_lib.Normal(loc=mu_v, scale=sigma_v)
# scipy.stats.norm cannot deal with these shapes.
sigma_broadcast = mu_v * sigma_v
expected_entropy = 0.5 * np.log(2 * np.pi * np.exp(1) * sigma_broadcast**2)
entropy = normal.entropy()
np.testing.assert_allclose(expected_entropy, self.evaluate(entropy))
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), entropy.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()),
self.evaluate(entropy).shape)
self.assertAllEqual(normal.batch_shape, entropy.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(entropy).shape)
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testNormalMeanAndMode(self):
# Mu will be broadcast to [7, 7, 7].
mu = [7.]
sigma = [11., 12., 13.]
normal = normal_lib.Normal(loc=mu, scale=sigma)
self.assertAllEqual((3,), normal.mean().get_shape())
self.assertAllEqual([7., 7, 7], self.evaluate(normal.mean()))
self.assertAllEqual((3,), normal.mode().get_shape())
self.assertAllEqual([7., 7, 7], self.evaluate(normal.mode()))
@test_util.run_in_graph_and_eager_modes
def testNormalQuantile(self):
batch_size = 52
mu = self._rng.randn(batch_size)
sigma = self._rng.rand(batch_size) + 1.0
p = np.linspace(0., 1.0, batch_size - 2).astype(np.float64)
    # The quantile is computed by a piecewise rational approximation, so we add
    # some special input values to make sure we hit all of the pieces.
p = np.hstack((p, np.exp(-33), 1. - np.exp(-33)))
normal = normal_lib.Normal(loc=mu, scale=sigma)
x = normal.quantile(p)
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()), x.get_shape())
self.assertAllEqual(
self.evaluate(normal.batch_shape_tensor()),
self.evaluate(x).shape)
self.assertAllEqual(normal.batch_shape, x.get_shape())
self.assertAllEqual(normal.batch_shape, self.evaluate(x).shape)
if not stats:
return
expected_x = stats.norm(mu, sigma).ppf(p)
self.assertAllClose(expected_x, self.evaluate(x), atol=0.)
def _baseQuantileFiniteGradientAtDifficultPoints(self, dtype):
g = ops.Graph()
with g.as_default():
mu = variables.Variable(dtype(0.0))
sigma = variables.Variable(dtype(1.0))
dist = normal_lib.Normal(loc=mu, scale=sigma)
p = variables.Variable(
np.array([0.,
np.exp(-32.), np.exp(-2.),
1. - np.exp(-2.), 1. - np.exp(-32.),
1.]).astype(dtype))
value = dist.quantile(p)
grads = gradients_impl.gradients(value, [mu, p])
with self.cached_session(graph=g):
self.evaluate(variables.global_variables_initializer())
self.assertAllFinite(grads[0])
self.assertAllFinite(grads[1])
def testQuantileFiniteGradientAtDifficultPointsFloat32(self):
self._baseQuantileFiniteGradientAtDifficultPoints(np.float32)
def testQuantileFiniteGradientAtDifficultPointsFloat64(self):
self._baseQuantileFiniteGradientAtDifficultPoints(np.float64)
@test_util.run_in_graph_and_eager_modes
def testNormalVariance(self):
# sigma will be broadcast to [7, 7, 7]
mu = [1., 2., 3.]
sigma = [7.]
normal = normal_lib.Normal(loc=mu, scale=sigma)
self.assertAllEqual((3,), normal.variance().get_shape())
self.assertAllEqual([49., 49, 49], self.evaluate(normal.variance()))
@test_util.run_in_graph_and_eager_modes
def testNormalStandardDeviation(self):
# sigma will be broadcast to [7, 7, 7]
mu = [1., 2., 3.]
sigma = [7.]
normal = normal_lib.Normal(loc=mu, scale=sigma)
self.assertAllEqual((3,), normal.stddev().get_shape())
self.assertAllEqual([7., 7, 7], self.evaluate(normal.stddev()))
@test_util.run_in_graph_and_eager_modes
def testNormalSample(self):
mu = constant_op.constant(3.0)
sigma = constant_op.constant(math.sqrt(3.0))
mu_v = 3.0
sigma_v = np.sqrt(3.0)
n = constant_op.constant(100000)
normal = normal_lib.Normal(loc=mu, scale=sigma)
samples = normal.sample(n)
sample_values = self.evaluate(samples)
# Note that the standard error for the sample mean is ~ sigma / sqrt(n).
# The sample variance similarly is dependent on sigma and n.
# Thus, the tolerances below are very sensitive to number of samples
# as well as the variances chosen.
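    # Concretely here, sigma / sqrt(n) = sqrt(3) / sqrt(100000) ~= 0.0055, so
    # atol=1e-1 leaves ample headroom.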
self.assertEqual(sample_values.shape, (100000,))
self.assertAllClose(sample_values.mean(), mu_v, atol=1e-1)
self.assertAllClose(sample_values.std(), sigma_v, atol=1e-1)
expected_samples_shape = tensor_shape.TensorShape(
[self.evaluate(n)]).concatenate(
tensor_shape.TensorShape(
self.evaluate(normal.batch_shape_tensor())))
self.assertAllEqual(expected_samples_shape, samples.get_shape())
self.assertAllEqual(expected_samples_shape, sample_values.shape)
expected_samples_shape = (
tensor_shape.TensorShape([self.evaluate(n)]).concatenate(
normal.batch_shape))
self.assertAllEqual(expected_samples_shape, samples.get_shape())
self.assertAllEqual(expected_samples_shape, sample_values.shape)
def testNormalFullyReparameterized(self):
mu = constant_op.constant(4.0)
sigma = constant_op.constant(3.0)
with backprop.GradientTape() as tape:
tape.watch(mu)
tape.watch(sigma)
normal = normal_lib.Normal(loc=mu, scale=sigma)
samples = normal.sample(100)
grad_mu, grad_sigma = tape.gradient(samples, [mu, sigma])
self.assertIsNotNone(grad_mu)
self.assertIsNotNone(grad_sigma)
@test_util.run_in_graph_and_eager_modes
def testNormalSampleMultiDimensional(self):
batch_size = 2
mu = constant_op.constant([[3.0, -3.0]] * batch_size)
sigma = constant_op.constant(
[[math.sqrt(2.0), math.sqrt(3.0)]] * batch_size)
mu_v = [3.0, -3.0]
sigma_v = [np.sqrt(2.0), np.sqrt(3.0)]
n = constant_op.constant(100000)
normal = normal_lib.Normal(loc=mu, scale=sigma)
samples = normal.sample(n)
sample_values = self.evaluate(samples)
# Note that the standard error for the sample mean is ~ sigma / sqrt(n).
# The sample variance similarly is dependent on sigma and n.
# Thus, the tolerances below are very sensitive to number of samples
# as well as the variances chosen.
self.assertEqual(samples.get_shape(), (100000, batch_size, 2))
self.assertAllClose(sample_values[:, 0, 0].mean(), mu_v[0], atol=1e-1)
self.assertAllClose(sample_values[:, 0, 0].std(), sigma_v[0], atol=1e-1)
self.assertAllClose(sample_values[:, 0, 1].mean(), mu_v[1], atol=1e-1)
self.assertAllClose(sample_values[:, 0, 1].std(), sigma_v[1], atol=1e-1)
expected_samples_shape = tensor_shape.TensorShape(
[self.evaluate(n)]).concatenate(
tensor_shape.TensorShape(
self.evaluate(normal.batch_shape_tensor())))
self.assertAllEqual(expected_samples_shape, samples.get_shape())
self.assertAllEqual(expected_samples_shape, sample_values.shape)
expected_samples_shape = (
tensor_shape.TensorShape([self.evaluate(n)]).concatenate(
normal.batch_shape))
self.assertAllEqual(expected_samples_shape, samples.get_shape())
self.assertAllEqual(expected_samples_shape, sample_values.shape)
@test_util.run_in_graph_and_eager_modes
def testNegativeSigmaFails(self):
with self.assertRaisesOpError("Condition x > 0 did not hold"):
normal = normal_lib.Normal(
loc=[1.], scale=[-5.], validate_args=True, name="G")
self.evaluate(normal.mean())
@test_util.run_in_graph_and_eager_modes
def testNormalShape(self):
mu = constant_op.constant([-3.0] * 5)
sigma = constant_op.constant(11.0)
normal = normal_lib.Normal(loc=mu, scale=sigma)
self.assertEqual(self.evaluate(normal.batch_shape_tensor()), [5])
self.assertEqual(normal.batch_shape, tensor_shape.TensorShape([5]))
self.assertAllEqual(self.evaluate(normal.event_shape_tensor()), [])
self.assertEqual(normal.event_shape, tensor_shape.TensorShape([]))
@test_util.run_deprecated_v1
def testNormalShapeWithPlaceholders(self):
mu = array_ops.placeholder(dtype=dtypes.float32)
sigma = array_ops.placeholder(dtype=dtypes.float32)
normal = normal_lib.Normal(loc=mu, scale=sigma)
with self.cached_session() as sess:
      # batch_shape should be an unknown TensorShape here.
self.assertEqual(normal.batch_shape, tensor_shape.TensorShape(None))
self.assertEqual(normal.event_shape, ())
self.assertAllEqual(self.evaluate(normal.event_shape_tensor()), [])
self.assertAllEqual(
sess.run(normal.batch_shape_tensor(),
feed_dict={mu: 5.0,
sigma: [1.0, 2.0]}), [2])
@test_util.run_in_graph_and_eager_modes
def testNormalNormalKL(self):
batch_size = 6
mu_a = np.array([3.0] * batch_size)
sigma_a = np.array([1.0, 2.0, 3.0, 1.5, 2.5, 3.5])
mu_b = np.array([-3.0] * batch_size)
sigma_b = np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0])
n_a = normal_lib.Normal(loc=mu_a, scale=sigma_a)
n_b = normal_lib.Normal(loc=mu_b, scale=sigma_b)
kl = kullback_leibler.kl_divergence(n_a, n_b)
kl_val = self.evaluate(kl)
kl_expected = ((mu_a - mu_b)**2 / (2 * sigma_b**2) + 0.5 * (
(sigma_a**2 / sigma_b**2) - 1 - 2 * np.log(sigma_a / sigma_b)))
self.assertEqual(kl.get_shape(), (batch_size,))
self.assertAllClose(kl_val, kl_expected)
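# A minimal NumPy sketch (illustrative, not part of the original test file) of
# the closed-form KL divergence checked above; it is algebraically equal to the
# `kl_expected` expression in testNormalNormalKL:
def _normal_kl_reference(mu_a, sigma_a, mu_b, sigma_b):
  """KL(N(mu_a, sigma_a) || N(mu_b, sigma_b)), elementwise over arrays."""
  return (np.log(sigma_b / sigma_a)
          + (sigma_a**2 + (mu_a - mu_b)**2) / (2.0 * sigma_b**2) - 0.5)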
if __name__ == "__main__":
test.main()
| path: /tests/test_util.py | repo: michaeljoseph/changes | license: MIT | language: Python |
from changes import util
def test_extract():
assert {'a': 1, 'b': 2} == util.extract({'a': 1, 'b': 2, 'c': 3}, ['a', 'b'])
def test_extract_arguments():
assert {'major': True, 'minor': False, 'patch': False} == util.extract_arguments(
{'--major': True, '--minor': False, '--patch': False},
['--major', '--minor', '--patch'],
)
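# For orientation, a minimal sketch of implementations these tests would pass
# (illustrative only; the real code lives in changes/util.py):
#
#     def extract(dictionary, keys):
#         return {k: dictionary[k] for k in keys if k in dictionary}
#
#     def extract_arguments(arguments, long_keys):
#         return {key.lstrip('-'): arguments[key] for key in long_keys}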
| path: /python/helpers/typeshed/stubs/passlib/passlib/exc.pyi | repo: JetBrains/intellij-community | license: Apache-2.0/MIT | language: Python |
from typing import Any
class UnknownBackendError(ValueError):
hasher: Any
backend: Any
def __init__(self, hasher, backend) -> None: ...
class MissingBackendError(RuntimeError): ...
class InternalBackendError(RuntimeError): ...
class PasswordValueError(ValueError): ...
class PasswordSizeError(PasswordValueError):
max_size: Any
def __init__(self, max_size, msg: Any | None = ...) -> None: ...
class PasswordTruncateError(PasswordSizeError):
def __init__(self, cls, msg: Any | None = ...) -> None: ...
class PasslibSecurityError(RuntimeError): ...
class TokenError(ValueError):
def __init__(self, msg: Any | None = ..., *args, **kwds) -> None: ...
class MalformedTokenError(TokenError): ...
class InvalidTokenError(TokenError): ...
class UsedTokenError(TokenError):
expire_time: Any
def __init__(self, *args, **kwds) -> None: ...
class UnknownHashError(ValueError):
value: Any
message: Any
def __init__(self, message: Any | None = ..., value: Any | None = ...) -> None: ...
class PasslibWarning(UserWarning): ...
class PasslibConfigWarning(PasslibWarning): ...
class PasslibHashWarning(PasslibWarning): ...
class PasslibRuntimeWarning(PasslibWarning): ...
class PasslibSecurityWarning(PasslibWarning): ...
def type_name(value): ...
def ExpectedTypeError(value, expected, param): ...
def ExpectedStringError(value, param): ...
def MissingDigestError(handler: Any | None = ...): ...
def NullPasswordError(handler: Any | None = ...): ...
def InvalidHashError(handler: Any | None = ...): ...
def MalformedHashError(handler: Any | None = ..., reason: Any | None = ...): ...
def ZeroPaddedRoundsError(handler: Any | None = ...): ...
def ChecksumSizeError(handler, raw: bool = ...): ...
ENABLE_DEBUG_ONLY_REPR: bool
def debug_only_repr(value, param: str = ...): ...
def CryptBackendError(handler, config, hash, source: str = ...) -> None: ...
| path: /databricks/notebooks/legacy/mlflow_galleries/models_basic/python/02f_Sklearn_Train_Predict_ModelServer.py | repo: amesar/mlflow-examples | license: none | language: Python |
# Databricks notebook source
# MAGIC %md # Basic Sklearn MLflow train and predict with Databricks model server
# MAGIC
# MAGIC **Overview**
# MAGIC * End-to-end example: train a model, deploy it to the Databricks model server, and score against it.
# MAGIC * Trains and registers model.
# MAGIC * Deploys registered model to Databricks model server.
# MAGIC * Client invocation: curl and Python `requests` examples.
# MAGIC
# MAGIC **Widgets**
# MAGIC * Registered model: if left empty, the registered model name will be created from your user name plus the notebook name.
# MAGIC   * For example, `john.doe@databricks.com` will result in `john_doe_02f_Sklearn_Train_Predict_ModelServer`.
# COMMAND ----------
# MAGIC %md ### Setup
# COMMAND ----------
# MAGIC %run ./common
# COMMAND ----------
dbutils.widgets.text("Registered Model","")
default_registered_model = ""
registered_model = dbutils.widgets.get("Registered Model")
if registered_model == "":
context = dbutils.notebook.entry_point.getDbutils().notebook().getContext()
username = context.tags().get("user").get().replace("@databricks.com","").replace(".","_")
notebook = context.notebookPath().get().split("/")[-1]
registered_model = f"{username}_{notebook}"
print("default_registered_model:",default_registered_model)
print("registered_model:",registered_model)
if not registered_model:
    raise Exception("Required value for registered_model")
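# COMMAND ----------
# Illustrative check of the naming rule above (hypothetical user name, not part
# of the original notebook flow):
_example_user = "john.doe@databricks.com".replace("@databricks.com", "").replace(".", "_")
assert _example_user == "john_doe"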
# COMMAND ----------
import sklearn
import mlflow
import mlflow.sklearn
# COMMAND ----------
# MAGIC %md ### Prepare data
# COMMAND ----------
data_path = download_wine_file()
# COMMAND ----------
from sklearn.model_selection import train_test_split
import pandas as pd
data = pd.read_csv(data_path)
train, test = train_test_split(data, test_size=0.30, random_state=42)
train_x = train.drop([colLabel], axis=1)
test_x = test.drop([colLabel], axis=1)
train_y = train[colLabel]
test_y = test[colLabel]
# COMMAND ----------
# MAGIC %md ### Train
# COMMAND ----------
import numpy as np
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error
model_name = "sklearn-model"
max_depth = 5
with mlflow.start_run(run_name="sklearn") as run:
run_id = run.info.run_id
print("run_id:",run_id)
print("experiment_id:",run.info.experiment_id)
mlflow.set_tag("version.mlflow", mlflow.__version__)
mlflow.log_param("max_depth", max_depth)
model = DecisionTreeRegressor(max_depth=max_depth)
model.fit(train_x, train_y)
predictions = model.predict(test_x)
mlflow.sklearn.log_model(model, "sklearn-model", registered_model_name=registered_model)
rmse = np.sqrt(mean_squared_error(test_y, predictions))
mlflow.log_metric("rmse", rmse)
# COMMAND ----------
display_run_uri(run.info.experiment_id, run_id)
# COMMAND ----------
# MAGIC %md ### Get latest version
# COMMAND ----------
client = mlflow.tracking.MlflowClient()
model = client.get_registered_model(registered_model)
# Model versions are strings in MLflow, so sort them numerically to find the latest.
versions = sorted(int(vr.version) for vr in model.latest_versions)
latest_version = versions[-1]
print("Latest model version:", latest_version)
# COMMAND ----------
# MAGIC %md ### Deploy model - manual step
# MAGIC
# MAGIC * Now you must start the model server for the version you have just created.
# MAGIC * Follow the steps in [MLflow Model Serving on Databricks](https://docs.databricks.com/applications/mlflow/model-serving.html) to deploy your model to the Databricks Model Server.
# COMMAND ----------
# MAGIC %md ### Predict
# COMMAND ----------
display_registered_model_uri(registered_model)
# COMMAND ----------
# Wait for the model version to be deployed to existing model server
import time
time.sleep(10)
# COMMAND ----------
# MAGIC %md #### Setup for prediction
# COMMAND ----------
import os
hostname = dbutils.notebook.entry_point.getDbutils().notebook().getContext().tags().get("browserHostName").get()
model_server_uri = f"https://{hostname}/model/{registered_model}/{latest_version}/invocations"
os.environ["MODEL_SERVER_URI"] = model_server_uri
token = dbutils.notebook.entry_point.getDbutils().notebook().getContext().apiToken().get()
os.environ["TOKEN"] = token
model_server_uri
# COMMAND ----------
# MAGIC %md #### Predict with curl
# COMMAND ----------
# MAGIC %sh
# MAGIC
# MAGIC echo "MODEL_SERVER_URI: $MODEL_SERVER_URI"
# MAGIC
# MAGIC curl -X POST -H "Content-Type:application/json" \
# MAGIC -H "Authorization: Bearer $TOKEN" \
# MAGIC -d'{
# MAGIC "columns": [
# MAGIC "fixed acidity",
# MAGIC "volatile acidity",
# MAGIC "citric acid",
# MAGIC "residual sugar",
# MAGIC "chlorides",
# MAGIC "free sulfur dioxide",
# MAGIC "total sulfur dioxide",
# MAGIC "density",
# MAGIC "pH",
# MAGIC "sulphates",
# MAGIC "alcohol"
# MAGIC ],
# MAGIC "data": [
# MAGIC [ 7, 0.27, 0.36, 20.7, 0.045, 45, 170, 1.001, 3, 0.45, 8.8 ],
# MAGIC [ 6.3, 0.3, 0.34, 1.6, 0.049, 14, 132, 0.994, 3.3, 0.49, 9.5 ],
# MAGIC [ 8.1, 0.28, 0.4, 6.9, 0.05, 30, 97, 0.9951, 3.26, 0.44, 10.1 ]
# MAGIC ]
# MAGIC }' \
# MAGIC $MODEL_SERVER_URI
# COMMAND ----------
# MAGIC %md #### Predict with Python
# COMMAND ----------
data = """
{
"columns": [
"fixed acidity",
"volatile acidity",
"citric acid",
"residual sugar",
"chlorides",
"free sulfur dioxide",
"total sulfur dioxide",
"density",
"pH",
"sulphates",
"alcohol"
],
"data": [
[ 7, 0.27, 0.36, 20.7, 0.045, 45, 170, 1.001, 3, 0.45, 8.8 ],
[ 6.3, 0.3, 0.34, 1.6, 0.049, 14, 132, 0.994, 3.3, 0.49, 9.5 ],
[ 8.1, 0.28, 0.4, 6.9, 0.05, 30, 97, 0.9951, 3.26, 0.44, 10.1 ]
]
}
"""
# COMMAND ----------
import requests
headers = {"Authorization": f"Bearer {token}", "Content-Type": "application/json"}
response = requests.request(method='POST', headers=headers, url=model_server_uri, data=data)
response.json()
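# COMMAND ----------
# Optional follow-up sketch (assumes the model server returns a JSON list of
# numeric predictions; adjust to the actual response schema):
# import pandas as pd
# pd.DataFrame(response.json(), columns=["prediction"])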
| path: /cm-mlops/automation/cache/module.py | repo: mlcommons/ck | license: Apache-2.0 | language: Python |
import os
from cmind.automation import Automation
from cmind import utils
class CAutomation(Automation):
"""
Automation actions
"""
############################################################
def __init__(self, cmind, automation_file):
super().__init__(cmind, __file__)
############################################################
def test(self, i):
"""
Test automation
Args:
(CM input dict):
(out) (str): if 'con', output to console
automation (str): automation as CM string object
parsed_automation (list): prepared in CM CLI or CM access function
[ (automation alias, automation UID) ] or
[ (automation alias, automation UID), (automation repo alias, automation repo UID) ]
(artifact) (str): artifact as CM string object
(parsed_artifact) (list): prepared in CM CLI or CM access function
[ (artifact alias, artifact UID) ] or
[ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
...
Returns:
(CM return dict):
* return (int): return code == 0 if no error and >0 if error
* (error) (str): error string if return>0
* Output from this automation action
"""
import json
print (json.dumps(i, indent=2))
return {'return':0}
############################################################
def show(self, i):
"""
Show cache
Args:
(CM input dict):
(out) (str): if 'con', output to console
(env) (bool): if True, show env from cm-cached-state.json
...
Returns:
(CM return dict):
* return (int): return code == 0 if no error and >0 if error
* (error) (str): error string if return>0
* Output from this automation action
"""
import json
# Check parsed automation
if 'parsed_automation' not in i:
return {'return':1, 'error':'automation is not specified'}
console = i.get('out') == 'con'
show_env = i.get('env', False)
# Check simplified CMD: cm run script "get compiler"
# If artifact has spaces, treat them as tags!
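        # e.g. `cm show cache "get compiler"` arrives here as
        # artifact="get compiler" and is turned into tags="get,compiler" below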
artifact = i.get('artifact','')
tags = i.get('tags','').strip()
if ' ' in artifact or ',' in artifact:
del(i['artifact'])
if 'parsed_artifact' in i: del(i['parsed_artifact'])
new_tags = artifact.replace(' ',',')
tags = new_tags if tags=='' else new_tags+','+tags
i['tags'] = tags
# Find CM artifact(s)
i['out'] = None
r = self.search(i)
if r['return']>0: return r
lst = r['list']
for artifact in sorted(lst, key = lambda x: sorted(x.meta['tags'])):
# for artifact in lst:
path = artifact.path
meta = artifact.meta
original_meta = artifact.original_meta
alias = meta.get('alias','')
uid = meta.get('uid','')
tags = meta.get('tags',[])
tags1 = sorted([x for x in tags if not x.startswith('_')])
tags2 = sorted([x for x in tags if x.startswith('_')])
tags = tags1 + tags2
version = meta.get('version','')
if console:
print ('')
# print ('* UID: {}'.format(uid))
print ('* Tags: {}'.format(','.join(tags)))
print (' Path: {}'.format(path))
if version!='':
print (' Version: {}'.format(version))
if show_env and console:
path_to_cached_state_file = os.path.join(path, 'cm-cached-state.json')
if os.path.isfile(path_to_cached_state_file):
r = utils.load_json(file_name = path_to_cached_state_file)
if r['return']>0: return r
# Update env and state from cache!
cached_state = r['meta']
new_env = cached_state.get('new_env', {})
if len(new_env)>0:
print (' New env:')
print (json.dumps(new_env, indent=6, sort_keys=True).replace('{','').replace('}',''))
new_state = cached_state.get('new_state', {})
if len(new_state)>0:
print (' New state:')
                    print (json.dumps(new_state, indent=6, sort_keys=True))
return {'return':0, 'list': lst}
| path: /bip_utils/ecc/ed25519/ed25519_point.py | repo: ebellocchia/bip_utils | license: MIT | language: Python |
# Copyright (c) 2022 Emanuele Bellocchia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Module for ed25519 point."""
# Imports
from typing import Any, Optional
from bip_utils.ecc.common.ipoint import IPoint
from bip_utils.ecc.curve.elliptic_curve_types import EllipticCurveTypes
from bip_utils.ecc.ed25519.lib import ed25519_lib
from bip_utils.utils.misc import DataBytes
class Ed25519PointConst:
"""Class container for ed25519 point constants."""
# Point coordinate length in bytes
POINT_COORD_BYTE_LEN: int = 32
class Ed25519Point(IPoint):
"""Ed25519 point class."""
m_is_generator: bool
m_enc_bytes: bytes
m_x: Optional[int]
m_y: Optional[int]
@classmethod
def FromBytes(cls,
point_bytes: bytes) -> IPoint:
"""
Construct class from point bytes.
Args:
point_bytes (bytes): Point bytes
Returns:
IPoint: IPoint object
"""
if not ed25519_lib.point_is_on_curve(point_bytes):
raise ValueError("Invalid point bytes")
if ed25519_lib.point_is_decoded_bytes(point_bytes):
point_bytes = ed25519_lib.point_encode(
ed25519_lib.point_bytes_to_coord(point_bytes)
)
return cls(point_bytes)
@classmethod
def FromCoordinates(cls,
x: int,
y: int) -> IPoint:
"""
Construct class from point coordinates.
Args:
x (int): X coordinate of the point
y (int): Y coordinate of the point
Returns:
IPoint: IPoint object
"""
return cls.FromBytes(
ed25519_lib.point_coord_to_bytes((x, y))
)
def __init__(self,
point_bytes: bytes) -> None:
"""
Construct class from point object.
Args:
point_bytes (bytes): Point bytes
"""
if not ed25519_lib.point_is_encoded_bytes(point_bytes):
raise ValueError("Invalid point bytes")
self.m_enc_bytes = point_bytes
self.m_is_generator = ed25519_lib.point_is_generator(point_bytes)
self.m_x, self.m_y = None, None
@staticmethod
def CurveType() -> EllipticCurveTypes:
"""
Get the elliptic curve type.
Returns:
EllipticCurveTypes: Elliptic curve type
"""
return EllipticCurveTypes.ED25519
@staticmethod
def CoordinateLength() -> int:
"""
Get the coordinate length.
Returns:
int: Coordinate key length
"""
return Ed25519PointConst.POINT_COORD_BYTE_LEN
def UnderlyingObject(self) -> Any:
"""
Get the underlying object.
Returns:
Any: Underlying object
"""
return self.m_enc_bytes
def X(self) -> int:
"""
Get point X coordinate.
Returns:
int: Point X coordinate
"""
if self.m_x is None:
self.m_x, self.m_y = ed25519_lib.point_bytes_to_coord(self.m_enc_bytes)
return self.m_x
def Y(self) -> int:
"""
Get point Y coordinate.
Returns:
int: Point Y coordinate
"""
if self.m_y is None:
self.m_x, self.m_y = ed25519_lib.point_bytes_to_coord(self.m_enc_bytes)
return self.m_y
def Raw(self) -> DataBytes:
"""
Return the point encoded to raw bytes.
Returns:
DataBytes object: DataBytes object
"""
return self.RawDecoded()
def RawEncoded(self) -> DataBytes:
"""
Return the encoded point raw bytes.
Returns:
DataBytes object: DataBytes object
"""
return DataBytes(self.m_enc_bytes)
def RawDecoded(self) -> DataBytes:
"""
Return the decoded point raw bytes.
Returns:
DataBytes object: DataBytes object
"""
return DataBytes(ed25519_lib.int_encode(self.X()) + ed25519_lib.int_encode(self.Y()))
def __add__(self,
point: IPoint) -> IPoint:
"""
Add point to another point.
Args:
point (IPoint object): IPoint object
Returns:
IPoint object: IPoint object
"""
return self.__class__(
ed25519_lib.point_add(self.m_enc_bytes, point.UnderlyingObject())
)
def __radd__(self,
point: IPoint) -> IPoint:
"""
Add point to another point.
Args:
point (IPoint object): IPoint object
Returns:
IPoint object: IPoint object
"""
return self + point
def __mul__(self,
scalar: int) -> IPoint:
"""
Multiply point by a scalar.
Args:
scalar (int): scalar
Returns:
IPoint object: IPoint object
"""
if self.m_is_generator:
return self.__class__(
ed25519_lib.point_scalar_mul_base(scalar)
)
return self.__class__(
ed25519_lib.point_scalar_mul(scalar, self.m_enc_bytes)
)
def __rmul__(self,
scalar: int) -> IPoint:
"""
Multiply point by a scalar.
Args:
scalar (int): scalar
Returns:
IPoint object: IPoint object
"""
return self * scalar
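# Minimal usage sketch (illustrative; assumes `pt_bytes` is a valid encoded
# ed25519 point):
#
#     pt = Ed25519Point.FromBytes(pt_bytes)   # validates and normalizes encoding
#     doubled = pt + pt                       # point addition
#     tripled = 3 * pt                        # scalar multiplication via __rmul__
#
# X()/Y() decode the coordinates lazily via point_bytes_to_coord() on first use.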
| path: /tests/integrations/docker/test_Docker__verify.py | repo: beeware/briefcase | license: BSD-3-Clause | language: Python |
import subprocess
from collections import namedtuple
from pathlib import Path, PurePosixPath
from unittest.mock import MagicMock, call
import pytest
from briefcase.exceptions import BriefcaseCommandError, UnsupportedHostError
from briefcase.integrations.base import ToolCache
from briefcase.integrations.docker import Docker
from briefcase.integrations.subprocess import Subprocess
VALID_DOCKER_VERSION = "Docker version 19.03.8, build afacb8b\n"
VALID_DOCKER_INFO = "docker info printout"
VALID_BUILDX_VERSION = "github.com/docker/buildx v0.10.2 00ed17d\n"
VALID_USER_MAPPING_IMAGE_CACHE = "1ed313b0551f"
DOCKER_VERIFICATION_CALLS = [
call(["docker", "--version"]),
call(["docker", "info"]),
call(["docker", "buildx", "version"]),
]
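# (Order matters: Docker.verify() probes the client version first, then daemon
# reachability via "docker info", then the buildx plugin, as tested below.)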
@pytest.fixture
def mock_tools(mock_tools) -> ToolCache:
mock_tools.subprocess = MagicMock(spec_set=Subprocess)
return mock_tools
@pytest.fixture
def mock_write_test_path(tmp_path, monkeypatch):
"""Mock the container write test path in to pytest's tmp directory."""
write_test_path = tmp_path / "mock_write_test"
# Wrap the path so read-only methods can be replaced
write_test_path = MagicMock(wraps=write_test_path)
monkeypatch.setattr(Docker, "_write_test_path", lambda self: write_test_path)
return write_test_path
def test_short_circuit(mock_tools):
"""Tool is not created if already cached."""
mock_tools.docker = "tool"
tool = Docker.verify(mock_tools)
assert tool == "tool"
assert tool == mock_tools.docker
def test_unsupported_os(mock_tools):
"""When host OS is not supported, an error is raised."""
mock_tools.host_os = "wonky"
with pytest.raises(
UnsupportedHostError,
match=f"{Docker.name} is not supported on wonky",
):
Docker.verify(mock_tools)
@pytest.mark.parametrize("host_os", ["Windows", "Linux", "Darwin"])
def test_docker_install_url(host_os):
"""Docker details available for each OS."""
assert host_os in Docker.DOCKER_INSTALL_URL
def test_docker_exists(mock_tools, user_mapping_run_calls, capsys, tmp_path):
"""If docker exists, the Docker wrapper is returned."""
# Mock the return value of Docker Version
mock_tools.subprocess.check_output.side_effect = [
VALID_DOCKER_VERSION,
VALID_DOCKER_INFO,
VALID_BUILDX_VERSION,
VALID_USER_MAPPING_IMAGE_CACHE,
]
# Invoke docker verify
result = Docker.verify(mock_tools)
# The verify call should return the Docker wrapper
assert isinstance(result, Docker)
# Docker version and plugins were verified
mock_tools.subprocess.check_output.assert_has_calls(DOCKER_VERIFICATION_CALLS)
# Docker user mapping inspection occurred
mock_tools.subprocess.run.assert_has_calls(user_mapping_run_calls)
# No console output
output = capsys.readouterr()
assert output.out == ""
assert output.err == ""
def test_docker_doesnt_exist(mock_tools):
"""If docker doesn't exist, an error is raised."""
# Mock Docker not installed on system
mock_tools.subprocess.check_output.side_effect = FileNotFoundError
# Invoke Docker verify
with pytest.raises(BriefcaseCommandError):
Docker.verify(mock_tools)
# But docker was invoked
mock_tools.subprocess.check_output.assert_called_with(["docker", "--version"])
def test_docker_failure(mock_tools, user_mapping_run_calls, capsys):
"""If docker failed during execution, the Docker wrapper is returned with a
warning."""
# Mock Docker cannot be found
mock_tools.subprocess.check_output.side_effect = [
subprocess.CalledProcessError(
returncode=1,
cmd="docker --version",
),
"Success!",
VALID_BUILDX_VERSION,
VALID_USER_MAPPING_IMAGE_CACHE,
]
# Invoke Docker verify
result = Docker.verify(mock_tools)
# The verify call should return the Docker wrapper
assert isinstance(result, Docker)
# Docker version and plugins were verified
mock_tools.subprocess.check_output.assert_has_calls(DOCKER_VERIFICATION_CALLS)
# Docker user mapping inspection occurred
mock_tools.subprocess.run.assert_has_calls(user_mapping_run_calls)
# console output
output = capsys.readouterr()
assert "** WARNING: Unable to determine if Docker is installed" in output.out
assert output.err == ""
def test_docker_bad_version(mock_tools, capsys):
"""If docker exists but the version string doesn't make sense, the Docker wrapper is
returned with a warning."""
# Mock a bad return value of `docker --version`
mock_tools.subprocess.check_output.return_value = "Docker version 17.2\n"
# Invoke Docker verify
with pytest.raises(
BriefcaseCommandError,
match=r"Briefcase requires Docker 19 or higher",
):
Docker.verify(mock_tools)
def test_docker_unknown_version(mock_tools, user_mapping_run_calls, capsys):
"""If docker exists but the version string doesn't make sense, the Docker wrapper is
returned with a warning."""
# Mock a bad return value of `docker --version`
mock_tools.subprocess.check_output.return_value = "ceci nest pas un Docker\n"
# Invoke Docker verify
result = Docker.verify(mock_tools)
# The verify call should return the Docker wrapper
assert isinstance(result, Docker)
# Docker version and plugins were verified
mock_tools.subprocess.check_output.assert_has_calls(DOCKER_VERIFICATION_CALLS)
# Docker user mapping inspection occurred
mock_tools.subprocess.run.assert_has_calls(user_mapping_run_calls)
# console output
output = capsys.readouterr()
assert "** WARNING: Unable to determine the version of Docker" in output.out
assert output.err == ""
def test_docker_exists_but_process_lacks_permission_to_use_it(mock_tools):
"""If the docker daemon isn't running, the check fails."""
error_message = """
Client:
Debug Mode: false
Server:
ERROR: Got permission denied while trying to connect to the Docker daemon socket at unix:///var/run/docker.sock:
Get http://%2Fvar%2Frun%2Fdocker.sock/v1.40/info: dial unix /var/run/docker.sock: connect: permission denied
errors pretty printing info"""
mock_tools.subprocess.check_output.side_effect = [
VALID_DOCKER_VERSION,
subprocess.CalledProcessError(
returncode=1,
cmd="docker info",
output=error_message,
),
]
with pytest.raises(
BriefcaseCommandError,
match="does not have\npermissions to invoke Docker.",
):
Docker.verify(mock_tools)
@pytest.mark.parametrize(
"error_message",
[
"""
Client:
Debug Mode: false
Server:
ERROR: Error response from daemon: dial unix docker.raw.sock: connect: connection refused
errors pretty printing info
""", # this is the error shown on mac
"""
Client:
Debug Mode: false
Server:
ERROR: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?
errors pretty printing info""", # this is the error show on linux
],
)
def test_docker_exists_but_is_not_running(error_message, mock_tools):
"""If the docker daemon isn't running, the check fails."""
mock_tools.subprocess.check_output.side_effect = [
VALID_DOCKER_VERSION,
subprocess.CalledProcessError(
returncode=1,
cmd="docker info",
output=error_message,
),
]
with pytest.raises(
BriefcaseCommandError,
match="the Docker\ndaemon is not running",
):
Docker.verify(mock_tools)
def test_docker_exists_but_unknown_error_when_running_command(mock_tools):
"""If docker info fails in unknown ways, the check fails."""
mock_tools.subprocess.check_output.side_effect = [
VALID_DOCKER_VERSION,
subprocess.CalledProcessError(
returncode=1,
cmd="docker info",
output="This command failed!",
),
]
with pytest.raises(
BriefcaseCommandError,
match="Check your Docker\ninstallation, and try again",
):
Docker.verify(mock_tools)
def test_buildx_plugin_not_installed(mock_tools):
"""If the buildx plugin is not installed, verification fails."""
mock_tools.subprocess.check_output.side_effect = [
VALID_DOCKER_VERSION,
VALID_DOCKER_INFO,
subprocess.CalledProcessError(
returncode=1,
cmd="docker buildx version",
),
]
with pytest.raises(
BriefcaseCommandError,
match="Docker is installed and available for use but the buildx plugin\nis not installed",
):
Docker.verify(mock_tools)
def test_docker_image_hint(mock_tools):
"""If an image_tag is passed to verification, it is used for the user mapping
check."""
# Mock the return values for Docker verification
mock_tools.subprocess.check_output.side_effect = [
VALID_DOCKER_VERSION,
VALID_DOCKER_INFO,
VALID_BUILDX_VERSION,
VALID_USER_MAPPING_IMAGE_CACHE,
]
Docker.verify(mock_tools, image_tag="myimage:tagtorulethemall")
mock_tools.subprocess.run.assert_has_calls(
[
call(
[
"docker",
"run",
"--rm",
"--volume",
f"{Path.cwd() / 'build'}:/host_write_test:z",
"myimage:tagtorulethemall",
"touch",
PurePosixPath("/host_write_test/container_write_test"),
],
check=True,
),
call(
[
"docker",
"run",
"--rm",
"--volume",
f"{Path.cwd() / 'build'}:/host_write_test:z",
"myimage:tagtorulethemall",
"rm",
"-f",
PurePosixPath("/host_write_test/container_write_test"),
],
check=True,
),
]
)
def test_user_mapping_write_file_path(mock_tools):
"""The write test file path is as expected."""
expected_path = Path.cwd() / "build" / "container_write_test"
assert Docker(mock_tools)._write_test_path() == expected_path
def test_user_mapping_write_file_exists(mock_tools, mock_write_test_path):
"""Docker verification fails when the container write test file exists and cannot be
deleted."""
# Mock the return values for Docker verification
mock_tools.subprocess.check_output.side_effect = [
VALID_DOCKER_VERSION,
VALID_DOCKER_INFO,
VALID_BUILDX_VERSION,
VALID_USER_MAPPING_IMAGE_CACHE,
]
# Mock failure for deleting an existing write test file
mock_write_test_path.unlink = MagicMock(side_effect=OSError("delete failed"))
# Fails when file cannot be deleted
with pytest.raises(
BriefcaseCommandError,
match="file path used to determine how Docker is mapping users",
):
Docker.verify(mock_tools)
def test_user_mapping_write_test_file_creation_fails(mock_tools, mock_write_test_path):
"""Docker verification fails if the write test file cannot be written."""
# Mock the return values for Docker verification
mock_tools.subprocess.check_output.side_effect = [
VALID_DOCKER_VERSION,
VALID_DOCKER_INFO,
VALID_BUILDX_VERSION,
VALID_USER_MAPPING_IMAGE_CACHE,
]
    # Mock failure for creating the write test file inside the container
mock_tools.subprocess.run.side_effect = subprocess.CalledProcessError(
returncode=1, cmd=["docker", "run", "..."]
)
    # Fails when the write test file cannot be created
with pytest.raises(
BriefcaseCommandError,
match="Unable to determine if Docker is mapping users",
):
Docker.verify(mock_tools)
def test_user_mapping_write_test_file_cleanup_fails(mock_tools, mock_write_test_path):
"""Docker verification fails if the write test file cannot be removed after the
test."""
# Mock the return values for Docker verification
mock_tools.subprocess.check_output.side_effect = [
VALID_DOCKER_VERSION,
VALID_DOCKER_INFO,
VALID_BUILDX_VERSION,
VALID_USER_MAPPING_IMAGE_CACHE,
]
    # Mock successful creation but failed cleanup of the write test file
mock_tools.subprocess.run.side_effect = [
"container write test file creation succeeded",
subprocess.CalledProcessError(returncode=1, cmd=["docker", "run", "..."]),
]
    # Fails when the write test file cannot be cleaned up
with pytest.raises(
BriefcaseCommandError,
match="Unable to clean up from determining if Docker is mapping users",
):
Docker.verify(mock_tools)
@pytest.mark.parametrize("file_owner_id, expected", [(1000, True), (0, False)])
def test_user_mapping_setting(
mock_tools,
user_mapping_run_calls,
file_owner_id,
expected,
):
"""If the write test file is not owned by root, user mapping is enabled, else
disabled."""
# Mock the return values for Docker verification
mock_tools.subprocess.check_output.side_effect = [
VALID_DOCKER_VERSION,
VALID_DOCKER_INFO,
VALID_BUILDX_VERSION,
VALID_USER_MAPPING_IMAGE_CACHE,
]
stat_result = namedtuple("stat_result", "st_uid")
# Mock the os.stat call returning a file owned by file_owner_id
mock_tools.os.stat = MagicMock(return_value=stat_result(st_uid=file_owner_id))
docker = Docker.verify(mock_tools)
# Docker user mapping inspection occurred
mock_tools.subprocess.run.assert_has_calls(user_mapping_run_calls)
assert docker.is_user_mapped is expected
| path: /solara/website/pages/examples/fullscreen/layout_demo.py | repo: widgetti/solara | license: MIT | language: Python |
redirect = "/apps/layout-demo"
Page = True
| path: /conans/test/functional/graph/test_graph_build_mode.py | repo: conan-io/conan | license: MIT | language: Python |
import pytest
from conans.test.assets.genconanfile import GenConanfile
from conans.test.utils.tools import TestClient
@pytest.fixture(scope="module")
def build_all():
""" Build a simple graph to test --build option
        foobar <- bar <- foo
        foobar <------------ foo   (foobar also depends directly on foo)
All packages are built from sources to keep a cache.
:return: TestClient instance
"""
client = TestClient()
client.save({"conanfile.py": GenConanfile().with_setting("build_type")})
client.run("export . --name=foo --version=1.0 --user=user --channel=testing")
client.save({"conanfile.py": GenConanfile().with_require("foo/1.0@user/testing")
.with_setting("build_type")})
client.run("export . --name=bar --version=1.0 --user=user --channel=testing")
client.save({"conanfile.py": GenConanfile().with_require("foo/1.0@user/testing")
.with_require("bar/1.0@user/testing")
.with_setting("build_type")})
client.run("export . --name=foobar --version=1.0 --user=user --channel=testing")
client.run("install --requires=foobar/1.0@user/testing --build='*'")
return client
foo_id = "efa83b160a55b033c4ea706ddb980cd708e3ba1b"
bar_id = "7d0bb2b97d4339b0d3ded1418a2593f35b9cf267"
foobar_id = "af8f885f621ba7baac3f5b1d2c18cfdf5ba2550c"
def check_if_build_from_sources(refs_modes, output):
for ref, mode in refs_modes.items():
if mode == "Build":
assert "{}/1.0@user/testing: Forced build from source".format(ref) in output
else:
assert "{}/1.0@user/testing: Forced build from source".format(ref) not in output
def test_install_build_single(build_all):
""" When only --build=<ref> is passed, only <ref> must be built
"""
build_all.run("install --requires=foobar/1.0@user/testing --build=foo/*")
build_all.assert_listed_binary({"bar/1.0@user/testing": (bar_id, "Cache"),
"foo/1.0@user/testing": (foo_id, "Build"),
"foobar/1.0@user/testing": (foobar_id, "Cache"),
})
assert "foo/1.0@user/testing: Forced build from source" in build_all.out
assert "bar/1.0@user/testing: Forced build from source" not in build_all.out
assert "foobar/1.0@user/testing: Forced build from source" not in build_all.out
assert "No package matching" not in build_all.out
def test_install_build_double(build_all):
""" When both --build=<ref1> and --build=<ref2> are passed, only both should be built
"""
build_all.run("install --requires=foobar/1.0@user/testing --build=foo/* --build=bar/*")
build_all.assert_listed_binary({"bar/1.0@user/testing": (bar_id, "Build"),
"foo/1.0@user/testing": (foo_id, "Build"),
"foobar/1.0@user/testing": (foobar_id, "Cache"),
})
assert "foo/1.0@user/testing: Forced build from source" in build_all.out
assert "bar/1.0@user/testing: Forced build from source" in build_all.out
assert "foobar/1.0@user/testing: Forced build from source" not in build_all.out
assert "No package matching" not in build_all.out
@pytest.mark.parametrize("build_arg,mode", [
("--build=", "Cache"),
("--build=*", "Build")])
def test_install_build_only(build_arg, mode, build_all):
""" When only --build is passed, all packages must be built from sources
When only --build= is passed, it's considered an error
When only --build=* is passed, all packages must be built from sources
"""
build_all.run("install --requires=foobar/1.0@user/testing {}".format(build_arg))
build_all.assert_listed_binary({"bar/1.0@user/testing": (bar_id, mode),
"foo/1.0@user/testing": (foo_id, mode),
"foobar/1.0@user/testing": (foobar_id, mode),
})
if "Build" == mode:
assert "foo/1.0@user/testing: Forced build from source" in build_all.out
assert "bar/1.0@user/testing: Forced build from source" in build_all.out
assert "foobar/1.0@user/testing: Forced build from source" in build_all.out
# FIXME assert "No package matching" not in build_all.out
else:
assert "foo/1.0@user/testing: Forced build from source" not in build_all.out
assert "bar/1.0@user/testing: Forced build from source" not in build_all.out
assert "foobar/1.0@user/testing: Forced build from source" not in build_all.out
# FIXME assert "No package matching" in build_all.out
@pytest.mark.parametrize("build_arg,bar,foo,foobar", [("--build=", "Cache", "Build", "Cache"),
("--build=*", "Build", "Build", "Build")])
def test_install_build_all_with_single(build_arg, bar, foo, foobar, build_all):
""" When --build is passed with another package, only the package must be built from sources.
When --build= is passed with another package, only the package must be built from sources.
When --build=* is passed with another package, all packages must be built from sources.
"""
build_all.run("install --requires=foobar/1.0@user/testing --build=foo/* {}".format(build_arg))
build_all.assert_listed_binary({"bar/1.0@user/testing": (bar_id, bar),
"foo/1.0@user/testing": (foo_id, foo),
"foobar/1.0@user/testing": (foobar_id, foobar),
})
check_if_build_from_sources({"foo": foo, "bar": bar, "foobar": foobar}, build_all.out)
@pytest.mark.parametrize("build_arg,bar,foo,foobar", [("--build=", "Cache", "Cache", "Cache"),
("--build=*", "Build", "Cache", "Build")])
def test_install_build_all_with_single_skip(build_arg, bar, foo, foobar, build_all):
""" When --build is passed with a skipped package, not all packages must be built from sources.
When --build= is passed with another package, only the package must be built from sources.
When --build=* is passed with another package, not all packages must be built from sources.
        The order of the arguments matters, which is why we run the command twice.
"""
for argument in ["--build=!foo/* {}".format(build_arg),
"{} --build=!foo/*".format(build_arg)]:
build_all.run("install --requires=foobar/1.0@user/testing {}".format(argument))
build_all.assert_listed_binary({"bar/1.0@user/testing": (bar_id, bar),
"foo/1.0@user/testing": (foo_id, foo),
"foobar/1.0@user/testing": (foobar_id, foobar),
})
check_if_build_from_sources({"foo": foo, "bar": bar, "foobar": foobar}, build_all.out)
@pytest.mark.parametrize("build_arg,bar,foo,foobar", [("--build=", "Cache", "Cache", "Cache"),
("--build=*", "Cache", "Cache", "Build")])
def test_install_build_all_with_double_skip(build_arg, bar, foo, foobar, build_all):
""" When --build is passed with a skipped package, not all packages must be built from sources.
When --build= is passed with another package, only the package must be built from sources.
When --build=* is passed with another package, not all packages must be built from sources.
        The order of the arguments matters, which is why we run the command twice.
"""
for argument in ["--build=!foo/* --build=~bar/* {}".format(build_arg),
"{} --build=!foo/* --build=~bar/*".format(build_arg)]:
build_all.run("install --requires=foobar/1.0@user/testing {}".format(argument))
build_all.assert_listed_binary({"bar/1.0@user/testing": (bar_id, bar),
"foo/1.0@user/testing": (foo_id, foo),
"foobar/1.0@user/testing": (foobar_id, foobar),
})
def test_report_matches(build_all):
""" When a wrong reference is passed to be build, an error message should be shown
"""
build_all.run("install --requires=foobar/1.0@user/testing --build=* --build=baz/*")
build_all.assert_listed_binary({"foobar/1.0@user/testing": (foobar_id, "Build")})
# FIXME assert "No package matching 'baz' pattern found." in build_all.out
build_all.run("install --requires=foobar/1.0@user/testing --build=* --build=!baz/*")
# FIXME assert "No package matching 'baz' pattern found." in build_all.out
build_all.assert_listed_binary({"foobar/1.0@user/testing": (foobar_id, "Build")})
build_all.run("install --requires=foobar/1.0@user/testing --build=* --build=~baz/* --build=blah")
# FIXME assert "No package matching 'blah' pattern found." in build_all.out
# FIXME assert "No package matching 'baz' pattern found." in build_all.out
build_all.assert_listed_binary({"foobar/1.0@user/testing": (foobar_id, "Build")})
build_all.run("install --requires=foobar/1.0@user/testing --build=* --build=!baz/* --build=~blah")
# FIXME assert "No package matching 'blah' pattern found." in build_all.out
# FIXME assert "No package matching 'baz' pattern found." in build_all.out
build_all.assert_listed_binary({"foobar/1.0@user/testing": (foobar_id, "Build")})
| path: /sdk/ml/azure-ai-ml/azure/ai/ml/_schema/assets/federated_learning_silo.py | repo: Azure/azure-sdk-for-python | license: MIT | language: Python |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# TODO: determine where this file should live.
from marshmallow import fields
from azure.ai.ml._schema.core.resource import YamlFileSchema
from azure.ai.ml._utils._experimental import experimental
from azure.ai.ml._schema.job.input_output_fields_provider import InputsField
# Inherits from YamlFileSchema instead of something more specific because
# this does not represent a server-side resource.
@experimental
class FederatedLearningSiloSchema(YamlFileSchema):
"""The YAML definition of a silo for describing a federated learning data target.
Unlike most SDK/CLI schemas, this schema does not represent an AML resource;
it is merely used to simplify the loading and validation of silos which are used
to create FL pipeline nodes.
"""
compute = fields.Str()
datastore = fields.Str()
inputs = InputsField()
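# An illustrative silo YAML that this schema is meant to load (all values are
# hypothetical):
#
#     compute: azureml:silo1-compute
#     datastore: azureml:silo1_datastore
#     inputs:
#       silo_data:
#         type: uri_folder
#         path: azureml://datastores/silo1_datastore/paths/federated-data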
| path: /vnpy/data/tdx/refill_tdx_future_bars.py | repo: msincenselee/vnpy | license: MIT | language: Python |
# flake8: noqa
"""
Download 1-minute bars for TDX (TongDaXin) index futures contracts => <vnpy project root>/bar_data/
"""
import os
import sys
import json
from collections import OrderedDict
import pandas as pd
vnpy_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
if vnpy_root not in sys.path:
sys.path.append(vnpy_root)
os.environ["VNPY_TESTING"] = "1"
from vnpy.data.tdx.tdx_future_data import *
# Destination folder for the saved 1-minute index bars
bar_data_folder = os.path.abspath(os.path.join(vnpy_root, 'bar_data'))
# Start date (each additional year takes roughly a few minutes to download)
start_date = '20160101'
# Create the API object
api_01 = TdxFutureData()
# Refresh the local contract cache
api_01.update_mi_contracts()
# Download and update each index contract in turn
for underlying_symbol in api_01.future_contracts.keys():
index_symbol = underlying_symbol + '99'
    print(f'Start updating: {index_symbol}')
    # CSV data file name
    bar_file_path = os.path.abspath(os.path.join(bar_data_folder, f'{underlying_symbol}99_{start_date}_1m.csv'))
    # If the file already exists, resume from its last timestamp
if os.path.exists(bar_file_path):
df_old = pd.read_csv(bar_file_path, index_col=0)
df_old = df_old.rename(lambda x: pd.to_datetime(x, format="%Y-%m-%d %H:%M:%S"))
        # Take the timestamp of the last row and back off one day for overlap
        last_dt = df_old.index[-1]
        start_dt = last_dt - timedelta(days=1)
        print(f'File {bar_file_path} exists, last time: {last_dt}')
else:
df_old = None
start_dt = datetime.strptime(start_date, '%Y%m%d')
        print(f'File {bar_file_path} does not exist, start time: {start_date}')
result, bars = api_01.get_bars(symbol=index_symbol,
period='1min',
callback=None,
start_dt=start_dt,
return_bar=False)
# [dict] => dataframe
if not result or len(bars) == 0:
continue
df_extern = pd.DataFrame(bars)
df_extern.set_index('datetime', inplace=True)
if df_old is not None:
        # Extend with the newly downloaded data
        print('Extending data')
data_df = pd.concat([df_old, df_extern], axis=0)
else:
data_df = df_extern
    # De-duplicate rows by timestamp
    print('De-duplicating by timestamp')
data_df = data_df[~data_df.index.duplicated(keep='first')]
    # Sort by time index
data_df = data_df.sort_index()
# print(data_df.head())
print(data_df.tail())
data_df.to_csv(bar_file_path, index=True)
    print(f'Updated {index_symbol} data => file {bar_file_path}')
print('Update finished')
os._exit(0)
|
f3502bf46debf3803ad9c081a967197558b2a103
|
1dbbb05b30d27c6419b9f34eea3b9a47f92582a0
|
/parlai/tasks/msc/agents.py
|
a0978d9f488937ef043e52999b135e771fddf8dc
|
[
"MIT"
] |
permissive
|
facebookresearch/ParlAI
|
815334323d0ebef51bf9837336fe3eef6fe1655d
|
e1d899edfb92471552bae153f59ad30aa7fca468
|
refs/heads/main
| 2023-08-31T22:20:45.918129
| 2023-08-14T19:39:56
| 2023-08-14T19:39:56
| 89,266,735
| 10,943
| 2,395
|
MIT
| 2023-09-13T23:07:40
| 2017-04-24T17:10:44
|
Python
|
UTF-8
|
Python
| false
| false
| 30,947
|
py
|
agents.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.teachers import DialogTeacher
from parlai.utils.io import PathManager
from parlai.core.opt import Opt
from parlai.utils.strings import normalize_reply
from parlai.core.teachers import MultiTaskTeacher
from .build import build
import os
import json
from typing import Optional
from parlai.core.params import ParlaiParser
import copy
import random
import math
from parlai.utils.logging import logger
from parlai.core.message import Message
import parlai.scripts.display_data as dsd
from parlai.tasks.convai2.agents import NormalizedTeacherTrait, SelfOriginalTeacher
from parlai.tasks.blended_skill_talk.agents import (
ContextGenerator as BaseContextGenerator,
)
from parlai.tasks.msc.constants import (
INITIAL_DATA_TO_COMPLETE,
MODEL_OPT,
UI_OPT,
COMMON_CONFIG,
)
import parlai.tasks.msc.mutators # type: ignore
NOPERSONA = '__NO__PERSONA__BEAM__MIN__LEN__20__'
DUMMY_TEXT = '__SILENCE__'
def get_sessionbase_dir_path(opt, dpath, task_name):
assert task_name in ['msc_personasummary', 'msc_dialogue']
dpath = os.path.join(dpath, 'msc', task_name, f'session_{opt.get("session_id", 0)}')
return dpath
def get_predicted_summary_path(dpath, is_session_level=True):
if is_session_level:
return os.path.join(
dpath, 'msc', 'msc_dialogue', 'sessionlevel_summaries_subsample5.json'
)
else:
return os.path.join(dpath, 'msc', 'msc_dialogue', 'summaries_subsample5.json')
class SessionBasePersonaSummaryTeacher(DialogTeacher):
"""
Teacher that summarizes the persona lines.
"""
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
super().add_cmdline_args(parser, partial_opt)
agent = parser.add_argument_group('MSC Persona Summary Teacher options')
agent.add_argument('--session-id', type=int, default=1, help="session id")
agent.add_argument(
'--summary-num-turns',
type=int,
default=-1,
help="number of turns to infer persona",
)
agent.add_argument(
'--nopersona-subsampling-weight',
type=float,
default=1,
help="subampling ratio ",
)
return parser
def __init__(self, opt, shared=None):
self.summary_num_turns = opt['summary_num_turns']
assert (
self.summary_num_turns < 0 or self.summary_num_turns % 2 == 0
), "Please choose an even number for turns"
self.session_id = opt['session_id']
        assert opt['session_id'] <= 4, f"No data beyond session {opt['session_id']}!"
        assert (
            opt['session_id'] <= 3 or 'train' not in opt['datatype']
        ), f"No train data beyond session {opt['session_id']}!"
self.nopersona_subsampling_weight = opt['nopersona_subsampling_weight']
if 'test' in opt['datatype']:
            logger.warning(f'No subsampling for {opt["datatype"]}')
self.nopersona_subsampling_weight = 1
assert (
self.nopersona_subsampling_weight >= 0
and self.nopersona_subsampling_weight <= 1
), "invalid subsampling weight"
dpath = build(opt)
opt['datafile'] = get_sessionbase_dir_path(opt, dpath, 'msc_personasummary')
self.id = f'msc_personasummary_{self.session_id}'
super().__init__(opt, shared)
def setup_data(self, data_path):
print('loading: ' + data_path)
if self.datatype.startswith('train'):
path_to_open = os.path.join(data_path, 'train.txt')
elif self.datatype.startswith('valid'):
path_to_open = os.path.join(data_path, 'valid.txt')
else:
path_to_open = os.path.join(data_path, 'test.txt')
with PathManager.open(path_to_open) as f:
raw_data = [json.loads(line.strip()) for line in f]
data = []
negative_data = []
for dialog_dict in raw_data:
current_episode = dialog_dict['dialog']
init_personachat = dialog_dict['init_personachat']
for end_idx in range(len(current_episode)):
if self.summary_num_turns > 0:
start_index = max(0, end_idx - self.summary_num_turns + 1)
else:
start_index = 0
end_line_persona = (
current_episode[end_idx]['persona_text']
if 'persona_text' in current_episode[end_idx]
else NOPERSONA
)
dialog_texts = [
current_episode[i]['text'] for i in range(start_index, end_idx + 1)
]
action = {
'id': self.id,
'text': '\n'.join(dialog_texts),
'labels': [end_line_persona],
'initial_data_id': dialog_dict['initial_data_id'],
'init_personas': init_personachat['init_personas'],
'utt_idx': end_idx,
'speaker_idx': end_idx % 2 + 1,
'session_id': self.session_id,
}
if end_line_persona == NOPERSONA:
negative_data.append(action)
else:
data.append(action)
size_to_sample = math.ceil(
self.nopersona_subsampling_weight * len(negative_data)
)
data.extend(random.sample(negative_data, size_to_sample))
random.shuffle(data)
for episode in data:
yield Message(episode), True
class SessionBaseMscTeacher(DialogTeacher):
"""
    Teacher that generates text in the multi-session chat.
"""
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
super().add_cmdline_args(parser, partial_opt)
agent = parser.add_argument_group('Multi-Session Chat Task options')
agent.add_argument(
'--session-id',
type=int,
default=2,
help="session id, session_id = 1 refers to convai2 teacher and it's not supported here",
)
agent.add_argument(
'--previous-persona-type',
type=str,
default="raw_history",
choices=[
'none',
'goldsum_self',
'goldsum_both',
'goldsum_their',
'predsum_self',
'predsum_both',
'predsum_their',
'predsum_utt_self',
'predsum_utt_both',
'predsum_utt_their',
'init_self',
'init_both',
'init_their',
'raw_history',
],
help="type of previous context to include as context. "
"the 'goldsum_' prefix refers to gold persona summaries from crowdworkers; "
"the 'predsum_' prefix refers to predicted persona summaries from a summarization model; "
"the 'init_' prefix refers to the original persona lines used to ground the PersonaChat conversations. ",
)
agent.add_argument(
'--your-persona-first',
type=bool,
default=False,
help="whether to prepend your persona first or not",
)
agent.add_argument(
'--session-openning',
type=bool,
default=False,
help="whether to only include session opening or not",
)
agent.add_argument(
'--label-speaker-id',
type=str,
default="both",
choices=['self', 'both', 'their'],
help="the speaker id of the 'labels' field,",
)
agent.add_argument(
'--include-time-gap',
type=bool,
default=False,
help="whether to include time passed since last conversation in the context",
)
agent.add_argument(
'--history-time-gaps-token',
type=str,
default=None,
help="time tokens in the previous raw dialogue history, e.g. 'time:' ",
)
agent.add_argument(
'--history-person-tokens',
type=str,
default=None,
help="person tokens in the previous raw dialogue history, e.g. 'p1:,p2:' ",
)
agent.add_argument(
'--previous-session-delimiter',
type=str,
default=None,
help="delimiter between previous sessions in the context, such as '__NEXT_SESSION__' ",
)
return parser
def __init__(self, opt, shared=None):
        assert opt['session_id'] <= 5, f"No data beyond session {opt['session_id']}!"
        assert (
            opt['session_id'] <= 4 or 'train' not in opt['datatype']
        ), f"No train data beyond session {opt['session_id']}!"
assert (
not opt['previous_persona_type'].startswith('predsum')
or opt['session_id'] <= 4
or (
opt['session_id'] == 5
and ('valid' in opt['datatype'] or 'test' in opt['datatype'])
)
), f"No predicted summary for session {opt['session_id']}"
self.previous_persona_type = opt['previous_persona_type']
self.session_openning = opt.get('session_openning', False)
if self.session_openning:
opt['label_speaker_id'] = 'their'
# NOTE: session_id = 1: personachat
self.session_id = opt['session_id']
self.label_speaker_id = opt["label_speaker_id"]
self.your_persona_first = opt['your_persona_first']
self.include_last_time_gap = opt['include_time_gap']
self.history_time_gaps_token = opt['history_time_gaps_token']
if self.history_time_gaps_token:
self.include_last_time_gap = False
self.history_person_tokens = opt['history_person_tokens']
self.use_predicted_summary = self.previous_persona_type.startswith('predsum')
self.previous_session_delimiter = opt.get('previous_session_delimiter', None)
if self.history_person_tokens is not None:
self.history_person_tokens = self.history_person_tokens.split(",")
self.msc_dpath = build(opt)
opt['datafile'] = get_sessionbase_dir_path(opt, self.msc_dpath, 'msc_dialogue')
self.id = f'msc_dialogue_{self.session_id}'
super().__init__(opt, shared)
def normalize_replies(self, x):
xs = [xt.strip() for xt in x.split('\n')]
xs2 = []
for x in xs:
if 'your persona:' in x:
# Normalize the sentence appearing after 'your persona:'
x = x[len('your persona: ') :]
x = normalize_reply(x)
x = 'your persona: ' + x
elif "partner's persona: " in x:
x = x[len("partner's persona: ") :]
x = normalize_reply(x)
x = "partner's persona: " + x
elif x != DUMMY_TEXT:
x = normalize_reply(x)
xs2.append(x)
return "\n".join(xs2)
def setup_data(self, datafile):
print('loading: ' + datafile)
if self.datatype.startswith('train'):
path_to_open = os.path.join(datafile, 'train.txt')
elif self.datatype.startswith('valid'):
path_to_open = os.path.join(datafile, 'valid.txt')
else:
path_to_open = os.path.join(datafile, 'test.txt')
with PathManager.open(path_to_open) as f:
raw_data = [json.loads(line.strip()) for line in f]
data = []
label_speaker_id_range = {}
predicted_summary_dict = {}
if self.use_predicted_summary:
            is_session_level = 'utt_' not in self.previous_persona_type
predsum_path = get_predicted_summary_path(self.msc_dpath, is_session_level)
logger.warning(f"use the predicted summary from {predsum_path}")
with PathManager.open(predsum_path) as jsonfile:
predicted_summary_dict = json.load(jsonfile)
def _get_time_gap(time_num, time_unit, time_token=""):
time_gap = str(time_num) + ' ' + time_unit
return f'{time_token} {time_gap}' if len(time_token) > 0 else time_gap
def _compile_persona_dialog_input(
dialog, personas, previous_dialogs, label_speaker_id
):
new_dialog = copy.deepcopy(dialog)
new_previous_dialogs = copy.deepcopy(previous_dialogs)
your_persona = ""
partner_persona = ""
if label_speaker_id == 'self':
your_persona = '\n'.join([f'your persona: {x}' for x in personas[1]])
partner_persona = '\n'.join(
[f"partner's persona: {x}" for x in personas[0]]
)
elif label_speaker_id == 'their':
your_persona = '\n'.join([f'your persona: {x}' for x in personas[0]])
partner_persona = '\n'.join(
[f"partner's persona: {x}" for x in personas[1]]
)
for prev_dialog in new_previous_dialogs:
prev_dialog['dialog'].insert(0, {"text": DUMMY_TEXT})
if len(prev_dialog['dialog']) % 2 == 1 and (
self.history_person_tokens is None
):
prev_dialog['dialog'].append({"text": DUMMY_TEXT})
new_dialog.insert(0, {"text": DUMMY_TEXT})
return your_persona, partner_persona, new_dialog, new_previous_dialogs
for dialog_dict in raw_data:
initial_data_id = dialog_dict['metadata']['initial_data_id']
if self.label_speaker_id == 'both':
label_speaker_id_range = ['their', 'self']
else:
label_speaker_id_range = [self.label_speaker_id]
for label_speaker_id in label_speaker_id_range:
                if self.use_predicted_summary:
                    personas_to_compile = predicted_summary_dict[
                        str(self.session_id - 1)
                    ][initial_data_id]
                elif self.previous_persona_type.startswith('init'):
                    personas_to_compile = dialog_dict['init_personas']
                else:
                    personas_to_compile = dialog_dict['personas']
(
your_persona,
partner_persona,
new_dialog,
new_previous_dialogs,
) = _compile_persona_dialog_input(
dialog_dict['dialog'],
                    personas_to_compile,
dialog_dict['previous_dialogs'],
label_speaker_id,
)
previous_sessions_msgs = []
if self.previous_persona_type == 'raw_history':
for d_id in range(len(new_previous_dialogs)):
previous_dialog_msg = [
x['text'] for x in new_previous_dialogs[d_id]['dialog']
]
if self.history_person_tokens:
previous_dialog_msg = [
self.history_person_tokens[i % 2] + ' ' + text
for i, text in enumerate(previous_dialog_msg)
if text != DUMMY_TEXT
]
if self.history_time_gaps_token:
time_gap_i = _get_time_gap(
new_previous_dialogs[d_id]['time_num'],
new_previous_dialogs[d_id]['time_unit'],
time_token=self.history_time_gaps_token,
)
previous_sessions_msgs.append(
'\n'.join(previous_dialog_msg + [time_gap_i])
)
else:
previous_sessions_msgs.append(
'\n'.join(previous_dialog_msg)
)
if self.previous_session_delimiter is not None:
previous_sessions_msgs = [
val
for pair in zip(
previous_sessions_msgs,
[self.previous_session_delimiter]
* len(previous_sessions_msgs),
)
for val in pair
]
previous_sessions_msgs = '\n'.join(previous_sessions_msgs)
episode = []
for i in range(0, len(new_dialog) - 1, 2):
text = new_dialog[i]['text']
partner_persona_one_line = partner_persona.replace('\n', '').split(
"partner's persona: "
)
your_persona_one_line = your_persona.replace('\n', '').split(
"your persona: "
)
action = {
'id': self.id,
'text': self.normalize_replies(text),
'labels': [self.normalize_replies(new_dialog[i + 1]['text'])],
'session_id': self.session_id,
'initial_data_id': initial_data_id,
'personas': f'{partner_persona}\n{your_persona}',
'personas_one_line': f"partner's persona: {' '.join(partner_persona_one_line)}\nyour persona: {' '.join(your_persona_one_line)}",
}
if i == 0:
action.update(
{
'time_num': dialog_dict['previous_dialogs'][-1][
'time_num'
],
'time_unit': dialog_dict['previous_dialogs'][-1][
'time_unit'
],
}
)
episode.append(action)
if self.session_openning:
break
persona_context_str = ""
if 'self' in self.previous_persona_type:
persona_context_str = your_persona
elif 'their' in self.previous_persona_type:
persona_context_str = partner_persona
elif 'both' in self.previous_persona_type:
if self.your_persona_first:
persona_context_str = (
(your_persona + '\n') if len(your_persona) > 0 else ""
) + partner_persona
else:
persona_context_str = (
(partner_persona + '\n') if len(partner_persona) > 0 else ""
) + your_persona
elif self.previous_persona_type == 'raw_history':
persona_context_str = previous_sessions_msgs
if self.include_last_time_gap:
time_gap = _get_time_gap(
dialog_dict['previous_dialogs'][-1]['time_num'],
dialog_dict['previous_dialogs'][-1]['time_unit'],
)
persona_context_str = (
(persona_context_str + '\n')
if len(persona_context_str) > 0
else ""
) + f'[{time_gap}]'
if persona_context_str and len(persona_context_str) > 0:
episode[0]['text'] = persona_context_str + '\n' + episode[0]['text']
data.append(episode)
for episode in data:
start_idx = 0
for i, turn in enumerate(episode):
yield Message(turn), i == start_idx
class PersonaSummaryTeacher(MultiTaskTeacher):
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
super().add_cmdline_args(parser, partial_opt)
parser = parser.add_argument_group('MSC Summary Teacher Args')
parser.add_argument(
'--include-last-session',
type=bool,
default=False,
help="whether to include session 4 for valid and test splits",
)
SessionBasePersonaSummaryTeacher.add_cmdline_args(parser, partial_opt)
return parser
def __init__(self, opt, shared=None):
msc_tasks = [
'msc:SessionBasePersonaSummary:session_id=1',
'msc:SessionBasePersonaSummary:session_id=2',
'msc:SessionBasePersonaSummary:session_id=3',
]
if opt.get('include_last_session', False) and 'train' not in opt['datatype']:
msc_tasks += ['msc:SessionBasePersonaSummary:session_id=4']
opt = copy.deepcopy(opt)
opt['task'] = ','.join(msc_tasks)
super().__init__(opt, shared)
class Session1NormalizedTrait(NormalizedTeacherTrait):
"""
    Trait for flattening personas into one line.
"""
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
super().add_cmdline_args(parser, partial_opt)
agent = parser.add_argument_group('Session Level NormalizedTeacher arguments')
agent.add_argument(
'--is-convai2-session-level',
type=bool,
default=False,
help="whether to flatten the persona lines into a single persona line per speaker",
)
return agent
def __init__(self, opt, shared=None):
self.is_convai2_session_level = opt.get('is_convai2_session_level', False)
super().__init__(opt, shared)
def normalize_replies(self, x):
xs = x.split('\n')
your_personas = []
partner_personas = []
non_personas = []
for x in xs:
if x.startswith('your persona: '):
# Normalize the sentence appearing after 'your persona:'
x = x[len('your persona: ') :]
x = normalize_reply(x)
your_personas.append(x)
elif x.startswith("partner's persona: "):
x = x[len("partner's persona: ") :]
x = normalize_reply(x)
partner_personas.append(x)
else:
x = normalize_reply(x)
non_personas.append(x)
xs2 = []
if not self.is_convai2_session_level:
your_personas = ['your persona: ' + yx for yx in your_personas]
partner_personas = ["partner's persona: " + px for px in partner_personas]
else:
if your_personas:
your_personas = ['your persona: ' + " ".join(your_personas)]
if partner_personas:
partner_personas = ["partner's persona: " + " ".join(partner_personas)]
if self.your_persona_first:
xs2.extend(your_personas)
xs2.extend(partner_personas)
else:
xs2.extend(partner_personas)
xs2.extend(your_personas)
xs2.extend(non_personas)
return '\n'.join(xs2)
class Session1SelfTeacher(Session1NormalizedTrait, SelfOriginalTeacher):
"""
Convai2 as Session 1.
"""
pass
class MscTeacher(MultiTaskTeacher):
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
super().add_cmdline_args(parser, partial_opt)
parser = parser.add_argument_group('Multi Session Chat (MSC) Teacher Args')
parser.add_argument(
'--include-session1',
type=bool,
default=True,
help="whether to include session 1 (convai2:normalized)",
)
parser.add_argument(
'--include-last-session',
type=bool,
default=False,
help="whether to include session 5",
)
SessionBaseMscTeacher.add_cmdline_args(parser, partial_opt)
Session1SelfTeacher.add_cmdline_args(parser, partial_opt)
return parser
def __init__(self, opt, shared=None):
msc_tasks = [
'msc:SessionBaseMsc:session_id=2',
'msc:SessionBaseMsc:session_id=3',
'msc:SessionBaseMsc:session_id=4',
]
if opt.get('include_session1', False) and not opt['session_openning']:
if opt['previous_persona_type'] in [
'predsum_self',
'predsum_both',
'predsum_their',
]:
msc_tasks = [
'msc:Session1Self:is_convai2_session_level=True'
] + msc_tasks
else:
msc_tasks = [
'msc:Session1Self:is_convai2_session_level=False'
] + msc_tasks
if opt.get('include_last_session', False) and 'train' not in opt['datatype']:
msc_tasks += ['msc:SessionBaseMsc:session_id=5']
opt = copy.deepcopy(opt)
opt['task'] = ','.join(msc_tasks)
super().__init__(opt, shared)
class DefaultTeacher(MscTeacher):
pass
class ContextGenerator(BaseContextGenerator):
"""
    Generates contexts shown to bots for generating prompts when collecting human-human
    follow-up chats in the personal knowledge human evaluation.
    This generator was used to generate the context information shown to bots at the
    beginning of a conversation, when crowdsourcing the conversations used for per-turn
    human evaluation.
"""
def __init__(self, override_opt, datatype='valid', seed: Optional[int] = None):
"""
        Initialize the context generator.
override_opt: only a 'datapath' key is required, to specify the ParlAI data folder
"""
def setup_opt(opt):
parser = dsd.setup_args()
parser.set_params(**opt)
return parser.parse_args([])
if seed is not None:
self.rng = random.Random(seed)
else:
self.rng = random.Random()
with open(override_opt['completed_run_stats']) as f:
override_opt.update(json.load(f))
bot_model_name = override_opt['bot_model_name']
bot_msc_opt = copy.deepcopy(COMMON_CONFIG)
bot_msc_opt.update(MODEL_OPT[bot_model_name])
ui_msc_opt = copy.deepcopy(COMMON_CONFIG)
ui_msc_opt.update(UI_OPT[bot_model_name])
self.ui_msc_teacher = SessionBaseMscTeacher(setup_opt(ui_msc_opt))
self.bot_msc_teacher = SessionBaseMscTeacher(setup_opt(bot_msc_opt))
self.bot_sorted_initial_data_indices_to_episode = {}
self.ui_sorted_initial_data_indices_to_episode = {}
self.initial_data_indices_to_complete = override_opt.get(
'initial_data_indices_to_complete', INITIAL_DATA_TO_COMPLETE
)
self._set_teacher_data_map()
self.context_done_statistics = copy.deepcopy(
override_opt.get('context_done_statistics', {})
)
def get_context(self, model_name: str = None) -> dict:
"""
Get context information to be shown at the beginning of one conversation.
Values in return dict:
- context_dataset: the dataset ('msc') used to generate the context information.
- your_persona_strings: persona strings for the "self" side
- their_persona_strings: persona strings for the "partner" side
        - context_for_bot_prompt: text of dialogue context shown to the bot to generate the session openings
        - observation_for_bot: observation containing dialogue context shown to the bot to generate the session openings
        - time_num: number of hours/days that have transpired since the last chat session
        - time_unit: unit (hours/days) of the time that has transpired since the last chat session
"""
# Determine which dataset we will show context for
if model_name not in self.context_done_statistics:
self.context_done_statistics[model_name] = []
initial_data_indices_list = [
x
for x in self.initial_data_indices_to_complete
if x not in self.context_done_statistics[model_name]
]
if len(initial_data_indices_list) == 0:
return None
# Select episode
initial_data_index = self.rng.sample(initial_data_indices_list, 1)[0]
        # Mark this context as selected
self.context_done_statistics[model_name].append(initial_data_index)
# Extract personas
return self._extract_personas(initial_data_index)
def _set_teacher_data_map(self):
self.ui_sorted_initial_data_indices_to_episode = {
episode[0]['initial_data_id']: episode
for episode in self.ui_msc_teacher.data.data
}
self.bot_sorted_initial_data_indices_to_episode = {
episode[0]['initial_data_id']: episode
for episode in self.bot_msc_teacher.data.data
}
def _extract_personas(self, initial_data_index: str) -> dict:
"""
For the given ConvAI2 conversation, return strings of both speakers' personas.
"""
ui_first_entry = self.ui_sorted_initial_data_indices_to_episode[
initial_data_index
][0]
bot_first_entry = self.bot_sorted_initial_data_indices_to_episode[
initial_data_index
][0]
ui_context = ui_first_entry['text'].split('\n')
your_persona_strings = []
their_persona_strings = []
for str_ in ui_context[:-1]: # The last string is the first utterance
if str_.startswith('your persona: '): # Here, "you" are Person 2
your_persona_strings.append(str_[len('your persona: ') :])
elif str_.startswith("partner's persona: "):
their_persona_strings.append(str_[len("partner's persona: ") :])
return {
'context_dataset': bot_first_entry['id'],
'your_persona_strings': your_persona_strings,
'their_persona_strings': their_persona_strings,
'context_for_bot_prompt': bot_first_entry['text'],
'observation_for_bot': bot_first_entry,
'time_num': bot_first_entry['time_num'],
'time_unit': bot_first_entry['time_unit'],
}
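# --- Usage sketch (not part of the original file) ---
# The teachers above register under the `msc` task, so they can be inspected with
# ParlAI's standard data viewer, e.g.:
#
#     parlai display_data --task msc --session-id 2 --previous-persona-type raw_history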
|
bae5390abae4a1d52fed5d353f7e59d869c7edae
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/22_专题/滑动窗口替换k次最长连续 1 模型/1156. 单字符重复子串的最大长度-变形题.py
|
e9dbdd94e68d59d0b78797385e6a52878d5c8c02
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,263
|
py
|
1156. 单字符重复子串的最大长度-变形题.py
|
from collections import Counter
from itertools import groupby
# 1 <= text.length <= 20000
# Given a string text, you may swap two of its characters once (or do nothing); this yields substrings of a single repeated character. Return the length of the longest such substring.
# [k for k, g in groupby('AAAABBBCCDAABBB')] --> A B C D A B
# [list(g) for k, g in groupby('AAAABBBCCD')] --> AAAA BBB CC D
# Two cases to consider:
# 1. ..aaaabaaaa..  a run split by a single b: swap the b away (`remove 1 divider`)
# 2. ..aaaa...aa..  => borrow one a from another run (`extend 1`)
class Solution:
def maxRepOpt1(self, text: str) -> int:
counter = Counter(text)
groups = [[char, len(list(group))] for char, group in groupby(text)]
print(groups)
        # Case 1: extend by 1
res = max(min(count + 1, counter[char]) for char, count in groups)
        # Case 2: remove one divider
for i in range(1, len(groups) - 1):
if groups[i - 1][0] == groups[i + 1][0] and groups[i][1] == 1:
sameChar = groups[i - 1][0]
res = max(res, min(groups[i - 1][1] + groups[i + 1][1] + 1, counter[sameChar]))
return res
print(Solution().maxRepOpt1(text="aaabaaa"))
# Output: 6
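# A second hand-checked case (not in the original): one swap can remove at most one
# divider, e.g. swapping index 3 with index 5 turns "aaabbaaa" into "aaaabbaa".
print(Solution().maxRepOpt1(text="aaabbaaa"))
# Output: 4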
|
e39b39ab5a3f01153d068490ec222f0c05121561
|
abbc2d332bdfa036ac12438983e6d74cf4107e64
|
/SiamDW/SiamDW-RPN/siamrpn/config.py
|
fd4357c3f3a205c0fefa4fd9730f6702563019ae
|
[] |
permissive
|
HonglinChu/SiamTrackers
|
c494cff7543a433e8ec7dbf6d9439b1e7395b0c0
|
805208b5348346d35e64abcbe901a3829743e157
|
refs/heads/master
| 2023-08-29T06:50:59.532271
| 2023-03-06T09:13:53
| 2023-03-06T09:13:53
| 253,718,080
| 1,166
| 243
|
Apache-2.0
| 2023-08-03T16:39:53
| 2020-04-07T07:24:00
|
Python
|
UTF-8
|
Python
| false
| false
| 3,917
|
py
|
config.py
|
import numpy as np
class Config:
# dataset related
exemplar_size = 127 # exemplar size
    instance_size = 271  # instance size (SiamDW default)
context_amount = 0.5 # context amount
sample_type = 'uniform'
#--------
train_epoch_size = 1000 # ?
val_epoch_size = 100 #?
out_feature = 19 #??
    max_inter = 100  # ?? maximum frame interval
eps = 0.01 #???
#--------
# training related
exem_stretch = False
    ohem_pos = False  # originally all False; during training no non-maximum suppression is applied to the anchors, see rpn_cross_entropy_balance() in loss.py
    ohem_neg = False  # originally all False; during training no non-maximum suppression is applied to the anchors, see rpn_cross_entropy_balance() in loss.py
    ohem_reg = False  # originally all False; during training no non-maximum suppression is applied to the anchors, see rpn_smoothL1() in loss.py
fix_former_3_layers = True
scale_range = (0.001, 0.7)
ratio_range = (0.1, 10)
pairs_per_video_per_epoch = 2 # pairs per video
train_ratio = 0.99 # training ratio of VID dataset
frame_range_vid = 100 # frame range of choosing the instance
frame_range_ytb = 1
train_batch_size = 16 # training batch size 64
valid_batch_size = 16 # validation batch size 64
train_num_workers = 8 # number of workers of train dataloader
valid_num_workers = 8 # number of workers of validation dataloader
clip = 10 # grad clip
start_lr = 1e-2
end_lr = 1e-5
#--------------
warm_lr = 1e-3 #??
warm_scale = warm_lr/start_lr #??
#---------------
epoch = 50
lr = np.logspace(np.log10(start_lr), np.log10(end_lr), num=epoch)[0]
gamma = np.logspace(np.log10(start_lr), np.log10(end_lr), num=epoch)[1] / \
np.logspace(np.log10(start_lr), np.log10(end_lr), num=epoch)[0]
# decay rate of LR_Schedular
step_size = 1 # step size of LR_Schedular
momentum = 0.9 # momentum of SGD
weight_decay = 0.0005 # weight decay of optimizator
seed = 6666 # seed to sample training videos
log_dir = './models/logs' # log dirs
max_translate = 12 # max translation of random shift
scale_resize = 0.15 # scale step of instance image
total_stride = 8 # total stride of backbone
    valid_scope = int((instance_size - exemplar_size) / total_stride + 1)  # valid range of the anchors
#--------------------------------------------------
anchor_valid_scope = 2 * valid_scope + 1 #
#--------------------------------------------------
anchor_scales = np.array([8, ])
anchor_ratios = np.array([0.33, 0.5, 1, 2, 3])
anchor_num = len(anchor_scales) * len(anchor_ratios)
anchor_base_size = 8
pos_threshold = 0.6
neg_threshold = 0.3
num_pos = 16
num_neg = 48
lamb = 1 # cls:res =1:1
save_interval = 1
show_interval = 100
show_topK = 3
pretrained_model = './models/CIResNet22_PRETRAIN.model'
# tracking related
gray_ratio = 0.25
blur_ratio = 0.15
score_size = int((instance_size - exemplar_size) / total_stride + 1)
penalty_k = 0.22
window_influence = 0.40
lr_box = 0.30
min_scale = 0.1
max_scale = 10
def update(self, cfg):
for k, v in cfg.items():
setattr(self, k, v)
self.score_size = (self.instance_size - self.exemplar_size) //self.total_stride + 1 #
        # self.valid_scope = int((self.instance_size - self.exemplar_size) / self.total_stride / 2)  # valid range of the anchors
self.valid_scope= self.score_size
config = Config()
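# Usage sketch (not part of the original file): override defaults at runtime and let
# update() recompute the dependent sizes.
# config.update({'instance_size': 255})
# config.score_size == (255 - 127) // 8 + 1 == 17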
|
115cb7eec09a694ccf960e613886e6d3f04ac2af
|
73435e02a95865e74f03015ec0237f9b66a8318d
|
/aiogoogle/models.py
|
3b2f6efd65af2fc06bae9a6d501208ef85bb5692
|
[
"MIT"
] |
permissive
|
omarryhan/aiogoogle
|
fc33177c0e5a03a1a3a3a7093021689cbc9074fd
|
3728c77d246e58111d36aacc4f4733cad79b0b5d
|
refs/heads/master
| 2023-08-16T20:55:34.577527
| 2023-08-05T16:05:10
| 2023-08-05T16:05:10
| 157,001,739
| 168
| 53
|
MIT
| 2023-09-02T10:34:52
| 2018-11-10T16:55:49
|
Python
|
UTF-8
|
Python
| false
| false
| 15,440
|
py
|
models.py
|
from urllib.parse import urlparse, urlunparse, urlencode, parse_qs
from typing import AsyncIterable
import pprint
from .excs import HTTPError, AuthError, ValidationError
DEFAULT_DOWNLOAD_CHUNK_SIZE = 1024 * 1024
DEFAULT_UPLOAD_CHUNK_SIZE = 1024 * 1024
class ResumableUpload:
"""
    Resumable Upload Object. Works in conjunction with media upload
Arguments:
file_path (str): Full path of the file to be uploaded
upload_path (str): The URI path to be used for upload. Should be used in conjunction with the rootURL property at the API-level.
multipart (bool): True if this endpoint supports upload multipart media.
chunk_size (int): Size of a chunk of bytes that a session should read at a time when uploading in multipart.
"""
def __init__(self, multipart=None, chunk_size=None, upload_path=None):
self.upload_path = upload_path
self.multipart = multipart
self.chunk_size = chunk_size or DEFAULT_UPLOAD_CHUNK_SIZE
class MediaUpload:
"""
Media Upload
Arguments:
file_path_or_bytes (str, bytes): Full path or content of the file to be uploaded
upload_path (str): The URI path to be used for upload. Should be used in conjunction with the rootURL property at the API-level.
mime_range (list): list of MIME Media Ranges for acceptable media uploads to this method.
max_size (int): Maximum size of a media upload in bytes
multipart (bool): True if this endpoint supports upload multipart media.
        chunk_size (int): Size of a chunk of bytes that a session should read at a time when uploading in multipart.
        resumable (aiogoogle.models.ResumableUpload): A ResumableUpload object
validate (bool): Whether or not a session should validate the upload size before sending
pipe_from (file object, AsyncIterable): class object to stream file content from
"""
def __init__(
self,
file_path_or_bytes,
upload_path=None,
mime_range=None,
max_size=None,
multipart=False,
chunk_size=None,
resumable=None,
validate=True,
pipe_from=None
):
if isinstance(file_path_or_bytes, bytes):
self.file_body = file_path_or_bytes
self.file_path = None
else:
self.file_body = None
self.file_path = file_path_or_bytes
self.upload_path = upload_path
self.mime_range = mime_range
self.max_size = max_size
self.multipart = multipart
self.chunk_size = chunk_size or DEFAULT_UPLOAD_CHUNK_SIZE
self.resumable = resumable
self.validate = validate
self.pipe_from = pipe_from
async def run_validation(self, size_func):
if self.validate and self.max_size:
size = await size_func(self.file_path) if self.file_path else len(self.file_body)
if size > self.max_size:
raise ValidationError(
f'"{self}" has a size of {size / 1000}KB. '
f'Max upload size for this endpoint is: '
f'{self.max_size / 1000}KB.'
)
async def aiter_file(self, aiter_func):
if self.file_path:
async for chunk in aiter_func(self.file_path, self.chunk_size):
yield chunk
elif self.pipe_from:
if isinstance(self.pipe_from, AsyncIterable):
async for chunk in self.pipe_from:
yield chunk
else:
yield self.pipe_from.read()
else:
async for chunk in self._aiter_body():
yield chunk
async def _aiter_body(self):
for x in range(0, len(self.file_body), self.chunk_size):
yield self.file_body[x:x + self.chunk_size]
async def read_file(self, read_func):
return self.file_body or await read_func(self.file_path)
def __str__(self):
return self.file_path or "File object"
class MediaDownload:
"""
Media Download
Arguments:
file_path (str): Full path of the file to be downloaded
        chunk_size (int): Size of a chunk of bytes that a session should write at a time when downloading.
pipe_to (object): class object to stream file content to
"""
def __init__(self, file_path=None, chunk_size=None, pipe_to=None):
self.file_path = file_path
self.pipe_to = pipe_to
self.chunk_size = chunk_size or DEFAULT_DOWNLOAD_CHUNK_SIZE
class Request:
"""
Request class for the whole library. Auth Managers, GoogleAPI and Sessions should all use this.
.. note::
For HTTP body, only pass one of the following params:
- json: json as a dict
            - data: www-url-form-encoded form as a dict, bytes, or text
Parameters:
method (str): HTTP method as a string (upper case) e.g. 'GET'
        url (str): full url as a string. e.g. 'https://example.com/api/v1/resource?filter=filter#something'
        batch_url (str): full url for sending this request in a batch
        json (dict): json as a dict
        data (any): www-url-form-encoded form as a dict, bytes, or text
headers (dict): headers as a dict
media_download (aiogoogle.models.MediaDownload): MediaDownload object
media_upload (aiogoogle.models.MediaUpload): MediaUpload object
timeout (int): Individual timeout for this request
callback (callable): Synchronous callback that takes the content of the response as the only argument. Should also return content.
_verify_ssl (boolean): Defaults to True.
upload_file_content_type (str): Optional content-type header string. In case you don't want to use the default application/octet-stream (Or whatever is auto-detected by your transport handler)
"""
def __init__(
self,
method=None,
url=None,
batch_url=None,
headers=None,
json=None,
data=None,
media_upload=None,
media_download=None,
timeout=None,
callback=None,
_verify_ssl=True,
upload_file_content_type=None,
):
self.method = method
self.url = url
self.batch_url = batch_url
self.headers = {} if headers is None else headers
self.data = data
self.json = json
self.media_upload = media_upload
self.media_download = media_download
self.timeout = timeout
self.callback = callback
self._verify_ssl = _verify_ssl
self.upload_file_content_type = upload_file_content_type
def _add_query_param(self, query: dict):
url = self.url
if "?" not in url:
if url.endswith("/"):
url = url[:-1]
url += "?"
else:
url += "&"
query = urlencode(query)
url += query
self.url = url
def _rm_query_param(self, name: str):
u = urlparse(self.url)
query = parse_qs(u.query)
query.pop(name, None)
u = u._replace(query=urlencode(query, True))
self.url = urlunparse(u)
@classmethod
def batch_requests(cls, *requests):
"""
Given many requests, will create a batch request per https://developers.google.com/discovery/v1/batch
Arguments:
*requests (aiogoogle.models.Request): Request objects
Returns:
aiogoogle.models.Request:
"""
raise NotImplementedError
@classmethod
def from_response(cls, response):
return Request(
url=response.url,
headers=response.headers,
json=response.json,
data=response.data,
)
class Response:
"""
    Response Object
Arguments:
status_code (int): HTTP Status code
headers (dict): HTTP response headers
url (str): Request URL
json (dict): Json Response if any
data (any): data
reason (str): reason for http error if any
req (aiogoogle.models.Request): request that caused this response
download_file (str): path of the download file specified in the request
pipe_to (object): class object to stream file content to specified in the request.
upload_file (str): path of the upload file specified in the request
pipe_from (file object): class object to stream file content from
session_factory (aiogoogle.sessions.abc.AbstractSession): A callable implementation of aiogoogle's session interface
auth_manager (aiogoogle.auth.managers.ServiceAccountManager): Service account authorization manager.
user_creds (aiogoogle.auth.creds.UserCreds): user_creds to make an api call with.
"""
def __init__(
self,
status_code=None,
headers=None,
url=None,
json=None,
data=None,
reason=None,
req=None,
download_file=None,
pipe_to=None,
upload_file=None,
pipe_from=None,
session_factory=None,
auth_manager=None,
user_creds=None
):
if json and data:
raise TypeError("Pass either json or data, not both.")
self.status_code = status_code
self.headers = headers
self.url = url
self.json = json
self.data = data
self.reason = reason
self.req = req
self.download_file = download_file
self.pipe_to = pipe_to
self.upload_file = upload_file
self.pipe_from = pipe_from
self.session_factory = session_factory
self.auth_manager = auth_manager
# Used for refreshing tokens for the Oauth2 authentication workflow.
self.user_creds = user_creds
@staticmethod
async def _next_page_generator(
prev_res,
session_factory,
req_token_name=None,
res_token_name=None,
json_req=False,
):
from .auth.managers import ServiceAccountManager, Oauth2Manager
prev_url = None
while prev_res is not None:
# Avoid infinite looping if google sent the same token twice
if prev_url == prev_res.req.url:
break
prev_url = prev_res.req.url
# yield
yield prev_res.content
# get request for next page
next_req = prev_res.next_page(
req_token_name=req_token_name,
res_token_name=res_token_name,
json_req=json_req,
)
if next_req is not None:
async with session_factory() as sess:
if isinstance(prev_res.auth_manager, (ServiceAccountManager, Oauth2Manager)):
authorize_params = [next_req]
if isinstance(prev_res.auth_manager, ServiceAccountManager):
is_refreshed = await prev_res.auth_manager.refresh()
else:
is_refreshed, user_creds = await prev_res.auth_manager.refresh(prev_res.user_creds)
authorize_params.append(user_creds)
if is_refreshed is True:
prev_res.auth_manager.authorize(*authorize_params)
prev_res = await sess.send(next_req, full_res=True, auth_manager=prev_res.auth_manager)
else:
prev_res = None
def __call__(
self,
session_factory=None,
req_token_name=None,
res_token_name=None,
json_req=False,
):
"""
Returns a generator that yields the contents of the next pages if any (and this page as well)
Arguments:
session_factory (aiogoogle.sessions.abc.AbstractSession): A session factory
req_token_name (str):
* name of the next_page token in the request
* Default: "pageToken"
res_token_name (str):
* name of the next_page token in json response
* Default: "nextPageToken"
            json_req (bool): Normally, nextPageTokens should be sent in URL query params. If you want it in a json body, set this to True
Returns:
async generator: self._next_page_generator (staticmethod)
"""
if session_factory is None:
session_factory = self.session_factory
return self._next_page_generator(
self, session_factory, req_token_name, res_token_name, json_req
)
def __aiter__(self):
return self._next_page_generator(self, self.session_factory)
def __iter__(self):
raise TypeError(
'You probably forgot to use an "async for" statement instead of just a "for" statement.'
)
@property
def content(self):
"""
Equals either ``self.json`` or ``self.data``
"""
return self.json or self.data
def next_page(
self, req_token_name=None, res_token_name=None, json_req=False
) -> Request:
"""
Method that returns a request object that requests the next page of a resource
Arguments:
req_token_name (str):
* name of the next_page token in the request
* Default: "pageToken"
res_token_name (str):
* name of the next_page token in json response
* Default: "nextPageToken"
            json_req (bool): Normally, nextPageTokens should be sent in URL query params. If you want it in a json body, set this to True
Returns:
A request object (aiogoogle.models.Request):
"""
if req_token_name is None:
req_token_name = "pageToken"
if res_token_name is None:
res_token_name = "nextPageToken"
res_token = self.json.get(res_token_name, None)
if res_token == "":
res_token = None
if res_token is None:
return None
# request = Request.from_response(self)
request = self.req
if json_req:
request.json[req_token_name] = res_token
else:
request._rm_query_param(req_token_name)
request._add_query_param({req_token_name: res_token})
return request
@property
def error_msg(self):
if self.json is not None and self.json.get("error") is not None:
return pprint.pformat(self.json["error"])
def raise_for_status(self):
if self.status_code >= 400:
if self.error_msg is not None:
self.reason = "\n\n" + self.reason + "\n\nContent:\n" + self.error_msg
self.reason = "\n\n" + self.reason + "\n\nRequest URL:\n" + self.req.url
if self.status_code == 401:
raise AuthError(msg=self.reason, req=self.req, res=self)
else:
raise HTTPError(msg=self.reason, req=self.req, res=self)
def __str__(self):
return str(self.content)
def __repr__(self):
return f"Aiogoogle response model. Status: {str(self.status_code)}"
|
389c153d8d857f68d3f5336ec40650c9f8de61e4
|
2b8f195b10e8e12db1252318922668cb432ea8ab
|
/mvlearn/utils/_testing.py
|
20bdd2dd6401d76c98f4892d6f28792cb9bba106
|
[
"MIT"
] |
permissive
|
mvlearn/mvlearn
|
70fba0fc52e1467101adadf46cf61e7076838c2f
|
003dccea563926fca5d957f5bbf39c1494acfe94
|
refs/heads/main
| 2023-04-18T15:47:53.716354
| 2022-04-05T22:17:18
| 2022-04-05T22:17:18
| 206,838,300
| 136
| 17
|
MIT
| 2023-03-08T17:37:59
| 2019-09-06T16:56:51
|
Python
|
UTF-8
|
Python
| false
| false
| 684
|
py
|
_testing.py
|
from functools import partial
def requires_module(function, name, call=None):
"""Skip a test if package is not available (decorator)."""
import pytest
call = ("import %s" % name) if call is None else call
reason = "Test %s skipped, requires %s." % (function.__name__, name)
try:
        exec(call, globals(), locals())  # run the import; failure is caught below
except Exception as exc:
if len(str(exc)) > 0 and str(exc) != "No module named %s" % name:
reason += " Got exception (%s)" % (exc,)
skip = True
else:
skip = False
return pytest.mark.skipif(skip, reason=reason)(function)
requires_multiviewica = partial(requires_module, name="multiviewica")
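# Usage sketch (not part of the original file): decorate a pytest test so it is
# skipped when the optional dependency is missing.
#
#     @requires_multiviewica
#     def test_multiviewica_integration():
#         import multiviewica  # only reached when the package imports cleanly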
|
a663968b5cb001d46eff13243c1e6ad053dce639
|
3ca67d69abd4e74b7145b340cdda65532f90053b
|
/programmers/난이도별/level02.다리를_지나는_트럭/sangmandu.py
|
ed90c18d26c27ec2eabddd96ae8bddf26350a0c5
|
[] |
no_license
|
DKU-STUDY/Algorithm
|
19549516984b52a1c5cd73e1ed1e58f774d6d30e
|
6f78efdbefd8eedab24e43d74c7dae7f95c2893b
|
refs/heads/master
| 2023-02-18T06:48:39.309641
| 2023-02-09T07:16:14
| 2023-02-09T07:16:14
| 258,455,710
| 175
| 49
| null | 2023-02-09T07:16:16
| 2020-04-24T08:42:27
|
Python
|
UTF-8
|
Python
| false
| false
| 2,245
|
py
|
sangmandu.py
|
'''
https://programmers.co.kr/learn/courses/30/lessons/42583
Trucks crossing a bridge: given the bridge length, the bridge's weight limit, and the truck weights, find the time it takes for all trucks to cross.
A truck's crossing time is the sum of every waiting gap from when the previous truck departed until this truck departs, plus the bridge length, plus 1.
'''
from collections import deque
def solution(bridge_length, weight, truck_weights):
b, w, t = bridge_length, weight, truck_weights
time = deque([1])
wsum = deque([t[0]])
sec = 0
    for i in t[1:]:
if i <= weight - sum(wsum):
time.append(1)
wsum.append(i)
if sum(time) - time[0] == b:
sec += time.popleft()
wsum.popleft()
continue
if i <= weight - sum(wsum) + wsum[0]:
sec += time.popleft()
wsum.popleft()
time.append(b - sum(time))
wsum.append(i)
continue
while i > weight - sum(wsum):
sec += time.popleft()
wsum.popleft()
time.append(b - sum(time))
wsum.append(i)
    return sec + sum(time) + b  # include the departure offsets already popped into sec
'''
Whereas virtually 100% of other solutions loop over every second,
this one loops over the trucks in order.
=> Most per-second implementations take over 1000.00ms.
=> This approach takes under 0.1ms, which makes it dramatically faster.
=> Solving it this way in a live contest would draw the spotlight, though it is
   probably not feasible within the time limit.
Test 1  > Passed (0.01ms, 10.4MB)
Test 2  > Passed (0.02ms, 10.3MB)
Test 3  > Passed (0.01ms, 10.3MB)
Test 4  > Passed (0.40ms, 10.3MB)
Test 5  > Passed (0.90ms, 10.3MB)
Test 6  > Passed (0.79ms, 10.3MB)
Test 7  > Passed (0.01ms, 10.3MB)
Test 8  > Passed (0.02ms, 10.3MB)
Test 9  > Passed (0.48ms, 10.4MB)
Test 10 > Passed (0.02ms, 10.2MB)
Test 11 > Passed (0.02ms, 10.3MB)
Test 12 > Passed (0.07ms, 10.2MB)
Test 13 > Passed (0.07ms, 10.3MB)
Test 14 > Passed (0.01ms, 10.3MB)
'''
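# Sample case from the problem statement, hand-traced against the fixed return above:
# print(solution(2, 10, [7, 4, 5, 6]))  # -> 8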
|
5e53fe48d93d8d989350f0c230cdf903a0cda0e0
|
fbefc01f9f984eadf068ec25b7f3250949da6da0
|
/src/pyvesync/vesyncswitch.py
|
af06fc02f5e3354d9aed84a7c6a1cd4a2257ff31
|
[
"MIT"
] |
permissive
|
webdjoe/pyvesync
|
a779772ae068a6e820f001c7a4e90d9854f9ed56
|
433b51618539bbce854e972aef0258946b25eced
|
refs/heads/master
| 2023-08-23T02:32:13.049313
| 2023-08-09T01:26:37
| 2023-08-09T01:26:37
| 123,392,081
| 125
| 56
|
MIT
| 2023-09-02T03:15:44
| 2018-03-01T06:24:08
|
Python
|
UTF-8
|
Python
| false
| false
| 12,644
|
py
|
vesyncswitch.py
|
"""Classes for VeSync Switch Devices."""
import logging
import json
from abc import ABCMeta, abstractmethod
from typing import Dict, Union, Optional
from pyvesync.helpers import Helpers as helpers
from pyvesync.vesyncbasedevice import VeSyncBaseDevice
logger = logging.getLogger(__name__)
feature_dict: Dict[str, Dict[str, Union[list, str]]] = {
'ESWL01': {
'module': 'VeSyncWallSwitch',
'features': []
},
'ESWD16': {
'module': 'VeSyncDimmerSwitch',
'features': ['dimmable']
},
'ESWL03': {
'module': 'VeSyncWallSwitch',
'features': []
}
}
switch_modules: dict = {k: v['module']
for k, v in feature_dict.items()}
__all__: list = list(switch_modules.values()) + ['switch_modules']
class VeSyncSwitch(VeSyncBaseDevice):
"""Etekcity Switch Base Class."""
    __metaclass__ = ABCMeta
def __init__(self, details, manager):
"""Initialize Switch Base Class."""
super().__init__(details, manager)
self.features = feature_dict.get(self.device_type, {}).get('features')
if self.features is None:
            logger.error('%s device configuration not set', self.device_name)
raise KeyError(f'Device configuration not set {self.device_name}')
self.details = {}
def is_dimmable(self) -> bool:
"""Return True if switch is dimmable."""
return bool('dimmable' in self.features)
@abstractmethod
def get_details(self) -> None:
"""Get Device Details."""
@abstractmethod
def turn_on(self) -> bool:
"""Turn Switch On."""
@abstractmethod
def turn_off(self) -> bool:
"""Turn switch off."""
@abstractmethod
def get_config(self) -> None:
"""Get configuration and firmware deatils."""
@property
def active_time(self) -> int:
"""Get active time of switch."""
return self.details.get('active_time', 0)
def update(self) -> None:
"""Update device details."""
self.get_details()
class VeSyncWallSwitch(VeSyncSwitch):
"""Etekcity standard wall switch class."""
def __init__(self, details, manager):
"""Initialize standard etekcity wall switch class."""
super().__init__(details, manager)
def get_details(self) -> None:
"""Get switch device details."""
body = helpers.req_body(self.manager, 'devicedetail')
body['uuid'] = self.uuid
head = helpers.req_headers(self.manager)
r, _ = helpers.call_api(
'/inwallswitch/v1/device/devicedetail', 'post',
headers=head, json_object=body
)
if r is not None and helpers.code_check(r):
self.device_status = r.get('deviceStatus', self.device_status)
self.details['active_time'] = r.get('activeTime', 0)
self.connection_status = r.get(
'connectionStatus', self.connection_status)
else:
logger.debug('Error getting %s details', self.device_name)
def get_config(self) -> None:
"""Get switch device configuration info."""
body = helpers.req_body(self.manager, 'devicedetail')
body['method'] = 'configurations'
body['uuid'] = self.uuid
r, _ = helpers.call_api(
'/inwallswitch/v1/device/configurations',
'post',
headers=helpers.req_headers(self.manager),
json_object=body,
)
if helpers.code_check(r):
self.config = helpers.build_config_dict(r)
else:
logger.warning('Unable to get %s config info',
self.device_name)
def turn_off(self) -> bool:
"""Turn off switch device."""
body = helpers.req_body(self.manager, 'devicestatus')
body['status'] = 'off'
body['uuid'] = self.uuid
head = helpers.req_headers(self.manager)
r, _ = helpers.call_api(
'/inwallswitch/v1/device/devicestatus', 'put',
headers=head, json_object=body
)
if r is not None and helpers.code_check(r):
self.device_status = 'off'
return True
logger.warning('Error turning %s off', self.device_name)
return False
def turn_on(self) -> bool:
"""Turn on switch device."""
body = helpers.req_body(self.manager, 'devicestatus')
body['status'] = 'on'
body['uuid'] = self.uuid
head = helpers.req_headers(self.manager)
r, _ = helpers.call_api(
'/inwallswitch/v1/device/devicestatus', 'put',
headers=head, json_object=body
)
if r is not None and helpers.code_check(r):
self.device_status = 'on'
return True
logger.warning('Error turning %s on', self.device_name)
return False
class VeSyncDimmerSwitch(VeSyncSwitch):
"""Vesync Dimmer Switch Class with RGB Faceplate."""
def __init__(self, details, manager):
"""Initilize dimmer switch class."""
super().__init__(details, manager)
self._brightness = 0
self._rgb_value = {'red': 0, 'blue': 0, 'green': 0}
self._rgb_status = 'unknown'
self._indicator_light = 'unknown'
def get_details(self) -> None:
"""Get dimmer switch details."""
body = helpers.req_body(self.manager, 'devicedetail')
body['uuid'] = self.uuid
head = helpers.req_headers(self.manager)
r, _ = helpers.call_api(
'/dimmer/v1/device/devicedetail', 'post',
headers=head, json_object=body
)
if r is not None and helpers.code_check(r):
self.device_status = r.get('deviceStatus', self.device_status)
self.details['active_time'] = r.get('activeTime', 0)
self.connection_status = r.get(
'connectionStatus', self.connection_status)
self._brightness = r.get('brightness')
self._rgb_status = r.get('rgbStatus')
self._rgb_value = r.get('rgbValue')
self._indicator_light = r.get('indicatorlightStatus')
else:
logger.debug('Error getting %s details', self.device_name)
@property
def brightness(self) -> float:
"""Return brightness in percent."""
return self._brightness
@property
def indicator_light_status(self) -> str:
"""Faceplate brightness light status."""
return self._indicator_light
@property
def rgb_light_status(self) -> str:
"""RGB Faceplate light status."""
return self._rgb_status
@property
def rgb_light_value(self) -> dict:
"""RGB Light Values."""
return self._rgb_value
def switch_toggle(self, status: str) -> bool:
"""Toggle switch status."""
if status not in ['on', 'off']:
logger.debug('Invalid status passed to wall switch')
return False
body = helpers.req_body(self.manager, 'devicestatus')
body['status'] = status
body['uuid'] = self.uuid
head = helpers.req_headers(self.manager)
r, _ = helpers.call_api(
'/dimmer/v1/device/devicestatus', 'put',
headers=head, json_object=body
)
if r is not None and helpers.code_check(r):
self.device_status = status
return True
logger.warning('Error turning %s %s',
self.device_name, status)
return False
def turn_on(self) -> bool:
"""Turn switch on."""
return self.switch_toggle('on')
def turn_off(self) -> bool:
"""Turn switch off."""
return self.switch_toggle('off')
def indicator_light_toggle(self, status: str) -> bool:
"""Toggle indicator light."""
if status not in ['on', 'off']:
logger.debug('Invalid status for wall switch')
return False
body = helpers.req_body(self.manager, 'devicestatus')
body['status'] = status
body['uuid'] = self.uuid
head = helpers.req_headers(self.manager)
r, _ = helpers.call_api(
'/dimmer/v1/device/indicatorlightstatus',
'put', headers=head, json_object=body
)
if r is not None and helpers.code_check(r):
self.device_status = status
return True
logger.warning('Error turning %s indicator light %s',
self.device_name, status)
return False
def indicator_light_on(self) -> bool:
"""Turn Indicator light on."""
return self.indicator_light_toggle('on')
def indicator_light_off(self) -> bool:
"""Turn indicator light off."""
return self.indicator_light_toggle('off')
def rgb_color_status(self, status: str,
red: Optional[int] = None,
blue: Optional[int] = None,
green: Optional[int] = None) -> bool:
"""Set faceplate RGB color."""
body = helpers.req_body(self.manager, 'devicestatus')
body['status'] = status
body['uuid'] = self.uuid
head = helpers.req_headers(self.manager)
if red is not None and blue is not None and green is not None:
body['rgbValue'] = {'red': red, 'blue': blue, 'green': green}
r, _ = helpers.call_api(
'/dimmer/v1/device/devicergbstatus', 'put',
headers=head, json_object=body
)
if r is not None and helpers.code_check(r):
self._rgb_status = status
if body.get('rgbValue') is not None:
self._rgb_value = {'red': red, 'blue': blue, 'green': green}
return True
logger.warning('Error turning %s off', self.device_name)
return False
def rgb_color_off(self) -> bool:
"""Turn RGB Color Off."""
return self.rgb_color_status('off')
def rgb_color_on(self) -> bool:
"""Turn RGB Color Off."""
return self.rgb_color_status('on')
def rgb_color_set(self, red: int, green: int, blue: int) -> bool:
"""Set RGB color of faceplate."""
try:
red = int(red)
green = int(green)
blue = int(blue)
except ValueError:
return False
if isinstance(red, int) and isinstance(
green, int) and isinstance(blue, int):
for color in [red, green, blue]:
if color < 0 or color > 255:
logger.warning('Invalid RGB value')
return False
return bool(self.rgb_color_status('on', red, green, blue))
return False
def set_brightness(self, brightness: int) -> bool:
"""Set brightness of dimmer - 1 - 100."""
if isinstance(brightness, int) and (
brightness > 0 or brightness <= 100):
body = helpers.req_body(self.manager, 'devicestatus')
body['brightness'] = brightness
body['uuid'] = self.uuid
head = helpers.req_headers(self.manager)
r, _ = helpers.call_api(
'/dimmer/v1/device/updatebrightness', 'put',
headers=head, json_object=body
)
if r is not None and helpers.code_check(r):
self._brightness = brightness
return True
logger.warning('Error setting %s brightness', self.device_name)
else:
logger.warning('Invalid brightness')
return False
def displayJSON(self) -> str:
"""JSON API for dimmer switch."""
sup_val = json.loads(super().displayJSON())
        if self.is_dimmable():  # call the method; the bare attribute is always truthy
sup_val.update(
{
                    'Indicator Light': str(self._indicator_light),
'Brightness': str(self._brightness),
'RGB Light': str(self._rgb_status),
}
)
return json.dumps(sup_val, indent=4)
def get_config(self) -> None:
"""Get dimmable switch device configuration info."""
body = helpers.req_body(self.manager, 'devicedetail')
body['method'] = 'configurations'
body['uuid'] = self.uuid
r, _ = helpers.call_api(
'/dimmer/v1/device/configurations',
'post',
headers=helpers.req_headers(self.manager),
json_object=body,
)
if helpers.code_check(r):
self.config = helpers.build_config_dict(r)
else:
logger.warning('Unable to get %s config info', self.device_name)
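# --- Usage sketch (not part of the original module; account and device specifics
# are assumptions, the manager calls follow pyvesync's documented flow) ---
#
#     from pyvesync import VeSync
#     manager = VeSync("user@example.com", "password")
#     manager.login()
#     manager.update()
#     dimmer = manager.switches[0]
#     dimmer.turn_on()
#     dimmer.set_brightness(50)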
|
8c701e9d9a741275d63b05b475d5359edf9d45ac
|
d125dc644ecb37c014c771c02549c1ebf35eca4d
|
/common/fuzzer_stats.py
|
3a0353fa68fefdd4932e8c1404e97d633c719698
|
[
"Apache-2.0"
] |
permissive
|
google/fuzzbench
|
316d28b2eff2015fe2f479668151b3259bfa2579
|
ff8ef0c6da62268521061a432c5b9e228c2f53dc
|
refs/heads/master
| 2023-09-04T11:04:20.324945
| 2023-08-29T17:37:10
| 2023-08-29T17:37:10
| 238,105,619
| 1,005
| 402
|
Apache-2.0
| 2023-09-13T15:07:49
| 2020-02-04T02:22:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,410
|
py
|
fuzzer_stats.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for dealing with self reported fuzzer stats."""
import json
SCHEMA = {'execs_per_sec': float}
def validate_fuzzer_stats(stats_json_str):
"""Validate that |stats_json_str| is a json representation of valid fuzzer
stats. Raises an exception if it is not, otherwise returns successfully."""
stats = json.loads(stats_json_str)
if not isinstance(stats, dict):
raise ValueError(f'{stats} is not a dict.')
for key, value in stats.items():
if key not in SCHEMA:
raise ValueError(f'Key {key} is not a valid stat key.')
expected_type = SCHEMA[key]
if isinstance(value, expected_type):
continue
raise ValueError(
f'Key "{key}" has value "{value}" which is type: "{type(value)}"' +
f'. Expected type: "{expected_type}".')
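# Usage sketch (not part of the original module):
# validate_fuzzer_stats('{"execs_per_sec": 1234.5}')  # returns without raising
# validate_fuzzer_stats('{"execs": 7}')               # raises ValueError: invalid key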
|
0dddcefe313c09d6722c1b810d3246df561f7b7d
|
05e634a232574f676434dfa8e4183f3d0a1a4bc9
|
/paddlecv/ppcv/utils/timer.py
|
f5be5b983c0f8a8ce886180d5dcbe887da89c7df
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/models
|
67ac00d93c5255ac64a9d80ae5be2e8927e47cee
|
8042c21b690ffc0162095e749a41b94dd38732da
|
refs/heads/release/2.4
| 2023-09-04T15:23:59.543625
| 2023-07-20T11:54:16
| 2023-07-20T11:54:16
| 88,868,842
| 7,633
| 3,597
|
Apache-2.0
| 2023-09-05T23:23:54
| 2017-04-20T13:30:15
|
Python
|
UTF-8
|
Python
| false
| false
| 2,980
|
py
|
timer.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import os
import ast
import glob
import yaml
import copy
import numpy as np
class Times(object):
def __init__(self):
self.time = 0.
# start time
self.st = 0.
# end time
self.et = 0.
def start(self):
self.st = time.time()
def end(self, repeats=1, accumulative=True):
self.et = time.time()
if accumulative:
self.time += (self.et - self.st) / repeats
else:
self.time = (self.et - self.st) / repeats
def reset(self):
self.time = 0.
self.st = 0.
self.et = 0.
def value(self):
return round(self.time, 4)
class PipeTimer(Times):
def __init__(self, cfg):
super(PipeTimer, self).__init__()
self.total_time = Times()
self.module_time = dict()
for op in cfg:
            # dict.values() is not subscriptable; assume each cfg entry looks
            # like {'OpClass': {'name': ...}} and take the first value's name.
            op_name = list(op.values())[0]['name']
self.module_time.update({op_name: Times()})
self.img_num = 0
def get_total_time(self):
total_time = self.total_time.value()
average_latency = total_time / max(1, self.img_num)
qps = 0
if total_time > 0:
qps = 1 / average_latency
return total_time, average_latency, qps
def info(self):
total_time, average_latency, qps = self.get_total_time()
print("------------------ Inference Time Info ----------------------")
print("total_time(ms): {}, img_num: {}".format(total_time * 1000,
self.img_num))
for k, v in self.module_time.items():
v_time = round(v.value(), 4)
if v_time > 0:
print("{} time(ms): {}; per frame average time(ms): {}".format(
k, v_time * 1000, v_time * 1000 / self.img_num))
print("average latency time(ms): {:.2f}, QPS: {:2f}".format(
average_latency * 1000, qps))
return qps
def report(self, average=False):
dic = {}
        for m, mod_time in self.module_time.items():  # iterate key/value pairs
            # `mod_time` also avoids shadowing the imported `time` module
            dic[m] = round(mod_time.value() / max(1, self.img_num),
                           4) if average else mod_time.value()
dic['total'] = round(self.total_time.value() / max(1, self.img_num),
4) if average else self.total_time.value()
dic['img_num'] = self.img_num
return dic
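# Illustrative usage (a sketch added for clarity; the cfg shape below is an
# assumption based on how PipeTimer's __init__ reads each entry).
if __name__ == '__main__':
    cfg = [{'DetectionOp': {'name': 'det'}}]
    pipe_timer = PipeTimer(cfg)
    pipe_timer.total_time.start()
    pipe_timer.module_time['det'].start()
    time.sleep(0.01)  # stand-in for real per-module work
    pipe_timer.module_time['det'].end()
    pipe_timer.total_time.end()
    pipe_timer.img_num = 1
    pipe_timer.info()
    print(pipe_timer.report(average=True))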
|
afb1a74a34f158dc386e23b77493c9a397e77800
|
8ddd4c20622a6df48d808e03c084cbd2b1043182
|
/summertime/evaluation/rouge_metric.py
|
1c6817d9a481d00f1fde6dafc7336e360380ee38
|
[
"Apache-2.0"
] |
permissive
|
Yale-LILY/SummerTime
|
7f92824ef56d57af5292dba910f9c52913c5d29f
|
761676ddda5dce5cf776ab16ee38b6d995b631ac
|
refs/heads/main
| 2023-08-17T16:10:08.684350
| 2022-03-27T21:19:15
| 2022-03-27T21:19:15
| 348,897,374
| 232
| 29
|
Apache-2.0
| 2023-08-29T05:42:47
| 2021-03-18T00:59:17
|
Python
|
UTF-8
|
Python
| false
| false
| 718
|
py
|
rouge_metric.py
|
from summ_eval.rouge_metric import RougeMetric
from summertime.evaluation.summeval_metric import SummEvalMetric
from typing import List, Dict
class Rouge(SummEvalMetric):
metric_name = "rouge"
range = (0, 1)
higher_is_better = True
requires_heavy_compute = False
def __init__(self):
se_metric = RougeMetric()
super(Rouge, self).__init__(se_metric)
def evaluate(
self,
inputs: List[str],
targets: List[str],
keys: List[str] = ["rouge_1_f_score", "rouge_2_f_score", "rouge_l_f_score"],
) -> Dict[str, float]:
score_dict = self.se_metric.evaluate_batch(inputs, targets)
return {key: score_dict["rouge"][key] for key in keys}
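# Usage sketch (illustrative; assumes the summ_eval ROUGE backend and its
# data files are installed and configured).
if __name__ == '__main__':
    rouge = Rouge()
    scores = rouge.evaluate(
        inputs=['the cat sat on the mat'],
        targets=['a cat was sitting on the mat'],
    )
    print(scores)  # e.g. {'rouge_1_f_score': ..., 'rouge_2_f_score': ..., ...}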
|
6d8a673a13e61192149f720d3b0c2ea95d1bbee0
|
8da41ffa2ccb09e04f95db0f211e0ed69a42a352
|
/courses/machine_learning/deepdive/10_recommend/labs/endtoend/airflow/dags/training.py
|
0bf4eac2befa0ff64af4edf0e2644b2bd5ae48b6
|
[
"Apache-2.0"
] |
permissive
|
GoogleCloudPlatform/training-data-analyst
|
808af9b09a0e5f5657c4ca76cdd205f808d76d89
|
975a95032ce5b7012d1772c7f1f5cfe606eae839
|
refs/heads/master
| 2023-09-05T19:50:59.722334
| 2023-09-04T14:25:33
| 2023-09-04T14:25:33
| 56,459,948
| 7,311
| 5,917
|
Apache-2.0
| 2023-09-13T21:45:54
| 2016-04-17T21:39:27
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,029
|
py
|
training.py
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DAG definition for recserv model training."""
import airflow
from airflow import DAG
# Reference for all available airflow operators:
# https://github.com/apache/incubator-airflow/tree/master/airflow/contrib/operators
from airflow.contrib.operators.bigquery_operator import BigQueryOperator
from airflow.contrib.operators.bigquery_to_gcs import BigQueryToCloudStorageOperator
from airflow.hooks.base_hook import BaseHook
# from airflow.contrib.operators.mlengine_operator import MLEngineTrainingOperator
# above mlengine_operator currently doesnt support custom MasterType so we import our own plugins:
# custom plugins
from airflow.operators.app_engine_admin_plugin import AppEngineVersionOperator
from airflow.operators.ml_engine_plugin import MLEngineTrainingOperator
import datetime
def _get_project_id():
"""Get project ID from default GCP connection."""
extras = BaseHook.get_connection('google_cloud_default').extra_dejson
key = 'extra__google_cloud_platform__project'
if key in extras:
project_id = extras[key]
else:
        raise ValueError('Must configure project_id in google_cloud_default '
                         'connection from Airflow Console')
return project_id
PROJECT_ID = _get_project_id()
# Data set constants, used in BigQuery tasks. You can change these
# to conform to your data.
# TODO: Specify your BigQuery dataset name and table name
DATASET = ''
TABLE_NAME = ''
ARTICLE_CUSTOM_DIMENSION = '10'
# TODO: Confirm bucket name and region
# GCS bucket names and region, can also be changed.
BUCKET = 'gs://recserve_' + PROJECT_ID
REGION = 'us-east1'
# The code package name comes from the model code in the wals_ml_engine
# directory of the solution code base.
PACKAGE_URI = BUCKET + '/code/wals_ml_engine-0.1.tar.gz'
JOB_DIR = BUCKET + '/jobs'
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': airflow.utils.dates.days_ago(2),
'email': ['airflow@example.com'],
'email_on_failure': True,
'email_on_retry': False,
'retries': 5,
'retry_delay': datetime.timedelta(minutes=5)
}
# Default schedule interval using cronjob syntax - can be customized here
# or in the Airflow console.
# TODO: Specify a schedule interval in CRON syntax to run once a day at 2100 hours (9pm)
# Reference: https://airflow.apache.org/scheduler.html
schedule_interval = '00 21 * * *'  # run once a day at 2100 hours (9pm)
# TODO: Title your DAG to be recommendations_training_v1
dag = DAG('recommendations_training_v1',
default_args=default_args,
schedule_interval=schedule_interval)
dag.doc_md = __doc__
#
#
# Task Definition
#
#
# BigQuery training data query
bql='''
#legacySql
SELECT
fullVisitorId as clientId,
ArticleID as contentId,
(nextTime - hits.time) as timeOnPage,
FROM(
SELECT
fullVisitorId,
hits.time,
MAX(IF(hits.customDimensions.index={0},
hits.customDimensions.value,NULL)) WITHIN hits AS ArticleID,
LEAD(hits.time, 1) OVER (PARTITION BY fullVisitorId, visitNumber
ORDER BY hits.time ASC) as nextTime
FROM [{1}.{2}.{3}]
WHERE hits.type = "PAGE"
) HAVING timeOnPage is not null and contentId is not null;
'''
bql = bql.format(ARTICLE_CUSTOM_DIMENSION, PROJECT_ID, DATASET, TABLE_NAME)
# TODO: Complete the BigQueryOperator task to truncate the table if it already exists before writing
# Reference: https://airflow.apache.org/integration.html#bigqueryoperator
t1 = BigQueryOperator(
task_id='bq_rec_training_data',
bql=bql,
destination_dataset_table='%s.recommendation_events' % DATASET,
    write_disposition='WRITE_TRUNCATE',  # truncate the table if it already exists
dag=dag)
# BigQuery training data export to GCS
# TODO: Fill in the missing operator name for task #2 which
# takes a BigQuery dataset and table as input and exports it to GCS as a CSV
training_file = BUCKET + '/data/recommendation_events.csv'
t2 = BigQueryToCloudStorageOperator(
task_id='bq_export_op',
source_project_dataset_table='%s.recommendation_events' % DATASET,
destination_cloud_storage_uris=[training_file],
export_format='CSV',
dag=dag
)
# ML Engine training job
job_id = 'recserve_{0}'.format(datetime.datetime.now().strftime('%Y%m%d%H%M'))
job_dir = BUCKET + '/jobs/' + job_id
output_dir = BUCKET
training_args = ['--job-dir', job_dir,
'--train-files', training_file,
'--output-dir', output_dir,
'--data-type', 'web_views',
'--use-optimized']
# TODO: Fill in the missing operator name for task #3 which will
# start a new training job to Cloud ML Engine
# Reference: https://airflow.apache.org/integration.html#cloud-ml-engine
# https://cloud.google.com/ml-engine/docs/tensorflow/machine-types
t3 = MLEngineTrainingOperator(
task_id='ml_engine_training_op',
project_id=PROJECT_ID,
job_id=job_id,
package_uris=[PACKAGE_URI],
training_python_module='trainer.task',
training_args=training_args,
region=REGION,
scale_tier='CUSTOM',
master_type='complex_model_m_gpu',
dag=dag
)
# App Engine deploy new version
t4 = AppEngineVersionOperator(
task_id='app_engine_deploy_version',
project_id=PROJECT_ID,
service_id='default',
region=REGION,
service_spec=None,
dag=dag
)
# TODO: Be sure to set_upstream dependencies for all tasks
t2.set_upstream(t1)
t3.set_upstream(t2)
t4.set_upstream(t3)
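# The same dependency chain can be declared with Airflow's bitshift syntax,
# shown here as an illustrative equivalent:
# t1 >> t2 >> t3 >> t4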
|
94f5b78ebeccc576ce5937e7ba97baa4b8de5809
|
3dcf6e78be6d822861ca0d81b2fae7050b58f945
|
/oct2py/core.py
|
b75a4fe126464cd18b3b47d8b376ae30b5cd96ba
|
[
"MIT"
] |
permissive
|
blink1073/oct2py
|
3c2e71a9480d0f9a4411f1a98fa830c1e78076ed
|
0ca02d853390f695494a5d23ad219c62da46eb66
|
refs/heads/main
| 2023-08-25T19:27:30.490304
| 2023-08-08T13:33:49
| 2023-08-08T13:33:49
| 5,377,231
| 228
| 40
|
MIT
| 2023-09-09T11:02:06
| 2012-08-11T04:19:27
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 28,865
|
py
|
core.py
|
"""Core oct2py functionality."""
# Copyright (c) oct2py developers.
# Distributed under the terms of the MIT License.
import atexit
import logging
import os
import os.path as osp
import shutil
import tempfile
import warnings
import numpy as np
from metakernel.pexpect import EOF, TIMEOUT # type:ignore
from octave_kernel.kernel import STDIN_PROMPT, OctaveEngine # type:ignore
from .dynamic import (
OctavePtr,
_make_function_ptr_instance,
_make_user_class,
_make_variable_ptr_instance,
)
from .io import Cell, StructArray, read_file, write_file
from .utils import Oct2PyError, get_log
HERE = osp.realpath(osp.dirname(__file__))
class Oct2Py:
"""Manages an Octave session.
Uses MAT files to pass data between Octave and Numpy.
The function must either exist as an m-file in this directory or
on Octave's path.
The first command will take about 0.5s for Octave to load up.
The subsequent commands will be much faster.
You may provide a logger object for logging events, or the oct2py.get_log()
default will be used. When calling commands, logger.info() will be used
to stream output, unless a `stream_handler` is provided.
Parameters
----------
logger : logging object, optional
Optional logger to use for Oct2Py session
timeout : float, optional
Timeout in seconds for commands
oned_as : {'row', 'column'}, optional
If 'column', write 1-D numpy arrays as column vectors.
        If 'row', write 1-D numpy arrays as row vectors.
temp_dir : str, optional
If specified, the session's MAT files will be created in the
directory, otherwise a default directory is used. This can be
a shared memory (tmpfs) path.
convert_to_float : bool, optional
If true, convert integer types to float when passing to Octave.
backend: string, optional
The graphics_toolkit to use for plotting.
"""
def __init__( # noqa
self,
logger=None,
timeout=None,
oned_as="row",
temp_dir=None,
convert_to_float=True,
backend=None,
):
"""Start Octave and set up the session."""
self._oned_as = oned_as
self._engine = None
self._logger = None
self.logger = logger
self.timeout = timeout
self.backend = backend or "default"
if temp_dir is None:
temp_dir_obj = tempfile.mkdtemp()
self.temp_dir = temp_dir_obj
atexit.register(shutil.rmtree, self.temp_dir)
else:
self.temp_dir = temp_dir
self.convert_to_float = convert_to_float
self._user_classes = {}
self._function_ptrs = {}
self.restart()
@property
def logger(self):
"""The logging instance used by the session."""
return self._logger
@logger.setter
def logger(self, value):
self._logger = value or get_log()
if self._engine:
self._engine.logger = self._logger
def __enter__(self):
"""Return octave object, restart session if necessary"""
if not self._engine:
self.restart()
return self
def __exit__(self, type_, value, traceback):
"""Close session"""
self.exit()
def __del__(self):
"""Delete session"""
self.exit()
def exit(self): # noqa
"""Quits this octave session and cleans up."""
if self._engine:
self._engine.repl.terminate()
self._engine = None
def push(self, name, var, timeout=None, verbose=True):
"""
Put a variable or variables into the Octave session.
Parameters
----------
name : str or list
Name of the variable(s).
var : object or list
The value(s) to pass.
timeout : float
Time to wait for response from Octave (per line).
**kwargs: Deprecated kwargs, ignored.
Examples
--------
>>> from oct2py import octave
>>> y = [1, 2]
>>> octave.push('y', y)
>>> octave.pull('y')
array([[1., 2.]])
>>> octave.push(['x', 'y'], ['spam', [1, 2, 3, 4]])
>>> octave.pull(['x', 'y']) # doctest: +SKIP
[u'spam', array([[1, 2, 3, 4]])]
Notes
-----
Integer type arguments will be converted to floating point
unless `convert_to_float=False`.
"""
if isinstance(name, str):
name = [name]
var = [var]
for n, v in zip(name, var):
self.feval("assignin", "base", n, v, nout=0, timeout=timeout, verbose=verbose)
def pull(self, var, timeout=None, verbose=True):
"""
Retrieve a value or values from the Octave session.
Parameters
----------
var : str or list
Name of the variable(s) to retrieve.
timeout : float, optional.
Time to wait for response from Octave (per line).
**kwargs: Deprecated kwargs, ignored.
Returns
-------
out : object
Object returned by Octave.
Raises
------
Oct2PyError
If the variable does not exist in the Octave session.
Examples
--------
>>> from oct2py import octave
>>> y = [1, 2]
>>> octave.push('y', y)
>>> octave.pull('y')
array([[1., 2.]])
>>> octave.push(['x', 'y'], ['spam', [1, 2, 3, 4]])
>>> octave.pull(['x', 'y']) # doctest: +SKIP
[u'spam', array([[1, 2, 3, 4]])]
"""
if isinstance(var, str):
var = [var]
outputs = []
for name in var:
exist = self._exist(name)
if exist == 1:
outputs.append(self.feval("evalin", "base", name, timeout=timeout, verbose=verbose))
else:
outputs.append(self.get_pointer(name, timeout=timeout))
if len(outputs) == 1:
return outputs[0]
return outputs
def get_pointer(self, name, timeout=None):
"""Get a pointer to a named object in the Octave workspace.
Parameters
----------
name: str
The name of the object in the Octave workspace.
        timeout: float, optional.
Time to wait for response from Octave (per line).
Examples
--------
>>> from oct2py import octave
>>> octave.eval('foo = [1, 2];')
>>> ptr = octave.get_pointer('foo')
>>> ptr.value
array([[1., 2.]])
>>> ptr.address
'foo'
>>> # Can be passed as an argument
>>> octave.disp(ptr) # doctest: +SKIP
1 2
>>> from oct2py import octave
>>> sin = octave.get_pointer('sin') # equivalent to `octave.sin`
>>> sin.address
'@sin'
>>> x = octave.quad(sin, 0, octave.pi())
>>> x
2.0
Notes
-----
Pointers can be passed to `feval` or dynamic functions as function
arguments. A pointer passed as a nested value will be passed by value
instead.
Raises
------
Oct2PyError
If the variable does not exist in the Octave session or is of
unknown type.
Returns
-------
A variable, object, user class, or function pointer as appropriate.
"""
exist = self._exist(name)
isobject = self._isobject(name, exist)
if exist == 0:
raise Oct2PyError('"%s" is undefined' % name)
elif exist == 1:
return _make_variable_ptr_instance(self, name)
elif isobject:
return self._get_user_class(name)
elif exist in [2, 3, 5]:
return self._get_function_ptr(name)
raise Oct2PyError('Unknown type for object "%s"' % name)
def extract_figures(self, plot_dir, remove=False):
"""Extract the figures in the directory to IPython display objects.
Parameters
----------
plot_dir: str
The plot dir where the figures were created.
remove: bool, optional.
Whether to remove the plot directory after saving.
"""
if not self._engine:
msg = "Session is not open"
raise Oct2PyError(msg)
figures = self._engine.extract_figures(plot_dir, remove)
return figures
def feval(self, func_path, *func_args, **kwargs):
"""Run a function in Octave and return the result.
Parameters
----------
func_path: str
Name of function to run or a path to an m-file.
func_args: object, optional
Args to send to the function.
nout: int or str, optional.
The desired number of returned values, defaults to 1. If nout
value is 'max_nout', _get_max_nout() will be used.
store_as: str, optional
If given, saves the result to the given Octave variable name
instead of returning it.
verbose : bool, optional
Log Octave output at INFO level. If False, log at DEBUG level.
stream_handler: callable, optional
A function that is called for each line of output from the
evaluation.
timeout: float, optional
The timeout in seconds for the call.
plot_dir: str, optional
            If specified, save the session's plot figures to the plot
directory instead of displaying the plot window.
plot_backend: str, optional
The plotting back end to use.
plot_name : str, optional
Saved plots will start with `plot_name` and
            end with "_%%.xxx" where %% is the plot number and
xxx is the `plot_format`.
plot_format: str, optional
The format in which to save the plot.
plot_width: int, optional
            The plot width in pixels.
plot_height: int, optional
The plot height in pixels.
Notes
-----
The function arguments passed follow Octave calling convention, not
Python. That is, all values must be passed as a comma separated list,
not using `x=foo` assignment.
Examples
--------
>>> from oct2py import octave
>>> cell = octave.feval('cell', 10, 10, 10)
>>> cell.shape
(10, 10, 10)
>>> from oct2py import octave
>>> x = octave.feval('linspace', 0, octave.pi() / 2)
>>> x.shape
(1, 100)
>>> from oct2py import octave
>>> x = octave.feval('svd', octave.hilb(3))
>>> x
array([[1.40831893],
[0.12232707],
[0.00268734]])
>>> # specify three return values
>>> (u, v, d) = octave.feval('svd', octave.hilb(3), nout=3)
>>> u.shape
(3, 3)
Returns
-------
The Python value(s) returned by the Octave function call.
"""
if not self._engine:
msg = "Session is not open"
raise Oct2PyError(msg)
# nout handler
nout = kwargs.get("nout", None)
if nout is None:
nout = 1
elif nout == "max_nout":
nout = self._get_max_nout(func_path)
plot_dir = kwargs.get("plot_dir")
# Choose appropriate plot backend.
default_backend = "inline" if plot_dir else self.backend
backend = kwargs.get("plot_backend", default_backend)
settings = dict(
backend=backend,
format=kwargs.get("plot_format"),
name=kwargs.get("plot_name"),
width=kwargs.get("plot_width"),
height=kwargs.get("plot_height"),
resolution=kwargs.get("plot_res"),
)
self._engine.plot_settings = settings
dname = osp.dirname(func_path)
fname = osp.basename(func_path)
func_name, ext = osp.splitext(fname)
if ext and ext != ".m":
msg = "Need to give path to .m file"
raise TypeError(msg)
if func_name == "clear":
msg = 'Cannot use `clear` command directly, use eval("clear(var1, var2)")'
raise Oct2PyError(msg)
stream_handler = kwargs.get("stream_handler")
verbose = kwargs.get("verbose", True)
store_as = kwargs.get("store_as", "")
timeout = kwargs.get("timeout", self.timeout)
if not stream_handler:
stream_handler = self.logger.info if verbose else self.logger.debug
return self._feval(
func_name,
func_args,
dname=dname,
nout=nout,
timeout=timeout,
stream_handler=stream_handler,
store_as=store_as,
plot_dir=plot_dir,
)
def eval( # noqa
self,
cmds,
verbose=True,
timeout=None,
stream_handler=None,
temp_dir=None,
plot_dir=None,
plot_name="plot",
plot_format="svg",
plot_backend=None,
plot_width=None,
plot_height=None,
plot_res=None,
nout=0,
**kwargs,
):
"""
Evaluate an Octave command or commands.
Parameters
----------
cmds : str or list
Commands(s) to pass to Octave.
verbose : bool, optional
Log Octave output at INFO level. If False, log at DEBUG level.
stream_handler: callable, optional
A function that is called for each line of output from the
evaluation.
timeout : float, optional
Time to wait for response from Octave (per line). If not given,
the instance `timeout` is used.
nout : int or str, optional.
The desired number of returned values, defaults to 0. If nout
is 0, the `ans` will be returned as the return value. If nout
value is 'max_nout', _get_max_nout() will be used.
temp_dir: str, optional
If specified, the session's MAT files will be created in the
            directory, otherwise the instance `temp_dir` is used. This can be
            a shared memory (tmpfs) path.
plot_dir: str, optional
            If specified, save the session's plot figures to the plot
directory instead of displaying the plot window.
plot_name : str, optional
Saved plots will start with `plot_name` and
            end with "_%%.xxx" where %% is the plot number and
xxx is the `plot_format`.
plot_format: str, optional
            The format in which to save the plot (SVG by default).
plot_width: int, optional
            The plot width in pixels.
plot_height: int, optional
The plot height in pixels.
plot_backend: str, optional
The plot backend to use.
plot_res: int, optional
The plot resolution in pixels per inch.
        **kwargs: Deprecated kwargs, ignored.
Examples
--------
>>> from oct2py import octave
>>> octave.eval('disp("hello")') # doctest: +SKIP
hello
>>> x = octave.eval('round(quad(@sin, 0, pi/2));')
>>> x
1.0
>>> a = octave.eval('disp("hello");1;') # doctest: +SKIP
hello
>>> a = octave.eval('disp("hello");1;', verbose=False)
>>> a
1.0
>>> from oct2py import octave
>>> lines = []
>>> octave.eval('for i = 1:3; disp(i);end', \
stream_handler=lines.append)
>>> lines # doctest: +SKIP
[' 1', ' 2', ' 3']
Returns
-------
out : object
Octave "ans" variable, or None.
Notes
-----
The deprecated `log` kwarg will temporarily set the `logger` level to
`WARN`. Using the `logger` settings directly is preferred.
The deprecated `return_both` kwarg will still work, but the preferred
method is to use the `stream_handler`. If `stream_handler` is given,
the `return_both` kwarg will be honored but will give an empty string
        as the response.
Raises
------
Oct2PyError
If the command(s) fail.
"""
if isinstance(cmds, str):
cmds = [cmds]
prev_temp_dir = self.temp_dir
self.temp_dir = temp_dir or self.temp_dir
prev_log_level = self.logger.level
if kwargs.get("log") is False:
self.logger.setLevel(logging.WARN)
for name in ["log", "return_both"]:
if name not in kwargs:
continue
msg = "Using deprecated `%s` kwarg, see docs on `Oct2Py.eval()`"
warnings.warn(msg % name, stacklevel=2)
return_both = kwargs.pop("return_both", False)
lines: list = []
if return_both and not stream_handler:
stream_handler = lines.append
ans = None
for cmd in cmds:
resp = self.feval(
"evalin",
"base",
cmd,
nout=nout,
timeout=timeout,
stream_handler=stream_handler,
verbose=verbose,
plot_dir=plot_dir,
plot_name=plot_name,
plot_format=plot_format,
plot_backend=plot_backend,
plot_width=plot_width,
plot_height=plot_height,
plot_res=plot_res,
)
if resp is not None:
ans = resp
self.temp_dir = prev_temp_dir
self.logger.setLevel(prev_log_level)
if return_both:
return "\n".join(lines), ans
return ans
def restart(self):
"""Restart an Octave session in a clean state"""
if self._engine:
self._engine.repl.terminate()
if "OCTAVE_EXECUTABLE" not in os.environ and "OCTAVE" in os.environ:
os.environ["OCTAVE_EXECUTABLE"] = os.environ["OCTAVE"]
try:
self._engine = OctaveEngine(stdin_handler=self._handle_stdin, logger=self.logger)
except Exception as e:
raise Oct2PyError(str(e)) from None
# Add local Octave scripts.
self._engine.eval('addpath("%s");' % HERE.replace(osp.sep, "/"))
def _feval( # noqa
self,
func_name,
func_args=(),
dname="",
nout=0,
timeout=None,
stream_handler=None,
store_as="",
plot_dir=None,
):
"""Run the given function with the given args."""
engine = self._engine
if engine is None:
msg = "Session is closed"
raise Oct2PyError(msg)
# Set up our mat file paths.
out_file = osp.join(self.temp_dir, "writer.mat")
out_file = out_file.replace(osp.sep, "/")
in_file = osp.join(self.temp_dir, "reader.mat")
in_file = in_file.replace(osp.sep, "/")
func_args = list(func_args)
ref_indices = []
for i, value in enumerate(func_args):
if isinstance(value, OctavePtr):
ref_indices.append(i + 1)
func_args[i] = value.address
ref_arr = np.array(ref_indices)
# Save the request data to the output file.
req = dict(
func_name=func_name,
func_args=tuple(func_args),
dname=dname or "",
nout=nout,
store_as=store_as or "",
ref_indices=ref_arr,
)
write_file(req, out_file, oned_as=self._oned_as, convert_to_float=self.convert_to_float)
# Set up the engine and evaluate the `_pyeval()` function.
engine.line_handler = stream_handler or self.logger.info
if timeout is None:
timeout = self.timeout
try:
engine.eval(f'_pyeval("{out_file}", "{in_file}");', timeout=timeout)
except KeyboardInterrupt:
stream_handler(engine.repl.interrupt())
raise
except TIMEOUT:
stream_handler(engine.repl.interrupt())
msg = "Timed out, interrupting"
raise Oct2PyError(msg) from None
except EOF:
if not self._engine:
return
stream_handler(engine.repl.child.before)
self.restart()
msg = "Session died, restarting"
raise Oct2PyError(msg) from None
# Read in the output.
resp = read_file(in_file, self)
if resp["err"]:
msg = self._parse_error(resp["err"])
raise Oct2PyError(msg)
result = resp["result"].ravel().tolist()
if isinstance(result, list) and len(result) == 1:
result = result[0]
# Check for sentinel value.
if (
isinstance(result, Cell)
and result.size == 1
and isinstance(result[0], str)
and result[0] == "__no_value__"
):
result = None
if plot_dir:
engine.make_figures(plot_dir)
return result
def _parse_error(self, err):
"""Create a traceback for an Octave evaluation error."""
self.logger.debug(err)
stack = err.get("stack", [])
if not err["message"].startswith("parse error:"):
err["message"] = "error: " + err["message"]
errmsg = "Octave evaluation error:\n%s" % err["message"]
if not isinstance(stack, StructArray):
return errmsg
errmsg += "\nerror: called from:"
for item in stack[:-1]:
errmsg += "\n %(name)s at line %(line)d" % item
try: # noqa
errmsg += ", column %(column)d" % item
except Exception: # noqa
pass
return errmsg
def _handle_stdin(self, line):
"""Handle a stdin request from the session."""
return input(line.replace(STDIN_PROMPT, ""))
def _print_doc(self, name):
"""
Print the documentation of an Octave procedure or object.
Parameters
----------
name : str
Function name to search for.
Returns
-------
out : None
"""
print(self._get_doc(name)) # noqa
def _get_doc(self, name):
"""
Get the documentation of an Octave procedure or object.
Parameters
----------
name : str
Function name to search for.
Returns
-------
out : str
Documentation string.
Raises
------
Oct2PyError
If the procedure or object function has a syntax error.
"""
doc = "No documentation for %s" % name
engine = self._engine
if not engine:
msg = "Session is not open"
raise Oct2PyError(msg)
doc = engine.eval('help("%s")' % name, silent=True)
if "syntax error:" in doc.lower():
raise Oct2PyError(doc)
if "error:" in doc.lower():
doc = engine.eval('type("%s")' % name, silent=True)
doc = "\n".join(doc.splitlines()[:3])
default = self.feval.__doc__
default = " " + default[default.find("func_args:") :] # type:ignore
default = "\n".join([line[8:] for line in default.splitlines()])
doc = "\n".join(doc.splitlines())
doc = "\n" + doc + "\n\nParameters\n----------\n" + default
doc += "\n**kwargs - Deprecated keyword arguments\n\n"
doc += "Notes\n-----\n"
doc += "Keyword arguments to dynamic functions are deprecated.\n"
        doc += "The `plot_*` kwargs will be ignored, but the rest will be\n"
        doc += "used as key-value pairs as in version 3.x.\n"
doc += "Use `set_plot_settings()` for plot settings, and use\n"
doc += "`func_args` directly for key - value pairs."
return doc
def _exist(self, name):
"""Test whether a name exists and return the name code.
Raises an error when the name does not exist.
"""
cmd = 'exist("%s")' % name
if not self._engine:
msg = "Session is not open"
raise Oct2PyError(msg)
resp = self._engine.eval(cmd, silent=True).strip()
exist = int(resp.split()[-1])
if exist == 0:
cmd = "class(%s)" % name
resp = self._engine.eval(cmd, silent=True).strip()
if "error:" in resp:
msg = 'Value "%s" does not exist in Octave workspace'
raise Oct2PyError(msg % name)
else:
exist = 2
return exist
def _isobject(self, name, exist):
"""Test whether the name is an object."""
if exist in [2, 5]:
return False
cmd = "isobject(%s)" % name
if not self._engine:
msg = "Session is not open"
raise Oct2PyError(msg)
resp = self._engine.eval(cmd, silent=True).strip()
return resp == "ans = 1"
def _get_function_ptr(self, name):
"""Get or create a function pointer of the given name."""
func = _make_function_ptr_instance
self._function_ptrs.setdefault(name, func(self, name))
return self._function_ptrs[name]
def _get_user_class(self, name):
"""Get or create a user class of the given type."""
self._user_classes.setdefault(name, _make_user_class(self, name))
return self._user_classes[name]
    def __getattr__(self, attr):
        """Automatically creates a wrapper to an Octave function or object.
Adapted from the mlabwrap project.
"""
# needed for help(Oct2Py())
if attr.startswith("__"):
return super().__getattr__(attr) # type:ignore
# close_ -> close
name = attr[:-1] if attr[-1] == "_" else attr
if self._engine is None:
msg = "Session is closed"
raise Oct2PyError(msg)
# Make sure the name exists.
exist = self._exist(name)
if exist not in [2, 3, 5, 103]:
msg = 'Name "%s" is not a valid callable, use `pull` for variables'
raise Oct2PyError(msg % name)
if name == "clear":
msg = 'Cannot use `clear` command directly, use `eval("clear(var1, var2)")`'
raise Oct2PyError(msg)
# Check for user defined class.
if self._isobject(name, exist):
obj = self._get_user_class(name)
else:
obj = self._get_function_ptr(name)
# !!! attr, *not* name, because we might have python keyword name!
setattr(self, attr, obj)
return obj
def _get_max_nout(self, func_path):
"""Get or count maximum nout of .m function."""
if not osp.isabs(func_path):
func_path = self.which(func_path)
nout = 0 # default nout of eval
status = "NOT FUNCTION"
if func_path.endswith(".m"): # only if `func_path` is .m file
with open(func_path, encoding="utf8") as fid:
for line in fid:
if line[0] != "f": # noqa # not function
if status == "NOT FUNCTION":
continue
line = line.translate( # noqa
str.maketrans("", "", "[]()")
).split() # type:ignore
try: # noqa
line.remove("function") # type:ignore
except Exception: # noqa
pass
for char in line:
if char == "...":
status = "FUNCTION"
continue
if char != "=":
nout += 1
else:
return nout
return nout
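# Illustrative session sketch (added for clarity; assumes a local Octave
# installation is available on PATH).
if __name__ == '__main__':
    with Oct2Py() as oc:
        oc.push('x', [1.0, 2.0, 3.0])
        print(oc.pull('x'))               # -> array([[1., 2., 3.]])
        print(oc.eval('sum(x)', nout=1))  # -> 6.0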
|
c0ae069cdbf5dc7e50c7beef4476827cd187c008
|
31be8b08142116537fb68490490048fc95344a41
|
/stubs/pycparser/c_ast.pyi
|
570c9781c15d371c0800112797272f910038e312
|
[
"MIT"
] |
permissive
|
simonlindholm/decomp-permuter
|
f79ab10ae0e46d1e1be0aab1c8a248c1cc31c1af
|
3f8a292ed0353569ccb727d57703b369df55507f
|
refs/heads/main
| 2023-05-25T16:44:06.803784
| 2023-05-15T21:44:16
| 2023-05-15T21:44:16
| 172,801,212
| 102
| 33
|
MIT
| 2023-04-03T02:53:55
| 2019-02-26T22:33:20
|
Python
|
UTF-8
|
Python
| false
| false
| 14,119
|
pyi
|
c_ast.pyi
|
# -----------------------------------------------------------------
# pycparser: c_ast.py
#
# AST Node classes.
#
# Eli Bendersky [https://eli.thegreenplace.net/]
# License: BSD
# -----------------------------------------------------------------
from typing import TextIO, Iterable, List, Any, Optional, Union as Union_
from .plyparser import Coord
import sys
class Node(object):
coord: Optional[Coord]
def __repr__(self) -> str: ...
def __iter__(self) -> Iterable[Node]: ...
def children(self) -> Iterable[Node]: ...
def show(
self,
buf: TextIO = sys.stdout,
offset: int = 0,
attrnames: bool = False,
nodenames: bool = False,
showcoord: bool = False,
) -> None: ...
Expression = Union_[
"ArrayRef",
"Assignment",
"BinaryOp",
"Cast",
"CompoundLiteral",
"Constant",
"ExprList",
"FuncCall",
"ID",
"StructRef",
"TernaryOp",
"UnaryOp",
]
Statement = Union_[
Expression,
"Break",
"Case",
"Compound",
"Continue",
"Decl",
"Default",
"DoWhile",
"EmptyStatement",
"For",
"Goto",
"If",
"Label",
"Return",
"Switch",
"Typedef",
"While",
"Pragma",
]
Type = Union_["PtrDecl", "ArrayDecl", "FuncDecl", "TypeDecl"]
InnerType = Union_["IdentifierType", "Struct", "Union", "Enum"]
ExternalDeclaration = Union_["FuncDef", "Decl", "Typedef", "Pragma"]
AnyNode = Union_[
Statement,
Type,
InnerType,
"Alignas",
"FuncDef",
"EllipsisParam",
"Enumerator",
"EnumeratorList",
"FileAST",
"InitList",
"NamedInitializer",
"ParamList",
"Typename",
]
class NodeVisitor:
def visit(self, node: Node) -> None: ...
def generic_visit(self, node: Node) -> None: ...
def visit_Alignas(self, node: Alignas) -> None: ...
def visit_ArrayDecl(self, node: ArrayDecl) -> None: ...
def visit_ArrayRef(self, node: ArrayRef) -> None: ...
def visit_Assignment(self, node: Assignment) -> None: ...
def visit_BinaryOp(self, node: BinaryOp) -> None: ...
def visit_Break(self, node: Break) -> None: ...
def visit_Case(self, node: Case) -> None: ...
def visit_Cast(self, node: Cast) -> None: ...
def visit_Compound(self, node: Compound) -> None: ...
def visit_CompoundLiteral(self, node: CompoundLiteral) -> None: ...
def visit_Constant(self, node: Constant) -> None: ...
def visit_Continue(self, node: Continue) -> None: ...
def visit_Decl(self, node: Decl) -> None: ...
def visit_DeclList(self, node: DeclList) -> None: ...
def visit_Default(self, node: Default) -> None: ...
def visit_DoWhile(self, node: DoWhile) -> None: ...
def visit_EllipsisParam(self, node: EllipsisParam) -> None: ...
def visit_EmptyStatement(self, node: EmptyStatement) -> None: ...
def visit_Enum(self, node: Enum) -> None: ...
def visit_Enumerator(self, node: Enumerator) -> None: ...
def visit_EnumeratorList(self, node: EnumeratorList) -> None: ...
def visit_ExprList(self, node: ExprList) -> None: ...
def visit_FileAST(self, node: FileAST) -> None: ...
def visit_For(self, node: For) -> None: ...
def visit_FuncCall(self, node: FuncCall) -> None: ...
def visit_FuncDecl(self, node: FuncDecl) -> None: ...
def visit_FuncDef(self, node: FuncDef) -> None: ...
def visit_Goto(self, node: Goto) -> None: ...
def visit_ID(self, node: ID) -> None: ...
def visit_IdentifierType(self, node: IdentifierType) -> None: ...
def visit_If(self, node: If) -> None: ...
def visit_InitList(self, node: InitList) -> None: ...
def visit_Label(self, node: Label) -> None: ...
def visit_NamedInitializer(self, node: NamedInitializer) -> None: ...
def visit_ParamList(self, node: ParamList) -> None: ...
def visit_PtrDecl(self, node: PtrDecl) -> None: ...
def visit_Return(self, node: Return) -> None: ...
def visit_Struct(self, node: Struct) -> None: ...
def visit_StructRef(self, node: StructRef) -> None: ...
def visit_Switch(self, node: Switch) -> None: ...
def visit_TernaryOp(self, node: TernaryOp) -> None: ...
def visit_TypeDecl(self, node: TypeDecl) -> None: ...
def visit_Typedef(self, node: Typedef) -> None: ...
def visit_Typename(self, node: Typename) -> None: ...
def visit_UnaryOp(self, node: UnaryOp) -> None: ...
def visit_Union(self, node: Union) -> None: ...
def visit_While(self, node: While) -> None: ...
def visit_Pragma(self, node: Pragma) -> None: ...
class Alignas(Node):
alignment: Union_[Expression, Typename]
coord: Optional[Coord]
def __init__(
self,
alignment: Union_[Expression, Typename],
coord: Optional[Coord] = None,
): ...
class ArrayDecl(Node):
type: Type
dim: Optional[Expression]
dim_quals: List[str]
def __init__(
self,
type: Type,
dim: Optional[Node],
dim_quals: List[str],
coord: Optional[Coord] = None,
): ...
class ArrayRef(Node):
name: Expression
subscript: Expression
def __init__(self, name: Node, subscript: Node, coord: Optional[Coord] = None): ...
class Assignment(Node):
op: str
lvalue: Expression
rvalue: Expression
def __init__(
self,
op: str,
lvalue: Expression,
rvalue: Expression,
coord: Optional[Coord] = None,
): ...
class BinaryOp(Node):
op: str
left: Expression
right: Expression
def __init__(
self, op: str, left: Node, right: Node, coord: Optional[Coord] = None
): ...
class Break(Node):
def __init__(self, coord: Optional[Coord] = None): ...
class Case(Node):
expr: Expression
stmts: List[Statement]
def __init__(
self, expr: Expression, stmts: List[Statement], coord: Optional[Coord] = None
): ...
class Cast(Node):
to_type: "Typename"
expr: Expression
def __init__(
self, to_type: "Typename", expr: Expression, coord: Optional[Coord] = None
): ...
class Compound(Node):
block_items: Optional[List[Statement]]
def __init__(
self, block_items: Optional[List[Statement]], coord: Optional[Coord] = None
): ...
class CompoundLiteral(Node):
type: "Typename"
init: "InitList"
def __init__(
self, type: "Typename", init: "InitList", coord: Optional[Coord] = None
): ...
class Constant(Node):
type: str
value: str
def __init__(self, type: str, value: str, coord: Optional[Coord] = None): ...
class Continue(Node):
def __init__(self, coord: Optional[Coord] = None): ...
class Decl(Node):
name: Optional[str]
quals: List[str] # e.g. const
align: List[Alignas]
storage: List[str] # e.g. register
funcspec: List[str] # e.g. inline
type: Union_[Type, "Struct", "Enum", "Union"]
init: Optional[Union_[Expression, "InitList"]]
bitsize: Optional[Expression]
def __init__(
self,
name: Optional[str],
quals: List[str],
align: List[Alignas],
storage: List[str],
funcspec: List[str],
type: Union_[Type, "Struct", "Enum", "Union"],
init: Optional[Union_[Expression, "InitList"]],
bitsize: Optional[Expression],
coord: Optional[Coord] = None,
): ...
class DeclList(Node):
decls: List[Decl]
def __init__(self, decls: List[Decl], coord: Optional[Coord] = None): ...
class Default(Node):
stmts: List[Statement]
def __init__(self, stmts: List[Statement], coord: Optional[Coord] = None): ...
class DoWhile(Node):
cond: Expression
stmt: Statement
def __init__(
self, cond: Expression, stmt: Statement, coord: Optional[Coord] = None
): ...
class EllipsisParam(Node):
def __init__(self, coord: Optional[Coord] = None): ...
class EmptyStatement(Node):
def __init__(self, coord: Optional[Coord] = None): ...
class Enum(Node):
name: Optional[str]
values: "Optional[EnumeratorList]"
def __init__(
self,
name: Optional[str],
values: "Optional[EnumeratorList]",
coord: Optional[Coord] = None,
): ...
class Enumerator(Node):
name: str
value: Optional[Expression]
def __init__(
self, name: str, value: Optional[Expression], coord: Optional[Coord] = None
): ...
class EnumeratorList(Node):
enumerators: List[Enumerator]
def __init__(
self, enumerators: List[Enumerator], coord: Optional[Coord] = None
): ...
class ExprList(Node):
exprs: List[Union_[Expression, Typename]] # typename only for offsetof
def __init__(
self, exprs: List[Union_[Expression, Typename]], coord: Optional[Coord] = None
): ...
class FileAST(Node):
ext: List[ExternalDeclaration]
def __init__(
self, ext: List[ExternalDeclaration], coord: Optional[Coord] = None
): ...
class For(Node):
init: Union_[None, Expression, DeclList]
cond: Optional[Expression]
next: Optional[Expression]
stmt: Statement
def __init__(
self,
init: Union_[None, Expression, DeclList],
cond: Optional[Expression],
next: Optional[Expression],
stmt: Statement,
coord: Optional[Coord] = None,
): ...
class FuncCall(Node):
name: Expression
args: Optional[ExprList]
def __init__(
self, name: Expression, args: Optional[ExprList], coord: Optional[Coord] = None
): ...
class FuncDecl(Node):
args: Optional[ParamList]
type: Type # return type
def __init__(
self, args: Optional[ParamList], type: Type, coord: Optional[Coord] = None
): ...
class FuncDef(Node):
decl: Decl
param_decls: Optional[List[Decl]]
body: Compound
def __init__(
self,
decl: Decl,
param_decls: Optional[List[Decl]],
body: Compound,
coord: Optional[Coord] = None,
): ...
class Goto(Node):
name: str
def __init__(self, name: str, coord: Optional[Coord] = None): ...
class ID(Node):
name: str
def __init__(self, name: str, coord: Optional[Coord] = None): ...
class IdentifierType(Node):
names: List[str] # e.g. ['long', 'int']
def __init__(self, names: List[str], coord: Optional[Coord] = None): ...
class If(Node):
cond: Expression
iftrue: Statement
iffalse: Optional[Statement]
def __init__(
self,
cond: Expression,
iftrue: Statement,
iffalse: Optional[Statement],
coord: Optional[Coord] = None,
): ...
class InitList(Node):
exprs: List[Union_[Expression, "NamedInitializer"]]
def __init__(
self,
exprs: List[Union_[Expression, "NamedInitializer"]],
coord: Optional[Coord] = None,
): ...
class Label(Node):
name: str
stmt: Statement
def __init__(self, name: str, stmt: Statement, coord: Optional[Coord] = None): ...
class NamedInitializer(Node):
name: List[Expression] # [ID(x), Constant(4)] for {.x[4] = ...}
expr: Expression
def __init__(
self, name: List[Expression], expr: Expression, coord: Optional[Coord] = None
): ...
class ParamList(Node):
params: List[Union_[Decl, ID, Typename, EllipsisParam]]
def __init__(
self,
params: List[Union_[Decl, ID, Typename, EllipsisParam]],
coord: Optional[Coord] = None,
): ...
class PtrDecl(Node):
quals: List[str]
type: Type
def __init__(self, quals: List[str], type: Type, coord: Optional[Coord] = None): ...
class Return(Node):
expr: Optional[Expression]
def __init__(self, expr: Optional[Expression], coord: Optional[Coord] = None): ...
class Struct(Node):
name: Optional[str]
decls: Optional[List[Union_[Decl, Pragma]]]
def __init__(
self,
name: Optional[str],
decls: Optional[List[Union_[Decl, Pragma]]],
coord: Optional[Coord] = None,
): ...
class StructRef(Node):
name: Expression
type: str
field: ID
def __init__(
self, name: Expression, type: str, field: ID, coord: Optional[Coord] = None
): ...
class Switch(Node):
cond: Expression
stmt: Statement
def __init__(
self, cond: Expression, stmt: Statement, coord: Optional[Coord] = None
): ...
class TernaryOp(Node):
cond: Expression
iftrue: Expression
iffalse: Expression
def __init__(
self,
cond: Expression,
iftrue: Expression,
iffalse: Expression,
coord: Optional[Coord] = None,
): ...
class TypeDecl(Node):
declname: Optional[str]
quals: List[str]
type: InnerType
align: List[Alignas]
def __init__(
self,
declname: Optional[str],
quals: List[str],
align: List[Alignas],
type: InnerType,
coord: Optional[Coord] = None,
): ...
class Typedef(Node):
name: str
quals: List[str]
storage: List[str]
type: Type
def __init__(
self,
name: str,
quals: List[str],
storage: List[str],
type: Type,
coord: Optional[Coord] = None,
): ...
class Typename(Node):
name: None
quals: List[str]
align: List[Alignas]
type: Type
def __init__(
self,
name: None,
quals: List[str],
align: List[Alignas],
type: Type,
coord: Optional[Coord] = None,
): ...
class UnaryOp(Node):
op: str
expr: Union_[Expression, Typename]
def __init__(
self, op: str, expr: Union_[Expression, Typename], coord: Optional[Coord] = None
): ...
class Union(Node):
name: Optional[str]
decls: Optional[List[Union_[Decl, Pragma]]]
def __init__(
self,
name: Optional[str],
decls: Optional[List[Union_[Decl, Pragma]]],
coord: Optional[Coord] = None,
): ...
class While(Node):
cond: Expression
stmt: Statement
def __init__(
self, cond: Expression, stmt: Statement, coord: Optional[Coord] = None
): ...
class Pragma(Node):
string: str
def __init__(self, string: str, coord: Optional[Coord] = None): ...
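# Illustrative sketch in comments only (a .pyi stub carries no runtime code).
# A typical visitor written against these stubs might look like:
#
# class FuncCallVisitor(NodeVisitor):
#     def visit_FuncCall(self, node: FuncCall) -> None:
#         print(node.name, node.coord)
#         self.generic_visit(node)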
|
70f2f124311ce2a2b2b1bfe13ec141e59ad3267a
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/inspections/PyUnresolvedReferencesInspection/unresolvedUnreachable.py
|
e47812f45030c52b111b1a238c076a83e710033b
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 106
|
py
|
unresolvedUnreachable.py
|
def f():
x = 1
print(<error descr="Unresolved reference 'z'">z</error>)
return x
print(x)
|
0275ab2064ec0ea146ab6500b6ca14fdc34eeb38
|
32d934cabb1eac917bb583a1428b87f78b335a4e
|
/code_per_day/day_49_to_50.py
|
1ac93caa762f41ba6d77c814b32401ee73857b2d
|
[] |
no_license
|
zotroneneis/magical_universe
|
7339fefcfdf47e21e5ebcc6f56e3f1949230932a
|
c5da3367b7854c4cf9625c45e03742dba3a6d63c
|
refs/heads/master
| 2022-12-07T20:21:25.427333
| 2022-11-13T14:33:01
| 2022-11-13T14:33:01
| 141,951,821
| 414
| 58
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 442
|
py
|
day_49_to_50.py
|
import sys
sys.path.append('../')
import yaml
from magical_universe import CastleKilmereMember, Pupil
if __name__ == "__main__":
with open('config.yaml', 'r') as c:
        config = yaml.safe_load(c)  # safe_load avoids yaml.load's Loader requirement
bromley = CastleKilmereMember(**config['bromley'])
print('bromley: ', bromley)
lissy = Pupil(**config['lissy'])
print('lissy: ', lissy)
luke = Pupil(**config['luke'])
print('luke: ', luke)
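# Illustrative note (added): config.yaml is expected to map each name to the
# keyword arguments of its constructor. The field names below are
# placeholders, not the real signature:
#
# bromley:
#   name: Bromley Huckabee
#   birthyear: 1959
# lissy:
#   name: Lissy Spinster
#   birthyear: 2008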
|
30baa136f7355bfa86df90d16b566d47c39683b2
|
04142fdda9b3fb29fb7456d5bc3e504985f24cbe
|
/tests/test_cnn/test_wrappers.py
|
8c76ccbdd4f930a1f50d38b38d1d47d09d6d5b3b
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmcv
|
419e301bbc1d7d45331d67eccfd673f290a796d5
|
6e9ee26718b22961d5c34caca4108413b1b7b3af
|
refs/heads/main
| 2023-08-31T07:08:27.223321
| 2023-08-28T09:02:10
| 2023-08-28T09:02:10
| 145,670,155
| 5,319
| 1,900
|
Apache-2.0
| 2023-09-14T02:37:16
| 2018-08-22T07:05:26
|
Python
|
UTF-8
|
Python
| false
| false
| 12,852
|
py
|
test_wrappers.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import patch
import pytest
import torch
import torch.nn as nn
from mmengine.utils import digit_version
from mmengine.utils.dl_utils import TORCH_VERSION
from mmcv.cnn.bricks import (Conv2d, Conv3d, ConvTranspose2d, ConvTranspose3d,
Linear, MaxPool2d, MaxPool3d)
if torch.__version__ != 'parrots':
torch_version = '1.1'
else:
torch_version = 'parrots'
@patch('torch.__version__', torch_version)
@pytest.mark.parametrize(
'in_w,in_h,in_channel,out_channel,kernel_size,stride,padding,dilation',
[(10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 3, 3, 5, 2, 1, 2)])
def test_conv2d(in_w, in_h, in_channel, out_channel, kernel_size, stride,
padding, dilation):
"""
CommandLine:
xdoctest -m tests/test_wrappers.py test_conv2d
"""
# train mode
# wrapper op with 0-dim input
x_empty = torch.randn(0, in_channel, in_h, in_w)
torch.manual_seed(0)
wrapper = Conv2d(
in_channel,
out_channel,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation)
wrapper_out = wrapper(x_empty)
    # torch op with a batch-size-3 input as shape reference
x_normal = torch.randn(3, in_channel, in_h, in_w).requires_grad_(True)
torch.manual_seed(0)
ref = nn.Conv2d(
in_channel,
out_channel,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation)
ref_out = ref(x_normal)
assert wrapper_out.shape[0] == 0
assert wrapper_out.shape[1:] == ref_out.shape[1:]
wrapper_out.sum().backward()
assert wrapper.weight.grad is not None
assert wrapper.weight.grad.shape == wrapper.weight.shape
assert torch.equal(wrapper(x_normal), ref_out)
# eval mode
x_empty = torch.randn(0, in_channel, in_h, in_w)
wrapper = Conv2d(
in_channel,
out_channel,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation)
wrapper.eval()
wrapper(x_empty)
@patch('torch.__version__', torch_version)
@pytest.mark.parametrize(
'in_w,in_h,in_t,in_channel,out_channel,kernel_size,stride,padding,dilation', # noqa: E501
[(10, 10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 20, 3, 3, 5, 2, 1, 2)])
def test_conv3d(in_w, in_h, in_t, in_channel, out_channel, kernel_size, stride,
padding, dilation):
"""
CommandLine:
xdoctest -m tests/test_wrappers.py test_conv3d
"""
# train mode
# wrapper op with 0-dim input
x_empty = torch.randn(0, in_channel, in_t, in_h, in_w)
torch.manual_seed(0)
wrapper = Conv3d(
in_channel,
out_channel,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation)
wrapper_out = wrapper(x_empty)
    # torch op with a batch-size-3 input as shape reference
x_normal = torch.randn(3, in_channel, in_t, in_h,
in_w).requires_grad_(True)
torch.manual_seed(0)
ref = nn.Conv3d(
in_channel,
out_channel,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation)
ref_out = ref(x_normal)
assert wrapper_out.shape[0] == 0
assert wrapper_out.shape[1:] == ref_out.shape[1:]
wrapper_out.sum().backward()
assert wrapper.weight.grad is not None
assert wrapper.weight.grad.shape == wrapper.weight.shape
assert torch.equal(wrapper(x_normal), ref_out)
# eval mode
x_empty = torch.randn(0, in_channel, in_t, in_h, in_w)
wrapper = Conv3d(
in_channel,
out_channel,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation)
wrapper.eval()
wrapper(x_empty)
@patch('torch.__version__', torch_version)
@pytest.mark.parametrize(
'in_w,in_h,in_channel,out_channel,kernel_size,stride,padding,dilation',
[(10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 3, 3, 5, 2, 1, 2)])
def test_conv_transposed_2d(in_w, in_h, in_channel, out_channel, kernel_size,
stride, padding, dilation):
# wrapper op with 0-dim input
x_empty = torch.randn(0, in_channel, in_h, in_w, requires_grad=True)
# out padding must be smaller than either stride or dilation
op = min(stride, dilation) - 1
if torch.__version__ == 'parrots':
op = 0
torch.manual_seed(0)
wrapper = ConvTranspose2d(
in_channel,
out_channel,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
output_padding=op)
wrapper_out = wrapper(x_empty)
    # torch op with a batch-size-3 input as shape reference
x_normal = torch.randn(3, in_channel, in_h, in_w)
torch.manual_seed(0)
ref = nn.ConvTranspose2d(
in_channel,
out_channel,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
output_padding=op)
ref_out = ref(x_normal)
assert wrapper_out.shape[0] == 0
assert wrapper_out.shape[1:] == ref_out.shape[1:]
wrapper_out.sum().backward()
assert wrapper.weight.grad is not None
assert wrapper.weight.grad.shape == wrapper.weight.shape
assert torch.equal(wrapper(x_normal), ref_out)
# eval mode
x_empty = torch.randn(0, in_channel, in_h, in_w)
wrapper = ConvTranspose2d(
in_channel,
out_channel,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
output_padding=op)
wrapper.eval()
wrapper(x_empty)
@patch('torch.__version__', torch_version)
@pytest.mark.parametrize(
'in_w,in_h,in_t,in_channel,out_channel,kernel_size,stride,padding,dilation', # noqa: E501
[(10, 10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 20, 3, 3, 5, 2, 1, 2)])
def test_conv_transposed_3d(in_w, in_h, in_t, in_channel, out_channel,
kernel_size, stride, padding, dilation):
# wrapper op with 0-dim input
x_empty = torch.randn(0, in_channel, in_t, in_h, in_w, requires_grad=True)
# out padding must be smaller than either stride or dilation
op = min(stride, dilation) - 1
torch.manual_seed(0)
wrapper = ConvTranspose3d(
in_channel,
out_channel,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
output_padding=op)
wrapper_out = wrapper(x_empty)
    # torch op with a batch-size-3 input as shape reference
x_normal = torch.randn(3, in_channel, in_t, in_h, in_w)
torch.manual_seed(0)
ref = nn.ConvTranspose3d(
in_channel,
out_channel,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
output_padding=op)
ref_out = ref(x_normal)
assert wrapper_out.shape[0] == 0
assert wrapper_out.shape[1:] == ref_out.shape[1:]
wrapper_out.sum().backward()
assert wrapper.weight.grad is not None
assert wrapper.weight.grad.shape == wrapper.weight.shape
assert torch.equal(wrapper(x_normal), ref_out)
# eval mode
x_empty = torch.randn(0, in_channel, in_t, in_h, in_w)
wrapper = ConvTranspose3d(
in_channel,
out_channel,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
output_padding=op)
wrapper.eval()
wrapper(x_empty)
@patch('torch.__version__', torch_version)
@pytest.mark.parametrize(
'in_w,in_h,in_channel,out_channel,kernel_size,stride,padding,dilation',
[(10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 3, 3, 5, 2, 1, 2)])
def test_max_pool_2d(in_w, in_h, in_channel, out_channel, kernel_size, stride,
padding, dilation):
# wrapper op with 0-dim input
x_empty = torch.randn(0, in_channel, in_h, in_w, requires_grad=True)
wrapper = MaxPool2d(
kernel_size, stride=stride, padding=padding, dilation=dilation)
wrapper_out = wrapper(x_empty)
    # torch op with a batch-size-3 input as shape reference
x_normal = torch.randn(3, in_channel, in_h, in_w)
ref = nn.MaxPool2d(
kernel_size, stride=stride, padding=padding, dilation=dilation)
ref_out = ref(x_normal)
assert wrapper_out.shape[0] == 0
assert wrapper_out.shape[1:] == ref_out.shape[1:]
assert torch.equal(wrapper(x_normal), ref_out)
@patch('torch.__version__', torch_version)
@pytest.mark.parametrize(
'in_w,in_h,in_t,in_channel,out_channel,kernel_size,stride,padding,dilation', # noqa: E501
[(10, 10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 20, 3, 3, 5, 2, 1, 2)])
@pytest.mark.skipif(
torch.__version__ == 'parrots' and not torch.cuda.is_available(),
reason='parrots requires CUDA support')
def test_max_pool_3d(in_w, in_h, in_t, in_channel, out_channel, kernel_size,
stride, padding, dilation):
# wrapper op with 0-dim input
x_empty = torch.randn(0, in_channel, in_t, in_h, in_w, requires_grad=True)
wrapper = MaxPool3d(
kernel_size, stride=stride, padding=padding, dilation=dilation)
if torch.__version__ == 'parrots':
x_empty = x_empty.cuda()
wrapper_out = wrapper(x_empty)
    # torch op with a batch-size-3 input as shape reference
x_normal = torch.randn(3, in_channel, in_t, in_h, in_w)
ref = nn.MaxPool3d(
kernel_size, stride=stride, padding=padding, dilation=dilation)
if torch.__version__ == 'parrots':
x_normal = x_normal.cuda()
ref_out = ref(x_normal)
assert wrapper_out.shape[0] == 0
assert wrapper_out.shape[1:] == ref_out.shape[1:]
assert torch.equal(wrapper(x_normal), ref_out)
@patch('torch.__version__', torch_version)
@pytest.mark.parametrize('in_w,in_h,in_feature,out_feature', [(10, 10, 1, 1),
(20, 20, 3, 3)])
def test_linear(in_w, in_h, in_feature, out_feature):
# wrapper op with 0-dim input
x_empty = torch.randn(0, in_feature, requires_grad=True)
torch.manual_seed(0)
wrapper = Linear(in_feature, out_feature)
wrapper_out = wrapper(x_empty)
    # torch op with a batch-size-3 input as shape reference
x_normal = torch.randn(3, in_feature)
torch.manual_seed(0)
ref = nn.Linear(in_feature, out_feature)
ref_out = ref(x_normal)
assert wrapper_out.shape[0] == 0
assert wrapper_out.shape[1:] == ref_out.shape[1:]
wrapper_out.sum().backward()
assert wrapper.weight.grad is not None
assert wrapper.weight.grad.shape == wrapper.weight.shape
assert torch.equal(wrapper(x_normal), ref_out)
# eval mode
x_empty = torch.randn(0, in_feature)
wrapper = Linear(in_feature, out_feature)
wrapper.eval()
wrapper(x_empty)
@patch('mmcv.cnn.bricks.wrappers.TORCH_VERSION', (1, 10))
def test_nn_op_forward_called():
for m in ['Conv2d', 'ConvTranspose2d', 'MaxPool2d']:
with patch(f'torch.nn.{m}.forward') as nn_module_forward:
            # empty (0-batch) input
x_empty = torch.randn(0, 3, 10, 10)
wrapper = eval(m)(3, 2, 1)
wrapper(x_empty)
nn_module_forward.assert_called_with(x_empty)
            # non-empty input
x_normal = torch.randn(1, 3, 10, 10)
wrapper = eval(m)(3, 2, 1)
wrapper(x_normal)
nn_module_forward.assert_called_with(x_normal)
for m in ['Conv3d', 'ConvTranspose3d', 'MaxPool3d']:
with patch(f'torch.nn.{m}.forward') as nn_module_forward:
            # empty (0-batch) input
x_empty = torch.randn(0, 3, 10, 10, 10)
wrapper = eval(m)(3, 2, 1)
wrapper(x_empty)
nn_module_forward.assert_called_with(x_empty)
            # non-empty input
x_normal = torch.randn(1, 3, 10, 10, 10)
wrapper = eval(m)(3, 2, 1)
wrapper(x_normal)
nn_module_forward.assert_called_with(x_normal)
with patch('torch.nn.Linear.forward') as nn_module_forward:
        # empty (0-batch) input
x_empty = torch.randn(0, 3)
wrapper = Linear(3, 3)
wrapper(x_empty)
nn_module_forward.assert_called_with(x_empty)
        # non-empty input
x_normal = torch.randn(1, 3)
wrapper = Linear(3, 3)
wrapper(x_normal)
nn_module_forward.assert_called_with(x_normal)
@pytest.mark.skipif(
digit_version(TORCH_VERSION) < digit_version('1.10'),
reason='MaxPool2d and MaxPool3d will fail fx for torch<=1.9')
def test_fx_compatibility():
from torch import fx
# ensure the fx trace can pass the network
for Net in (MaxPool2d, MaxPool3d):
net = Net(1)
gm_module = fx.symbolic_trace(net) # noqa: F841
for Net in (Linear, ):
net = Net(1, 1)
gm_module = fx.symbolic_trace(net) # noqa: F841
for Net in (Conv2d, ConvTranspose2d, Conv3d, ConvTranspose3d):
net = Net(1, 1, 1)
gm_module = fx.symbolic_trace(net) # noqa: F841
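# Minimal standalone sketch (added) of what these wrappers provide: a 0-batch
# input passes through and yields an output with the correct trailing shape.
if __name__ == '__main__':
    conv = Conv2d(3, 8, kernel_size=3, padding=1)
    empty = torch.randn(0, 3, 16, 16)
    print(conv(empty).shape)  # -> torch.Size([0, 8, 16, 16])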
|
46de98eb4c03f2da3c878cfdc0cedda183df28d8
|
cadb6dceb7bb67ce47ef48b2c83f480a65d6b01a
|
/s3prl/problem/ssl/mockingjay.py
|
47c44a575b92b199ed07c7bb5e8186039fdcd8b9
|
[
"Apache-2.0",
"CC-BY-NC-4.0"
] |
permissive
|
s3prl/s3prl
|
52ec2ae4df5a61c786c122085603aa9c5e8c2681
|
76a9432b824f6ae3eae09a35a67782c4ed582832
|
refs/heads/main
| 2023-08-17T02:26:57.524087
| 2023-06-10T17:12:27
| 2023-06-10T17:12:27
| 196,905,457
| 1,549
| 398
|
Apache-2.0
| 2023-09-14T13:07:05
| 2019-07-15T01:54:52
|
Python
|
UTF-8
|
Python
| false
| false
| 6,333
|
py
|
mockingjay.py
|
import torch
from torch.nn import L1Loss
from s3prl.corpus.librispeech import librispeech_for_pretrain
from s3prl.dataset.pretrain_mockingjay_pipe import PretrainMockingjayPipe
from s3prl.nn.predictor_mockingjay import PredictorMockingjay
from s3prl.nn.transformer_mockingjay import TransformerMockingjay
from s3prl.sampler import FixedBatchSizeBatchSampler, MaxTimestampBatchSampler
from s3prl.task import Task
from s3prl.task.feat_reconstruction_task import FeatReconstructionTask
from s3prl.util.configuration import override_parent_cfg
from s3prl.util.workspace import Workspace
from .base import SslProblem
_input_size = 240
_mask_args = dict(
position_encoding_size=768, # int, this should be identical to `hidden_size`
mask_proportion=0.15, # float, mask this percentage of all spectrogram frames in each sequence at random during MAM training
mask_consecutive_min=7, # int, mask this amount of consecutive frames
mask_consecutive_max=7, # int, mask this amount of consecutive frames
mask_allow_overlap=True, # bool, allow overlap masking
mask_bucket_ratio=1.5, # float, only used when overlap is not allowed. sample a mask from each bucket in size of [sampled mask_consecutive * mask_bucket_ratio]
mask_frequency=0.0, # float, mask maximum this percentage of frequency bands, set to 0 for no frequency mask
)
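# A rough worked example (hypothetical numbers) of how the knobs above
# interact: for a 1000-frame utterance, mask_proportion=0.15 targets about
# 1000 * 0.15 = 150 masked frames; with mask_consecutive_min ==
# mask_consecutive_max == 7, that is roughly 150 // 7 ~= 21 masked spans of
# 7 consecutive frames each.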
_audio_config = dict(
kaldi={
"feat_type": "fbank",
"fbank": {
"frame_length": 25.0,
"frame_shift": 10.0,
"num_mel_bins": _input_size // 3, # because delta={"order": 2}
"use_log_fbank": True,
},
"mfcc": {"frame_length": 25.0, "frame_shift": 10.0, "num_ceps": 13},
"spectrogram": {"frame_length": 25.0, "frame_shift": 10.0},
},
delta={"order": 2, "win_length": 5},
cmvn={"use_cmvn": True},
)
pretrain_task_pipe_config = dict(
_cls=PretrainMockingjayPipe,
**_mask_args,
**_audio_config,
)
_transformer_config = dict(
hidden_size=768, # Size of the encoder layers and the pooler layer.
num_hidden_layers=3, # Number of hidden layers in the Transformer encoder.
num_attention_heads=12, # Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size=3072, # The size of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act="gelu", # The non-linear activation function (function or string) in the encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
    hidden_dropout_prob=0.1,  # The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob=0.1, # The dropout ratio for the attention probabilities.
    initializer_range=0.02,  # The stddev of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps=1.0e-12, # The epsilon used by LayerNorm.
share_layer=False, # Share layer weights
pre_layer_norm=False, # To apply the pre layer normalization technique introduced in: https://arxiv.org/abs/2002.04745
)
class Mockingjay(SslProblem):
"""
Mockingjay pre-train problem
"""
@override_parent_cfg(
corpus=dict(
_cls=librispeech_for_pretrain,
dataset_root="???",
),
train_datapipe=pretrain_task_pipe_config,
train_sampler=dict(
_cls=MaxTimestampBatchSampler,
max_timestamp=16000 * 20,
shuffle=True,
),
valid_datapipe=pretrain_task_pipe_config,
valid_sampler=dict(
_cls=FixedBatchSizeBatchSampler,
batch_size=2,
),
test_datapipe=pretrain_task_pipe_config,
test_sampler=dict(
_cls=FixedBatchSizeBatchSampler,
batch_size=2,
),
upstream=dict(
_cls=TransformerMockingjay,
config=_transformer_config,
input_dim=_input_size,
output_attentions=False,
keep_multihead_output=False,
with_input_module=True,
),
predictor=dict(
_cls=PredictorMockingjay,
config=_transformer_config,
output_dim=_input_size,
input_dim=None, # automatically use `hidden_size` from `_transformer_config`
),
task=dict(
_cls=FeatReconstructionTask,
loss=L1Loss,
),
)
@classmethod
def setup_problem(cls, **cfg):
"""
        This sets up the Mockingjay problem: the train/valid/test datasets & samplers and a task object
"""
super().setup_problem(**cfg)
@override_parent_cfg(
optimizer=dict(
_cls="torch.optim.AdamW",
lr=2.0e-4,
),
trainer=dict(
total_steps=1000000,
eval_step=50000,
save_step=50000,
gradient_clipping=5.0,
gradient_accumulate_steps=4,
valid_metric="loss",
valid_higher_better=False,
),
)
@classmethod
def train(cls, **cfg):
"""
        Train the problem set up above, using the train/valid datasets & samplers and the task object
"""
super().train(**cfg)
@override_parent_cfg()
@classmethod
def inference(cls, **cfg):
super().inference(**cfg)
@classmethod
def save_additional(
cls,
additional_dir: Workspace,
workspace: Workspace,
task: Task,
):
all_states = dict(
Config={}, # placeholder
SpecHead=task.predictor.state_dict(),
Transformer=task.upstream.state_dict(),
Upstream_Config=dict(
transformer=_transformer_config,
audio=_audio_config,
task=dict(sequence_length=0),
),
)
torch.save(
all_states, str(additional_dir.parent.resolve()) + "/all_states.ckpt"
)
@override_parent_cfg(
start_stage=0,
final_stage=2,
stage_0=dict(
_method="setup_problem",
),
stage_1=dict(
_method="train",
),
stage_2=dict(
_method="inference",
),
)
@classmethod
def run_stages(cls, **cfg):
super().run_stages(**cfg)
|
8a30e4efc37012b6047ea34192a96c83c990fe6c
|
a133a7c64f6e08def0f936898466990d1fd1b31f
|
/atomate/feff/firetasks/parse_outputs.py
|
301dc1126bb4cc1a3250f502a5982ed90f1838cf
|
[
"LicenseRef-scancode-hdf5",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
hackingmaterials/atomate
|
a6458f9323b8f14d7b4ebb6558fb578d50a3f1ed
|
f4060e55ae3a22289fde9516ff0e8e4ac1d22190
|
refs/heads/main
| 2023-08-07T21:53:24.701157
| 2023-07-25T22:28:06
| 2023-07-25T22:28:06
| 43,023,379
| 217
| 173
|
NOASSERTION
| 2023-08-25T22:09:48
| 2015-09-23T19:53:55
|
Python
|
UTF-8
|
Python
| false
| false
| 4,299
|
py
|
parse_outputs.py
|
import json
import os
from datetime import datetime
from glob import glob
import numpy as np
from fireworks import FiretaskBase, FWAction, explicit_serialize
from fireworks.user_objects.firetasks.filepad_tasks import get_fpad
from fireworks.utilities.fw_serializers import DATETIME_HANDLER
from pymatgen.io.feff.inputs import Atoms, Tags
from atomate.common.firetasks.glue_tasks import get_calc_loc
from atomate.feff.database import FeffCalcDb
from atomate.utils.utils import env_chk, get_logger
__author__ = "Kiran Mathew"
__email__ = "kmathew@lbl.gov"
logger = get_logger(__name__)
@explicit_serialize
class SpectrumToDbTask(FiretaskBase):
"""
    Parse the output of absorption/core-loss spectrum calculations (xmu.dat, eels.dat)
    and insert it into the database.
Required_params:
absorbing_atom (str): absorbing atom symbol
structure (Structure): input structure
spectrum_type (str): XANES, EXAFS, ELNES, EXELFS
output_file (str): the output file name. xmu.dat or eels.dat
Optional_params:
input_file (str): path to the feff input file.
calc_dir (str): path to dir (on current filesystem) that contains FEFF output files.
Default: use current working directory.
calc_loc (str OR bool): if True will set most recent calc_loc. If str search for the most
recent calc_loc with the matching name
db_file (str): path to the db file.
edge (str): absorption edge
metadata (dict): meta data
"""
required_params = ["absorbing_atom", "structure", "spectrum_type", "output_file"]
optional_params = [
"input_file",
"calc_dir",
"calc_loc",
"db_file",
"edge",
"metadata",
]
def run_task(self, fw_spec):
calc_dir = os.getcwd()
if "calc_dir" in self:
calc_dir = self["calc_dir"]
elif self.get("calc_loc"):
calc_dir = get_calc_loc(self["calc_loc"], fw_spec["calc_locs"])["path"]
logger.info(f"PARSING DIRECTORY: {calc_dir}")
db_file = env_chk(self.get("db_file"), fw_spec)
cluster_dict = None
tags = Tags.from_file(filename="feff.inp")
if "RECIPROCAL" not in tags:
cluster_dict = Atoms.cluster_from_file("feff.inp").as_dict()
doc = {
"input_parameters": tags.as_dict(),
"cluster": cluster_dict,
"structure": self["structure"].as_dict(),
"absorbing_atom": self["absorbing_atom"],
"spectrum_type": self["spectrum_type"],
"spectrum": np.loadtxt(
os.path.join(calc_dir, self["output_file"])
).tolist(),
"edge": self.get("edge", None),
"metadata": self.get("metadata", None),
"dir_name": os.path.abspath(os.getcwd()),
"last_updated": datetime.utcnow(),
}
if not db_file:
with open("feff_task.json", "w") as f:
f.write(json.dumps(doc, default=DATETIME_HANDLER))
else:
db = FeffCalcDb.from_db_file(db_file, admin=True)
db.insert(doc)
logger.info("Finished parsing the spectrum")
return FWAction(stored_data={"task_id": doc.get("task_id", None)})
@explicit_serialize
class AddPathsToFilepadTask(FiretaskBase):
"""
Insert the scattering amplitude outputs(all feffNNNN.dat files) to gridfs using filepad.
Optional_params:
labels (list): list of labels to tag the inserted files. Useful for querying later.
filepad_file (str): path to the filepad connection settings file.
compress (bool): Whether or not to compress the file contents before insertion.
metadata (dict): metadata.
"""
optional_params = ["labels", "filepad_file", "compress", "metadata"]
def run_task(self, fw_spec):
paths = glob("feff????.dat")
fpad = get_fpad(self.get("filepad_file", None))
labels = self.get("labels", None)
for i, p in enumerate(paths):
label = labels[i] if labels is not None else None
fpad.add_file(
p,
label=label,
metadata=self.get("metadata", None),
compress=self.get("compress", True),
)
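# A minimal construction sketch for these tasks (hypothetical structure and
# settings; real FEFF workflows come from atomate's workflow factories):
if __name__ == "__main__":
    from fireworks import Firework
    from pymatgen.core import Lattice, Structure

    struct = Structure(Lattice.cubic(4.0), ["O"], [[0, 0, 0]])
    fw = Firework(
        [
            SpectrumToDbTask(
                absorbing_atom="O",
                structure=struct,
                spectrum_type="XANES",
                output_file="xmu.dat",
            ),
            AddPathsToFilepadTask(labels=["O-XANES"]),
        ],
        name="feff-spectrum-to-db",
    )
    print(f"{fw.name} with {len(fw.tasks)} tasks")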
|
c5ecdc82964c4838cbe09d11b5d4f2f139fa745b
|
50bfacc4354005439ce8f1614b38a08c48811625
|
/tests/debugpy/test_attach.py
|
7d635dbce90a8967a47e46c5b6ddd3b5d178e7d0
|
[
"MIT"
] |
permissive
|
microsoft/debugpy
|
ef8dd7d5382e4dab44cf73b0e2ac6c801a6f91d7
|
86d542e55986e1290b0b99eb2cac5bc961206bbe
|
refs/heads/main
| 2023-09-01T17:23:31.223770
| 2023-08-28T19:23:23
| 2023-08-28T20:14:39
| 234,187,391
| 1,384
| 124
|
NOASSERTION
| 2023-09-12T18:14:57
| 2020-01-15T22:38:20
|
Python
|
UTF-8
|
Python
| false
| false
| 7,979
|
py
|
test_attach.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
import pytest
import sys
from tests import debug
from tests.debug import runners
from tests.patterns import some
@pytest.mark.parametrize("stop_method", ["breakpoint", "pause"])
@pytest.mark.parametrize("is_client_connected", ["is_client_connected", ""])
@pytest.mark.parametrize("wait_for_client", ["wait_for_client", ""])
def test_attach_api(pyfile, wait_for_client, is_client_connected, stop_method):
@pyfile
def code_to_debug():
import debuggee
import debugpy
import sys
import time
from debuggee import backchannel, scratchpad
# Test different ways of calling configure().
debugpy.configure(qt="none", subProcess=True, python=sys.executable)
debugpy.configure({"qt": "none", "subProcess": True, "python": sys.executable})
debugpy.configure({"qt": "none"}, python=sys.executable)
debuggee.setup()
_, host, port, wait_for_client, is_client_connected, stop_method = sys.argv
port = int(port)
debugpy.listen(address=(host, port))
if wait_for_client:
backchannel.send("wait_for_client")
debugpy.wait_for_client()
if is_client_connected:
backchannel.send("is_client_connected")
while not debugpy.is_client_connected():
print("looping until is_client_connected()")
time.sleep(0.1)
if stop_method == "breakpoint":
backchannel.send("breakpoint?")
assert backchannel.receive() == "proceed"
debugpy.breakpoint()
print("break") # @breakpoint
else:
scratchpad["paused"] = False
backchannel.send("loop?")
assert backchannel.receive() == "proceed"
while not scratchpad["paused"]:
print("looping until paused")
time.sleep(0.1)
with debug.Session() as session:
host, port = runners.attach_connect.host, runners.attach_connect.port
session.config.update({"connect": {"host": host, "port": port}})
backchannel = session.open_backchannel()
session.spawn_debuggee(
[
code_to_debug,
host,
port,
wait_for_client,
is_client_connected,
stop_method,
]
)
session.wait_for_adapter_socket()
session.connect_to_adapter((host, port))
with session.request_attach():
pass
if wait_for_client:
assert backchannel.receive() == "wait_for_client"
if is_client_connected:
assert backchannel.receive() == "is_client_connected"
if stop_method == "breakpoint":
assert backchannel.receive() == "breakpoint?"
backchannel.send("proceed")
session.wait_for_stop(
expected_frames=[some.dap.frame(code_to_debug, "breakpoint")]
)
elif stop_method == "pause":
assert backchannel.receive() == "loop?"
backchannel.send("proceed")
session.request("pause", freeze=False)
session.wait_for_stop("pause")
session.scratchpad["paused"] = True
else:
pytest.fail(stop_method)
session.request_continue()
@pytest.mark.parametrize("run", runners.all_attach_connect)
def test_reattach(pyfile, target, run):
@pyfile
def code_to_debug():
import time
import debuggee
import debugpy
from debuggee import scratchpad
debuggee.setup()
debugpy.breakpoint()
object() # @first
scratchpad["exit"] = False
while not scratchpad["exit"]:
time.sleep(0.1)
debugpy.breakpoint()
object() # @second
with debug.Session() as session1:
session1.captured_output = set()
session1.expected_exit_code = None # not expected to exit on disconnect
with run(session1, target(code_to_debug)):
pass
session1.wait_for_stop(expected_frames=[some.dap.frame(code_to_debug, "first")])
session1.disconnect()
with debug.Session() as session2:
session2.config.update(session1.config)
if "connect" in session2.config:
session2.connect_to_adapter(
(session2.config["connect"]["host"], session2.config["connect"]["port"])
)
with session2.request_attach():
pass
session2.wait_for_stop(
expected_frames=[some.dap.frame(code_to_debug, "second")]
)
session2.scratchpad["exit"] = True
session2.request_continue()
session1.wait_for_exit()
@pytest.mark.parametrize("pid_type", ["int", "str"])
@pytest.mark.skipif(
not sys.platform.startswith("linux"),
reason="https://github.com/microsoft/debugpy/issues/311",
)
def test_attach_pid_client(pyfile, target, pid_type):
@pyfile
def code_to_debug():
import debuggee
import time
debuggee.setup()
def do_something(i):
time.sleep(0.2)
proceed = True
print(i) # @bp
return proceed
for i in range(500):
if not do_something(i):
break
def before_request(command, arguments):
if command == "attach":
assert isinstance(arguments["processId"], int)
if pid_type == "str":
arguments["processId"] = str(arguments["processId"])
session1 = debug.Session()
session1.before_request = before_request
session1.config["redirectOutput"] = True
session1.captured_output = set()
session1.expected_exit_code = None # not expected to exit on disconnect
with session1.attach_pid(target(code_to_debug), wait=False):
session1.set_breakpoints(code_to_debug, all)
session1.wait_for_stop(expected_frames=[some.dap.frame(code_to_debug, "bp")])
pid = session1.config["processId"]
# Note: don't call session1.disconnect because it'd deadlock in channel.close()
# (because the fd is in a read() in a different thread, we can't call close() on it).
session1.request("disconnect")
session1.wait_for_terminated()
with debug.Session() as session2:
with session2.attach_pid(pid, wait=False):
session2.set_breakpoints(code_to_debug, all)
stop = session2.wait_for_stop(
expected_frames=[some.dap.frame(code_to_debug, "bp")]
)
# Remove breakpoint and continue.
session2.set_breakpoints(code_to_debug, [])
session2.request(
"setExpression",
{"frameId": stop.frame_id, "expression": "proceed", "value": "False"},
)
session2.scratchpad["exit"] = True
session2.request_continue()
def test_cancel_wait(pyfile):
@pyfile
def code_to_debug():
import debugpy
import sys
import threading
import time
from debuggee import backchannel
def cancel():
time.sleep(1)
debugpy.wait_for_client.cancel()
_, host, port = sys.argv
port = int(port)
debugpy.listen(address=(host, port))
threading.Thread(target=cancel).start()
debugpy.wait_for_client()
backchannel.send("exit")
with debug.Session() as session:
host, port = runners.attach_connect.host, runners.attach_connect.port
session.config.update({"connect": {"host": host, "port": port}})
session.expected_exit_code = None
backchannel = session.open_backchannel()
session.spawn_debuggee(
[
code_to_debug,
host,
port,
]
)
assert backchannel.receive() == "exit"
|
64593e291fe894fecca0520498feae05497265b0
|
fb1e852da0a026fb59c8cb24aeb40e62005501f1
|
/infoxlm/src-infoxlm/infoxlm/data/tlm_dataset.py
|
92429ed1206f245c8807c1cbe4fb496b05d7ad1f
|
[
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"MIT"
] |
permissive
|
microsoft/unilm
|
134aa44867c5ed36222220d3f4fd9616d02db573
|
b60c741f746877293bb85eed6806736fc8fa0ffd
|
refs/heads/master
| 2023-08-31T04:09:05.779071
| 2023-08-29T14:07:57
| 2023-08-29T14:07:57
| 198,350,484
| 15,313
| 2,192
|
MIT
| 2023-08-19T11:33:20
| 2019-07-23T04:15:28
|
Python
|
UTF-8
|
Python
| false
| false
| 726
|
py
|
tlm_dataset.py
|
import torch
from fairseq.data import FairseqDataset
class TLMDataset(FairseqDataset):
def __init__(self, src_dataset, tgt_dataset, bos, eos):
assert len(src_dataset) == len(tgt_dataset)
self.src_dataset = src_dataset
self.tgt_dataset = tgt_dataset
self.bos = bos
self.eos = eos
self._sizes = src_dataset.sizes + tgt_dataset.sizes
def __len__(self):
return len(self.src_dataset)
@property
def sizes(self):
return self._sizes
def __getitem__(self, index):
src_item = self.src_dataset[index]
tgt_item = self.tgt_dataset[index]
return torch.cat([
src_item.new([self.bos]), src_item, src_item.new([self.eos]),
tgt_item, tgt_item.new([self.eos]),
])
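# A minimal usage sketch (hypothetical token ids and toy datasets; a real
# setup would pass fairseq indexed datasets and the dictionary's bos()/eos()):
if __name__ == "__main__":
    import numpy as np

    class _ToyDataset(FairseqDataset):
        def __init__(self, items):
            self.items = [torch.tensor(x) for x in items]
            self.sizes = np.array([len(x) for x in items])

        def __getitem__(self, index):
            return self.items[index]

        def __len__(self):
            return len(self.items)

    ds = TLMDataset(_ToyDataset([[5, 6]]), _ToyDataset([[7, 8]]), bos=0, eos=2)
    # <bos> src <eos> tgt <eos>  ->  [0, 5, 6, 2, 7, 8, 2]
    print(ds[0].tolist())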
|
bfe53c12b2586044c904925eadff961cd2641292
|
334174f296ec618d5813e028187461519583ae06
|
/network_controllers/dnac/device_list.py
|
1bfe9af2ee0ce4973622dc13e1047ee77559b8b8
|
[
"MIT"
] |
permissive
|
CiscoDevNet/netprog_basics
|
096713a564189c7b08da443537a8f2ea15effdef
|
ecdd5a2c884ac27fae4dcf469daed684ebf9abfa
|
refs/heads/master
| 2023-04-27T12:23:15.784293
| 2023-02-23T14:57:23
| 2023-02-23T14:57:23
| 98,338,085
| 738
| 456
|
MIT
| 2023-09-05T16:54:09
| 2017-07-25T18:33:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,286
|
py
|
device_list.py
|
#! /usr/bin/env python
"""
Learning Series: Network Programmability Basics
Module: Network Controllers
Lesson: Program your own DNA with DNA Center APIs
Author: Hank Preston <hapresto@cisco.com>
device_list.py
Illustrate the following concepts:
- Building DNA Center API Code
- Start from Postman Auto-generated code
- Multiple requests in one script
"""
__author__ = "Hank Preston"
__author_email__ = "hapresto@cisco.com"
__copyright__ = "Copyright (c) 2016 Cisco Systems, Inc."
__license__ = "MIT"
from device_info import dnac
import requests
import json
import urllib3
# Silence the insecure warning due to SSL Certificate
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
headers = {
'content-type': "application/json",
'x-auth-token': ""
}
def dnac_login(host, username, password):
"""
    Use the REST API to log into a DNA Center and retrieve a ticket
"""
url = "https://{}/api/system/v1/auth/token".format(host)
# Make Login request and return the response body
response = requests.request("POST", url,
auth = (username, password),
headers=headers, verify=False)
return response.json()["Token"]
def network_device_list(host, token):
"""
Use the REST API to retrieve the list of network devices
"""
url = "https://{}/api/v1/network-device".format(host)
headers["x-auth-token"] = token
# Make API request and return the response body
response = requests.request("GET", url, headers=headers, verify=False)
return response.json()["response"]
# Entry point for program
if __name__ == '__main__':
# Log into the DNA Center Controller to get Ticket
token = dnac_login(dnac["host"], dnac["username"], dnac["password"])
# Get the list of devices
devices = network_device_list(dnac["host"], token)
# Loop through the devices and print details
for device in devices:
print("{} in family {}".format(device["hostname"], device["family"]))
print(" Management IP: {}".format(device["managementIpAddress"]))
print(" Platform Type: {}".format(device["platformId"]))
print(" Software Version: {}".format(device["softwareVersion"]))
print("")
|
338175a7747f9ac93d17086704152ceb865515e4
|
4bcc9806152542ab43fc2cf47c499424f200896c
|
/tensorflow/python/keras/constraints.py
|
85a38e8dc850660cf24bb217a05d45e757dff6cc
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
tensorflow/tensorflow
|
906276dbafcc70a941026aa5dc50425ef71ee282
|
a7f3934a67900720af3d3b15389551483bee50b8
|
refs/heads/master
| 2023-08-25T04:24:41.611870
| 2023-08-25T04:06:24
| 2023-08-25T04:14:08
| 45,717,250
| 208,740
| 109,943
|
Apache-2.0
| 2023-09-14T20:55:50
| 2015-11-07T01:19:20
|
C++
|
UTF-8
|
Python
| false
| false
| 11,444
|
py
|
constraints.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
# pylint: disable=g-classes-have-attributes
"""Constraints: functions that impose constraints on weight values."""
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import array_ops_stack
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import while_loop
from tensorflow.tools.docs import doc_controls
class Constraint:
"""Base class for weight constraints.
A `Constraint` instance works like a stateless function.
Users who subclass this
class should override the `__call__` method, which takes a single
  weight parameter and returns a projected version of that parameter
(e.g. normalized or clipped). Constraints can be used with various Keras
layers via the `kernel_constraint` or `bias_constraint` arguments.
Here's a simple example of a non-negative weight constraint:
>>> class NonNegative(tf.keras.constraints.Constraint):
...
... def __call__(self, w):
... return w * tf.cast(tf.math.greater_equal(w, 0.), w.dtype)
>>> weight = tf.constant((-1.0, 1.0))
>>> NonNegative()(weight)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([0., 1.], dtype=float32)>
>>> tf.keras.layers.Dense(4, kernel_constraint=NonNegative())
"""
def __call__(self, w):
"""Applies the constraint to the input weight variable.
    By default, the input weight variable is not modified.
Users should override this method to implement their own projection
function.
Args:
w: Input weight variable.
Returns:
Projected variable (by default, returns unmodified inputs).
"""
return w
def get_config(self):
"""Returns a Python dict of the object config.
A constraint config is a Python dictionary (JSON-serializable) that can
be used to reinstantiate the same object.
Returns:
Python dict containing the configuration of the constraint object.
"""
return {}
class MaxNorm(Constraint):
"""MaxNorm weight constraint.
Constrains the weights incident to each hidden unit
to have a norm less than or equal to a desired value.
Also available via the shortcut function `tf.keras.constraints.max_norm`.
Args:
max_value: the maximum norm value for the incoming weights.
axis: integer, axis along which to calculate weight norms.
For instance, in a `Dense` layer the weight matrix
has shape `(input_dim, output_dim)`,
set `axis` to `0` to constrain each weight vector
of length `(input_dim,)`.
In a `Conv2D` layer with `data_format="channels_last"`,
the weight tensor has shape
`(rows, cols, input_depth, output_depth)`,
set `axis` to `[0, 1, 2]`
to constrain the weights of each filter tensor of size
`(rows, cols, input_depth)`.
"""
def __init__(self, max_value=2, axis=0):
self.max_value = max_value
self.axis = axis
@doc_controls.do_not_generate_docs
def __call__(self, w):
norms = backend.sqrt(
math_ops.reduce_sum(math_ops.square(w), axis=self.axis, keepdims=True))
desired = backend.clip(norms, 0, self.max_value)
return w * (desired / (backend.epsilon() + norms))
@doc_controls.do_not_generate_docs
def get_config(self):
return {'max_value': self.max_value, 'axis': self.axis}
class NonNeg(Constraint):
"""Constrains the weights to be non-negative.
Also available via the shortcut function `tf.keras.constraints.non_neg`.
"""
def __call__(self, w):
return w * math_ops.cast(math_ops.greater_equal(w, 0.), backend.floatx())
class UnitNorm(Constraint):
"""Constrains the weights incident to each hidden unit to have unit norm.
Also available via the shortcut function `tf.keras.constraints.unit_norm`.
Args:
axis: integer, axis along which to calculate weight norms.
For instance, in a `Dense` layer the weight matrix
has shape `(input_dim, output_dim)`,
set `axis` to `0` to constrain each weight vector
of length `(input_dim,)`.
In a `Conv2D` layer with `data_format="channels_last"`,
the weight tensor has shape
`(rows, cols, input_depth, output_depth)`,
set `axis` to `[0, 1, 2]`
to constrain the weights of each filter tensor of size
`(rows, cols, input_depth)`.
"""
def __init__(self, axis=0):
self.axis = axis
@doc_controls.do_not_generate_docs
def __call__(self, w):
return w / (
backend.epsilon() + backend.sqrt(
math_ops.reduce_sum(
math_ops.square(w), axis=self.axis, keepdims=True)))
@doc_controls.do_not_generate_docs
def get_config(self):
return {'axis': self.axis}
class MinMaxNorm(Constraint):
"""MinMaxNorm weight constraint.
Constrains the weights incident to each hidden unit
to have the norm between a lower bound and an upper bound.
Also available via the shortcut function `tf.keras.constraints.min_max_norm`.
Args:
min_value: the minimum norm for the incoming weights.
max_value: the maximum norm for the incoming weights.
rate: rate for enforcing the constraint: weights will be
rescaled to yield
`(1 - rate) * norm + rate * norm.clip(min_value, max_value)`.
Effectively, this means that rate=1.0 stands for strict
enforcement of the constraint, while rate<1.0 means that
weights will be rescaled at each step to slowly move
towards a value inside the desired interval.
axis: integer, axis along which to calculate weight norms.
For instance, in a `Dense` layer the weight matrix
has shape `(input_dim, output_dim)`,
set `axis` to `0` to constrain each weight vector
of length `(input_dim,)`.
In a `Conv2D` layer with `data_format="channels_last"`,
the weight tensor has shape
`(rows, cols, input_depth, output_depth)`,
set `axis` to `[0, 1, 2]`
to constrain the weights of each filter tensor of size
`(rows, cols, input_depth)`.
"""
def __init__(self, min_value=0.0, max_value=1.0, rate=1.0, axis=0):
self.min_value = min_value
self.max_value = max_value
self.rate = rate
self.axis = axis
@doc_controls.do_not_generate_docs
def __call__(self, w):
norms = backend.sqrt(
math_ops.reduce_sum(math_ops.square(w), axis=self.axis, keepdims=True))
desired = (
self.rate * backend.clip(norms, self.min_value, self.max_value) +
(1 - self.rate) * norms)
return w * (desired / (backend.epsilon() + norms))
@doc_controls.do_not_generate_docs
def get_config(self):
return {
'min_value': self.min_value,
'max_value': self.max_value,
'rate': self.rate,
'axis': self.axis
}
class RadialConstraint(Constraint):
"""Constrains `Conv2D` kernel weights to be the same for each radius.
Also available via the shortcut function
`tf.keras.constraints.radial_constraint`.
For example, the desired output for the following 4-by-4 kernel:
```
kernel = [[v_00, v_01, v_02, v_03],
[v_10, v_11, v_12, v_13],
[v_20, v_21, v_22, v_23],
[v_30, v_31, v_32, v_33]]
```
  is this:
```
kernel = [[v_11, v_11, v_11, v_11],
[v_11, v_33, v_33, v_11],
[v_11, v_33, v_33, v_11],
[v_11, v_11, v_11, v_11]]
```
This constraint can be applied to any `Conv2D` layer version, including
`Conv2DTranspose` and `SeparableConv2D`, and with either `"channels_last"` or
`"channels_first"` data format. The method assumes the weight tensor is of
shape `(rows, cols, input_depth, output_depth)`.
"""
@doc_controls.do_not_generate_docs
def __call__(self, w):
w_shape = w.shape
if w_shape.rank is None or w_shape.rank != 4:
raise ValueError(
'The weight tensor must be of rank 4, but is of shape: %s' % w_shape)
height, width, channels, kernels = w_shape
w = backend.reshape(w, (height, width, channels * kernels))
# TODO(cpeter): Switch map_fn for a faster tf.vectorized_map once
# backend.switch is supported.
w = backend.map_fn(
self._kernel_constraint,
backend.stack(array_ops_stack.unstack(w, axis=-1), axis=0))
return backend.reshape(
backend.stack(array_ops_stack.unstack(w, axis=0), axis=-1),
(height, width, channels, kernels))
def _kernel_constraint(self, kernel):
"""Radially constraints a kernel with shape (height, width, channels)."""
padding = backend.constant([[1, 1], [1, 1]], dtype='int32')
kernel_shape = backend.shape(kernel)[0]
start = backend.cast(kernel_shape / 2, 'int32')
kernel_new = backend.switch(
backend.cast(math_ops.floormod(kernel_shape, 2), 'bool'),
lambda: kernel[start - 1:start, start - 1:start],
lambda: kernel[start - 1:start, start - 1:start] + backend.zeros( # pylint: disable=g-long-lambda
(2, 2), dtype=kernel.dtype))
index = backend.switch(
backend.cast(math_ops.floormod(kernel_shape, 2), 'bool'),
lambda: backend.constant(0, dtype='int32'),
lambda: backend.constant(1, dtype='int32'))
while_condition = lambda index, *args: backend.less(index, start)
def body_fn(i, array):
return i + 1, array_ops.pad(
array,
padding,
constant_values=kernel[start + i, start + i])
_, kernel_new = while_loop.while_loop(
while_condition,
body_fn, [index, kernel_new],
shape_invariants=[
index.get_shape(),
tensor_shape.TensorShape([None, None])
])
return kernel_new
# Aliases.
max_norm = MaxNorm
non_neg = NonNeg
unit_norm = UnitNorm
min_max_norm = MinMaxNorm
radial_constraint = RadialConstraint
# Legacy aliases.
maxnorm = max_norm
nonneg = non_neg
unitnorm = unit_norm
def serialize(constraint):
return serialize_keras_object(constraint)
def deserialize(config, custom_objects=None):
return deserialize_keras_object(
config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name='constraint')
def get(identifier):
if identifier is None:
return None
if isinstance(identifier, dict):
return deserialize(identifier)
elif isinstance(identifier, str):
config = {'class_name': str(identifier), 'config': {}}
return deserialize(config)
elif callable(identifier):
return identifier
else:
raise ValueError('Could not interpret constraint identifier: ' +
str(identifier))
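# A minimal resolution sketch for `get` (module-local; `tf.keras.constraints.get`
# behaves the same way from user code):
if __name__ == '__main__':
  assert get(None) is None
  print(type(get('max_norm')).__name__)  # string alias -> MaxNorm instance
  print(type(get({'class_name': 'NonNeg', 'config': {}})).__name__)  # dict config
  mn = MaxNorm(3)
  assert get(mn) is mn  # callables pass through unchanged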
|
f717c5407a6098319a9cab05c73f0340d8941758
|
0ca218c0f54dac33a2ade4accfdf8f5be3207588
|
/examples/sharding/separate_databases.py
|
f836aaec00af67fe5213e5094120f78044437e1d
|
[
"MIT"
] |
permissive
|
sqlalchemy/sqlalchemy
|
9d949c67c9b5396b1f33e7ff0f3230c81babf5be
|
b382bff6e3464f039db0fd1f2ce1b79038675e48
|
refs/heads/main
| 2023-08-31T17:40:59.565421
| 2023-08-30T15:01:41
| 2023-08-30T15:01:41
| 159,271,175
| 8,083
| 1,489
|
MIT
| 2023-09-12T18:53:55
| 2018-11-27T03:35:03
|
Python
|
UTF-8
|
Python
| false
| false
| 10,311
|
py
|
separate_databases.py
|
"""Illustrates sharding using distinct SQLite databases."""
from __future__ import annotations
import datetime
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import ForeignKey
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import select
from sqlalchemy import Table
from sqlalchemy.ext.horizontal_shard import set_shard_id
from sqlalchemy.ext.horizontal_shard import ShardedSession
from sqlalchemy.orm import DeclarativeBase
from sqlalchemy.orm import Mapped
from sqlalchemy.orm import mapped_column
from sqlalchemy.orm import relationship
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import operators
from sqlalchemy.sql import visitors
echo = True
db1 = create_engine("sqlite://", echo=echo)
db2 = create_engine("sqlite://", echo=echo)
db3 = create_engine("sqlite://", echo=echo)
db4 = create_engine("sqlite://", echo=echo)
# create session function. this binds the shard ids
# to databases within a ShardedSession and returns it.
Session = sessionmaker(
class_=ShardedSession,
shards={
"north_america": db1,
"asia": db2,
"europe": db3,
"south_america": db4,
},
)
# mappings and tables
class Base(DeclarativeBase):
pass
# we need a way to create identifiers which are unique across all databases.
# one easy way would be to just use a composite primary key, where one value
# is the shard id. but here, we'll show something more "generic", an id
# generation function. we'll use a simplistic "id table" stored in database
# #1. Any other method will do just as well; UUID, hilo, application-specific,
# etc.
ids = Table("ids", Base.metadata, Column("nextid", Integer, nullable=False))
def id_generator(ctx):
# in reality, might want to use a separate transaction for this.
with db1.begin() as conn:
nextid = conn.scalar(ids.select().with_for_update())
conn.execute(ids.update().values({ids.c.nextid: ids.c.nextid + 1}))
return nextid
# table setup. we'll store a lead table of continents/cities, and a secondary
# table storing locations. a particular row will be placed in the database
# whose shard id corresponds to the 'continent'. in this setup, secondary rows
# in 'weather_reports' will be placed in the same DB as that of the parent, but
# this can be changed if you're willing to write more complex sharding
# functions.
class WeatherLocation(Base):
__tablename__ = "weather_locations"
id: Mapped[int] = mapped_column(primary_key=True, default=id_generator)
continent: Mapped[str]
city: Mapped[str]
reports: Mapped[list[Report]] = relationship(back_populates="location")
def __init__(self, continent: str, city: str):
self.continent = continent
self.city = city
class Report(Base):
__tablename__ = "weather_reports"
id: Mapped[int] = mapped_column(primary_key=True)
location_id: Mapped[int] = mapped_column(
ForeignKey("weather_locations.id")
)
temperature: Mapped[float]
report_time: Mapped[datetime.datetime] = mapped_column(
default=datetime.datetime.now
)
location: Mapped[WeatherLocation] = relationship(back_populates="reports")
def __init__(self, temperature: float):
self.temperature = temperature
# define sharding functions.
# we'll use a straight mapping of the "continent"
# attribute to shard id.
shard_lookup = {
"North America": "north_america",
"Asia": "asia",
"Europe": "europe",
"South America": "south_america",
}
def shard_chooser(mapper, instance, clause=None):
"""shard chooser.
    looks at the given instance and returns a shard id.
note that we need to define conditions for
the WeatherLocation class, as well as our secondary Report class which will
point back to its WeatherLocation via its 'location' attribute.
"""
if isinstance(instance, WeatherLocation):
return shard_lookup[instance.continent]
else:
return shard_chooser(mapper, instance.location)
def identity_chooser(mapper, primary_key, *, lazy_loaded_from, **kw):
"""identity chooser.
given a primary key, returns a list of shards
to search. here, we don't have any particular information from a
pk so we just return all shard ids. often, you'd want to do some
kind of round-robin strategy here so that requests are evenly
distributed among DBs.
"""
if lazy_loaded_from:
# if we are in a lazy load, we can look at the parent object
# and limit our search to that same shard, assuming that's how we've
# set things up.
return [lazy_loaded_from.identity_token]
else:
return ["north_america", "asia", "europe", "south_america"]
def execute_chooser(context):
"""statement execution chooser.
this also returns a list of shard ids, which can just be all of them. but
    here we'll inspect the execution context in order to try to narrow down
the list of shards to SELECT.
"""
ids = []
# we'll grab continent names as we find them
# and convert to shard ids
for column, operator, value in _get_select_comparisons(context.statement):
# "shares_lineage()" returns True if both columns refer to the same
# statement column, adjusting for any annotations present.
# (an annotation is an internal clone of a Column object
        # and occurs when using ORM-mapped attributes like
# "WeatherLocation.continent"). A simpler comparison, though less
# accurate, would be "column.key == 'continent'".
if column.shares_lineage(WeatherLocation.__table__.c.continent):
if operator == operators.eq:
ids.append(shard_lookup[value])
elif operator == operators.in_op:
ids.extend(shard_lookup[v] for v in value)
if len(ids) == 0:
return ["north_america", "asia", "europe", "south_america"]
else:
return ids
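# Worked example (hypothetical query): for
#   select(WeatherLocation).where(WeatherLocation.continent == "Asia")
# _get_select_comparisons() yields one (continent_column, eq, "Asia") tuple,
# so execute_chooser() returns ["asia"] and only that shard is queried.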
def _get_select_comparisons(statement):
"""Search a Select or Query object for binary expressions.
Returns expressions which match a Column against one or more
literal values as a list of tuples of the form
(column, operator, values). "values" is a single value
or tuple of values depending on the operator.
"""
binds = {}
clauses = set()
comparisons = []
def visit_bindparam(bind):
# visit a bind parameter.
value = bind.effective_value
binds[bind] = value
def visit_column(column):
clauses.add(column)
def visit_binary(binary):
if binary.left in clauses and binary.right in binds:
comparisons.append(
(binary.left, binary.operator, binds[binary.right])
)
elif binary.left in binds and binary.right in clauses:
comparisons.append(
(binary.right, binary.operator, binds[binary.left])
)
# here we will traverse through the query's criterion, searching
# for SQL constructs. We will place simple column comparisons
# into a list.
if statement.whereclause is not None:
visitors.traverse(
statement.whereclause,
{},
{
"bindparam": visit_bindparam,
"binary": visit_binary,
"column": visit_column,
},
)
return comparisons
# further configure create_session to use these functions
Session.configure(
shard_chooser=shard_chooser,
identity_chooser=identity_chooser,
execute_chooser=execute_chooser,
)
def setup():
# create tables
for db in (db1, db2, db3, db4):
Base.metadata.create_all(db)
# establish initial "id" in db1
with db1.begin() as conn:
conn.execute(ids.insert(), {"nextid": 1})
def main():
setup()
# save and load objects!
tokyo = WeatherLocation("Asia", "Tokyo")
newyork = WeatherLocation("North America", "New York")
toronto = WeatherLocation("North America", "Toronto")
london = WeatherLocation("Europe", "London")
dublin = WeatherLocation("Europe", "Dublin")
    brasilia = WeatherLocation("South America", "Brasilia")
quito = WeatherLocation("South America", "Quito")
tokyo.reports.append(Report(80.0))
newyork.reports.append(Report(75))
quito.reports.append(Report(85))
with Session() as sess:
sess.add_all(
[tokyo, newyork, toronto, london, dublin, brasilia, quito]
)
sess.commit()
t = sess.get(WeatherLocation, tokyo.id)
assert t.city == tokyo.city
assert t.reports[0].temperature == 80.0
# select across shards
asia_and_europe = sess.execute(
select(WeatherLocation).filter(
WeatherLocation.continent.in_(["Europe", "Asia"])
)
).scalars()
assert {c.city for c in asia_and_europe} == {
"Tokyo",
"London",
"Dublin",
}
# optionally set a shard id for the query and all related loaders
north_american_cities_w_t = sess.execute(
select(WeatherLocation)
.filter(WeatherLocation.city.startswith("T"))
.options(set_shard_id("north_america"))
).scalars()
# Tokyo not included since not in the north_america shard
assert {c.city for c in north_american_cities_w_t} == {
"Toronto",
}
# the Report class uses a simple integer primary key. So across two
# databases, a primary key will be repeated. The "identity_token"
# tracks in memory that these two identical primary keys are local to
# different shards.
newyork_report = newyork.reports[0]
tokyo_report = tokyo.reports[0]
assert inspect(newyork_report).identity_key == (
Report,
(1,),
"north_america",
)
assert inspect(tokyo_report).identity_key == (Report, (1,), "asia")
# the token representing the originating shard is also available
# directly
assert inspect(newyork_report).identity_token == "north_america"
assert inspect(tokyo_report).identity_token == "asia"
if __name__ == "__main__":
main()
|
61423f352833862c9ac7a33254717e2d023cd0f0
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/typeshed/stubs/braintree/braintree/webhook_notification_gateway.pyi
|
9f26304aa798ec4820553f3c24dac5db52fcee47
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 636
|
pyi
|
webhook_notification_gateway.pyi
|
from typing import Any
from braintree.exceptions.invalid_challenge_error import InvalidChallengeError as InvalidChallengeError
from braintree.exceptions.invalid_signature_error import InvalidSignatureError as InvalidSignatureError
from braintree.util.crypto import Crypto as Crypto
from braintree.util.xml_util import XmlUtil as XmlUtil
from braintree.webhook_notification import WebhookNotification as WebhookNotification
text_type = str
class WebhookNotificationGateway:
gateway: Any
config: Any
def __init__(self, gateway) -> None: ...
def parse(self, signature, payload): ...
def verify(self, challenge): ...
|
69a4a85ce333b5cef65cd0930fed937231901954
|
35b6013c1943f37d1428afd2663c8aba0a02628d
|
/securitycenter/snippets/snippets_notification_receiver.py
|
7c6fae680b98ccf5c08a3591868a9594a7fb447c
|
[
"Apache-2.0"
] |
permissive
|
GoogleCloudPlatform/python-docs-samples
|
d2a251805fbeab15d76ed995cf200727f63f887d
|
44e819e713c3885e38c99c16dc73b7d7478acfe8
|
refs/heads/main
| 2023-08-28T12:52:01.712293
| 2023-08-28T11:18:28
| 2023-08-28T11:18:28
| 35,065,876
| 7,035
| 7,593
|
Apache-2.0
| 2023-09-14T20:20:56
| 2015-05-04T23:26:13
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,110
|
py
|
snippets_notification_receiver.py
|
#!/usr/bin/env python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Demo for receiving notifications."""
def receive_notifications(project_id, subscription_name):
# [START securitycenter_receive_notifications]
# Requires https://cloud.google.com/pubsub/docs/quickstart-client-libraries#pubsub-client-libraries-python
import concurrent
from google.cloud import pubsub_v1
from google.cloud.securitycenter_v1 import NotificationMessage
# TODO: project_id = "your-project-id"
# TODO: subscription_name = "your-subscription-name"
def callback(message):
        # Print the received data, for debugging purposes if needed
print(f"Received message: {message.data}")
notification_msg = NotificationMessage.from_json(message.data)
print(
"Notification config name: {}".format(
notification_msg.notification_config_name
)
)
print(f"Finding: {notification_msg.finding}")
# Ack the message to prevent it from being pulled again
message.ack()
subscriber = pubsub_v1.SubscriberClient()
subscription_path = subscriber.subscription_path(project_id, subscription_name)
streaming_pull_future = subscriber.subscribe(subscription_path, callback=callback)
print(f"Listening for messages on {subscription_path}...\n")
try:
streaming_pull_future.result(timeout=1) # Block for 1 second
except concurrent.futures.TimeoutError:
streaming_pull_future.cancel()
# [END securitycenter_receive_notifications]
return True
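# A minimal invocation sketch (hypothetical project and subscription names;
# requires application-default credentials with Pub/Sub subscriber access):
if __name__ == "__main__":
    receive_notifications("my-gcp-project", "scc-notifications-sub")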
|
37513edaecf74ffcb3a6af9be16976d3c9f9ab03
|
88ae8695987ada722184307301e221e1ba3cc2fa
|
/native_client/pnacl/driver/pnacl-nm.py
|
cb5e0357a6d5e1c56fa03d6725c31ce1b4622a53
|
[
"BSD-3-Clause",
"Zlib",
"Classpath-exception-2.0",
"BSD-Source-Code",
"LZMA-exception",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LGPL-2.0-or-later",
"LicenseRef-scancode-philippe-de-muyter",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-intel-osl-1993",
"HPND-sell-variant",
"ICU",
"LicenseRef-scancode-protobuf",
"bzip2-1.0.6",
"Spencer-94",
"NCSA",
"LicenseRef-scancode-nilsson-historical",
"CC0-1.0",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-only",
"LicenseRef-scancode-other-copyleft",
"GPL-2.0-or-later",
"NTP",
"GPL-2.0-only",
"LicenseRef-scancode-other-permissive",
"GPL-3.0-only",
"GFDL-1.1-only",
"W3C",
"LicenseRef-scancode-python-cwi",
"GCC-exception-3.1",
"BSL-1.0",
"Python-2.0",
"GPL-1.0-or-later",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unknown-license-reference",
"CPL-1.0",
"GFDL-1.1-or-later",
"W3C-19980720",
"LGPL-2.0-only",
"LicenseRef-scancode-amd-historical",
"LicenseRef-scancode-ietf",
"SAX-PD",
"LicenseRef-scancode-x11-hanson",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"dtoa",
"MIT",
"LicenseRef-scancode-public-domain-disclaimer",
"PSF-2.0",
"LicenseRef-scancode-newlib-historical",
"LicenseRef-scancode-generic-exception",
"SMLNJ",
"HP-1986",
"LicenseRef-scancode-free-unknown",
"SunPro",
"MPL-1.1"
] |
permissive
|
iridium-browser/iridium-browser
|
71d9c5ff76e014e6900b825f67389ab0ccd01329
|
5ee297f53dc7f8e70183031cff62f37b0f19d25f
|
refs/heads/master
| 2023-08-03T16:44:16.844552
| 2023-07-20T15:17:00
| 2023-07-23T16:09:30
| 220,016,632
| 341
| 40
|
BSD-3-Clause
| 2021-08-13T13:54:45
| 2019-11-06T14:32:31
| null |
UTF-8
|
Python
| false
| false
| 1,710
|
py
|
pnacl-nm.py
|
#!/usr/bin/python3
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import driver_tools
from driver_env import env
from driver_log import Log
import filetype
EXTRA_ENV = {
'TOOLNAME': '',
'INPUTS': '',
'FLAGS': '',
}
PATTERNS = [
( '(-.*)', "env.append('FLAGS', $0)"),
( '(.*)', "env.append('INPUTS', pathtools.normalize($0))"),
]
def main(argv):
env.update(EXTRA_ENV)
driver_tools.ParseArgs(argv, PATTERNS)
inputs = env.get('INPUTS')
if len(inputs) == 0:
Log.Fatal("No input files given")
for infile in inputs:
driver_tools.CheckPathLength(infile)
env.push()
env.set('input', infile)
# For frozen PNaCl bitcode, use 'llvm-nm -bitcode-format=pnacl'. For all
# other formats, use the binutils nm with our gold plugin.
# Update: llvm-nm -bitcode-format=pnacl is currently disabled.
if filetype.IsPNaClBitcode(infile):
Log.Fatal(
'nm on finalized bitcode is currently disabled.\n'
'See: https://code.google.com/p/nativeclient/issues/detail?id=3993')
else:
env.set('TOOLNAME', '${NM}')
env.append('FLAGS', '--plugin=${GOLD_PLUGIN_SO}')
driver_tools.Run('"${TOOLNAME}" ${FLAGS} ${input}')
env.pop()
# only reached in case of no errors
return 0
def get_help(unused_argv):
return """
Usage: %s [option(s)] [file(s)]
List symbols in [file(s)].
* For stable PNaCl bitcode files, symbol listing is currently disabled (an
  error pointing at the tracking issue is printed instead).
* For all other files, this calls the standard nm from binutils - please see
that tool's help pages for options.
""" % env.getone('SCRIPT_NAME')
|
5841b94d2ee3c37ac44d2c485a047f8d766de9b1
|
54292bb222c6525217458e92ddacfc4e2635b83e
|
/python/phonenumbers/shortdata/region_RO.py
|
37c90cfba979b7bdaca3722e62240eeff474e71c
|
[
"Apache-2.0"
] |
permissive
|
daviddrysdale/python-phonenumbers
|
0d69b48033d1464c0a6c358274062f1db2ee8c4a
|
2f06ef6db2ca83f3856fbb8019a0c665f5971b13
|
refs/heads/dev
| 2023-08-31T09:37:20.570690
| 2023-08-22T05:18:22
| 2023-08-22T05:18:22
| 1,643,611
| 2,944
| 406
|
Apache-2.0
| 2023-08-08T06:49:07
| 2011-04-21T03:06:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,081
|
py
|
region_RO.py
|
"""Auto-generated file, do not edit by hand. RO metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_RO = PhoneMetadata(id='RO', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='[18]\\d{2,5}', possible_length=(3, 4, 5, 6)),
toll_free=PhoneNumberDesc(national_number_pattern='11(?:2|6\\d{3})', example_number='112', possible_length=(3, 6)),
premium_rate=PhoneNumberDesc(national_number_pattern='(?:1(?:18[39]|[24])|8[48])\\d\\d', example_number='1200', possible_length=(4, 6)),
emergency=PhoneNumberDesc(national_number_pattern='112', example_number='112', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='1(?:1(?:2|6(?:000|1(?:11|23))|8(?:(?:01|8[18])1|119|[23]00|932))|[24]\\d\\d|9(?:0(?:00|19)|1[19]|21|3[02]|5[178]))|8[48]\\d\\d', example_number='112', possible_length=(3, 4, 5, 6)),
sms_services=PhoneNumberDesc(national_number_pattern='(?:1[24]|8[48])\\d\\d', example_number='1200', possible_length=(4,)),
short_data=True)
|
55053166569e1ea6f768e1d839a3e3e5de4c46c7
|
4805a71711625735215cc1a773a85712be305b59
|
/Tools/site_scons/site_tools/pyext.py
|
003d2deb469c2d02ebd4eca64bb37675af402bc4
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
cython/cython
|
0a75b75b7eaf19eeedaaebca9d49adb603e3e8f5
|
6ba3daf319d94058de74e8e7f53f932092b38441
|
refs/heads/master
| 2023-09-04T11:09:56.569277
| 2023-09-04T07:45:47
| 2023-09-04T07:45:47
| 1,099,265
| 8,352
| 1,704
|
Apache-2.0
| 2023-09-14T06:33:34
| 2010-11-21T07:44:20
|
Python
|
UTF-8
|
Python
| false
| false
| 9,226
|
py
|
pyext.py
|
"""SCons.Tool.pyext
Tool-specific initialization for python extensions builder.
AUTHORS:
- David Cournapeau
- Dag Sverre Seljebotn
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import sys
import SCons
from SCons.Tool import SourceFileScanner, ProgramScanner
# Create common python builders
def createPythonObjectBuilder(env):
"""This is a utility function that creates the PythonObject Builder in an
Environment if it is not there already.
If it is already there, we return the existing one.
"""
try:
pyobj = env['BUILDERS']['PythonObject']
except KeyError:
pyobj = SCons.Builder.Builder(action = {},
emitter = {},
prefix = '$PYEXTOBJPREFIX',
suffix = '$PYEXTOBJSUFFIX',
src_builder = ['CFile', 'CXXFile'],
source_scanner = SourceFileScanner,
single_source = 1)
env['BUILDERS']['PythonObject'] = pyobj
return pyobj
def createPythonExtensionBuilder(env):
"""This is a utility function that creates the PythonExtension Builder in
an Environment if it is not there already.
If it is already there, we return the existing one.
"""
try:
pyext = env['BUILDERS']['PythonExtension']
except KeyError:
import SCons.Action
import SCons.Defaults
action = SCons.Action.Action("$PYEXTLINKCOM", "$PYEXTLINKCOMSTR")
action_list = [ SCons.Defaults.SharedCheck,
action]
pyext = SCons.Builder.Builder(action = action_list,
emitter = "$SHLIBEMITTER",
prefix = '$PYEXTPREFIX',
suffix = '$PYEXTSUFFIX',
target_scanner = ProgramScanner,
src_suffix = '$PYEXTOBJSUFFIX',
src_builder = 'PythonObject')
env['BUILDERS']['PythonExtension'] = pyext
return pyext
def pyext_coms(platform):
"""Return PYEXTCCCOM, PYEXTCXXCOM and PYEXTLINKCOM for the given
platform."""
if platform == 'win32':
pyext_cccom = "$PYEXTCC /Fo$TARGET /c $PYEXTCCSHARED "\
"$PYEXTCFLAGS $PYEXTCCFLAGS $_CCCOMCOM "\
"$_PYEXTCPPINCFLAGS $SOURCES"
pyext_cxxcom = "$PYEXTCXX /Fo$TARGET /c $PYEXTCSHARED "\
"$PYEXTCXXFLAGS $PYEXTCCFLAGS $_CCCOMCOM "\
"$_PYEXTCPPINCFLAGS $SOURCES"
pyext_linkcom = '${TEMPFILE("$PYEXTLINK $PYEXTLINKFLAGS '\
'/OUT:$TARGET.windows $( $_LIBDIRFLAGS $) '\
'$_LIBFLAGS $_PYEXTRUNTIME $SOURCES.windows")}'
else:
pyext_cccom = "$PYEXTCC -o $TARGET -c $PYEXTCCSHARED "\
"$PYEXTCFLAGS $PYEXTCCFLAGS $_CCCOMCOM "\
"$_PYEXTCPPINCFLAGS $SOURCES"
pyext_cxxcom = "$PYEXTCXX -o $TARGET -c $PYEXTCSHARED "\
"$PYEXTCXXFLAGS $PYEXTCCFLAGS $_CCCOMCOM "\
"$_PYEXTCPPINCFLAGS $SOURCES"
pyext_linkcom = "$PYEXTLINK -o $TARGET $PYEXTLINKFLAGS "\
"$SOURCES $_LIBDIRFLAGS $_LIBFLAGS $_PYEXTRUNTIME"
if platform == 'darwin':
pyext_linkcom += ' $_FRAMEWORKPATH $_FRAMEWORKS $FRAMEWORKSFLAGS'
return pyext_cccom, pyext_cxxcom, pyext_linkcom
def set_basic_vars(env):
    # Set construction variables which are independent of whether we are using
# distutils or not.
env['PYEXTCPPPATH'] = SCons.Util.CLVar('$PYEXTINCPATH')
env['_PYEXTCPPINCFLAGS'] = '$( ${_concat(INCPREFIX, PYEXTCPPPATH, '\
'INCSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)'
env['PYEXTOBJSUFFIX'] = '$SHOBJSUFFIX'
env['PYEXTOBJPREFIX'] = '$SHOBJPREFIX'
env['PYEXTRUNTIME'] = SCons.Util.CLVar("")
# XXX: this should be handled with different flags
env['_PYEXTRUNTIME'] = '$( ${_concat(LIBLINKPREFIX, PYEXTRUNTIME, '\
'LIBLINKSUFFIX, __env__)} $)'
# XXX: This won't work in all cases (using mingw, for example). To make
# this work, we need to know whether PYEXTCC accepts /c and /Fo or -c -o.
# This is difficult with the current way tools work in scons.
pycc, pycxx, pylink = pyext_coms(sys.platform)
env['PYEXTLINKFLAGSEND'] = SCons.Util.CLVar('$LINKFLAGSEND')
env['PYEXTCCCOM'] = pycc
env['PYEXTCXXCOM'] = pycxx
env['PYEXTLINKCOM'] = pylink
def _set_configuration_nodistutils(env):
# Set env variables to sensible values when not using distutils
def_cfg = {'PYEXTCC' : '$SHCC',
'PYEXTCFLAGS' : '$SHCFLAGS',
'PYEXTCCFLAGS' : '$SHCCFLAGS',
'PYEXTCXX' : '$SHCXX',
'PYEXTCXXFLAGS' : '$SHCXXFLAGS',
'PYEXTLINK' : '$LDMODULE',
'PYEXTSUFFIX' : '$LDMODULESUFFIX',
'PYEXTPREFIX' : ''}
if sys.platform == 'darwin':
def_cfg['PYEXTSUFFIX'] = '.so'
for k, v in def_cfg.items():
ifnotset(env, k, v)
ifnotset(env, 'PYEXT_ALLOW_UNDEFINED',
SCons.Util.CLVar('$ALLOW_UNDEFINED'))
ifnotset(env, 'PYEXTLINKFLAGS', SCons.Util.CLVar('$LDMODULEFLAGS'))
env.AppendUnique(PYEXTLINKFLAGS = env['PYEXT_ALLOW_UNDEFINED'])
def ifnotset(env, name, value):
if name not in env:
env[name] = value
def set_configuration(env, use_distutils):
"""Set construction variables which are platform dependants.
If use_distutils == True, use distutils configuration. Otherwise, use
'sensible' default.
Any variable already defined is untouched."""
# We define commands as strings so that we can either execute them using
# eval (same python for scons and distutils) or by executing them through
# the shell.
dist_cfg = {'PYEXTCC': ("sysconfig.get_config_var('CC')", False),
'PYEXTCFLAGS': ("sysconfig.get_config_var('CFLAGS')", True),
'PYEXTCCSHARED': ("sysconfig.get_config_var('CCSHARED')", False),
'PYEXTLINKFLAGS': ("sysconfig.get_config_var('LDFLAGS')", True),
'PYEXTLINK': ("sysconfig.get_config_var('LDSHARED')", False),
'PYEXTINCPATH': ("sysconfig.get_python_inc()", False),
'PYEXTSUFFIX': ("sysconfig.get_config_var('SO')", False)}
from distutils import sysconfig
    # We set the python include path even when not using distutils, because we
    # rarely want to change it.
ifnotset(env, 'PYEXTINCPATH', sysconfig.get_python_inc())
if use_distutils:
for k, (v, should_split) in dist_cfg.items():
val = eval(v)
if should_split:
val = val.split()
ifnotset(env, k, val)
else:
_set_configuration_nodistutils(env)
def generate(env):
"""Add Builders and construction variables for python extensions to an
Environment."""
if 'PYEXT_USE_DISTUTILS' not in env:
env['PYEXT_USE_DISTUTILS'] = False
# This sets all constructions variables used for pyext builders.
set_basic_vars(env)
set_configuration(env, env['PYEXT_USE_DISTUTILS'])
# Create the PythonObject builder
pyobj = createPythonObjectBuilder(env)
action = SCons.Action.Action("$PYEXTCCCOM", "$PYEXTCCCOMSTR")
pyobj.add_emitter('.c', SCons.Defaults.SharedObjectEmitter)
pyobj.add_action('.c', action)
action = SCons.Action.Action("$PYEXTCXXCOM", "$PYEXTCXXCOMSTR")
pyobj.add_emitter('$CXXFILESUFFIX', SCons.Defaults.SharedObjectEmitter)
pyobj.add_action('$CXXFILESUFFIX', action)
# Create the PythonExtension builder
createPythonExtensionBuilder(env)
def exists(env):
try:
        # This is not quite right: if someone defines all the variables by
        # hand, the tool would work without distutils.
from distutils import sysconfig
return True
except ImportError:
return False
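# A minimal, hypothetical usage sketch for the tool above (not part of the
# original file). The tool name "pyext" and the toolpath are assumptions; the
# PythonExtension builder is the one created by createPythonExtensionBuilder()
# in generate().
#
#   env = Environment(tools=['default', 'pyext'],
#                     toolpath=['site_scons/site_tools'],
#                     PYEXT_USE_DISTUTILS=True)
#   env.PythonExtension('spam', ['spammodule.c'])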
---
blob_id: 31692beb0b08ef83410bcc5e6c9a38ac01172428
directory_id: eb9f655206c43c12b497c667ba56a0d358b6bc3a
path: /python/testData/keywordCompletion/keywordAfterComment.after.py
content_id: 48a7d223aa7b2258d20db20986a7ac1320718f2b
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: JetBrains/intellij-community
snapshot_id: 2ed226e200ecc17c037dcddd4a006de56cd43941
revision_id: 05dbd4575d01a213f3f4d69aa4968473f2536142
branch_name: refs/heads/master
visit_date: 2023-09-03T17:06:37.560889
revision_date: 2023-09-03T11:51:00
committer_date: 2023-09-03T12:12:27
github_id: 2,489,216
star_events_count: 16,288
fork_events_count: 6,635
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-12T07:41:58
gha_created_at: 2011-09-30T13:33:05
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 20
extension: py
filename: keywordAfterComment.after.py
content:
# foo
import <caret>
---
blob_id: 1bea411ff759301784dd4ce0ba9c857b9eda89cc
directory_id: 90b974771cff3addd43ded8c62c3667c64045976
path: /tensorflow_data_validation/statistics/generators/natural_language_stats_generator.py
content_id: 838e47c5cbd7b2b4f66fe82091788105ac326df0
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: tensorflow/data-validation
snapshot_id: 84066c9c8db76d6b49bab0cec95113508d86dbdb
revision_id: 606cd26b69f648e1f151c024d38baa6ab1d7d0c8
branch_name: refs/heads/master
visit_date: 2023-09-01T11:54:32.797061
revision_date: 2023-08-21T19:21:20
committer_date: 2023-08-21T19:21:48
github_id: 139,463,182
star_events_count: 736
fork_events_count: 180
gha_license_id: Apache-2.0
gha_event_created_at: 2023-08-14T06:28:47
gha_created_at: 2018-07-02T15:47:02
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 27,413
extension: py
filename: natural_language_stats_generator.py
content:
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module that computes statistics for features of natural language type.
This module computes natural language statistics for features where the
natural_language_domain is specified. These statistics are stored as
custom_stats entries of the FeatureNameStatistics message corresponding to the
specified feature. We store a custom_stats called nl_statistics that contains
a populated tensorflow.metadata.v0.NaturalLanguageStatistics proto.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from typing import Dict, Iterable, List, Optional, Set, Text, Union
import pyarrow as pa
import six
from tensorflow_data_validation import types
from tensorflow_data_validation.statistics.generators import stats_generator
from tensorflow_data_validation.utils import quantiles_util
from tensorflow_data_validation.utils import schema_util
from tensorflow_data_validation.utils import stats_util
from tensorflow_data_validation.utils import vocab_util
from tfx_bsl import sketches
from tensorflow_metadata.proto.v0 import schema_pb2
from tensorflow_metadata.proto.v0 import statistics_pb2
# TODO(https://issues.apache.org/jira/browse/SPARK-22674): Switch to
# `collections.namedtuple` or `typing.NamedTuple` once the Spark issue is
# resolved.
from tfx_bsl.types import tfx_namedtuple # pylint: disable=g-bad-import-order
_NL_DOMAIN = 'natural_language_domain'
_INT_VALUE = 'int_value'
_NUM_MISRAGRIES_SKETCH_BUCKETS = 16384
_QUANTILES_SKETCH_ERROR = 0.01
_QUANTILES_SKETCH_NUM_ELEMENTS = 2**32  # was "2 ^ 32": ^ is bitwise XOR in Python (== 34); exponentiation is intended.
_QUANTILES_SKETCH_NUM_STREAMS = 1
_NUM_REPORTED_SEQUENCES_PER_TYPE = 5
_ReportedSequence = tfx_namedtuple.namedtuple(
'_ReportedSequence', ['sequence', 'hash_value', 'metric'])
def _sort_and_truncate_reported_sequence(sequence: List[_ReportedSequence]):
sequence.sort(key=lambda x: x.metric)
deduped_values = []
hash_values = set()
for s in sequence:
if s.hash_value in hash_values:
continue
hash_values.add(s.hash_value)
deduped_values.append(s)
return deduped_values[:_NUM_REPORTED_SEQUENCES_PER_TYPE]
class _TokenStats(object):
"""Tracks statistics for individual tokens."""
def __init__(self):
self.frequency = 0
self.num_sequences = 0
self.per_sequence_min_frequency = None
self.per_sequence_max_frequency = None
self.positions = collections.Counter()
def __iadd__(self, other: '_TokenStats') -> '_TokenStats':
"""Merge two _TokenStats."""
self.frequency += other.frequency
self.num_sequences += other.num_sequences
for attr, fn in [('per_sequence_min_frequency', min),
('per_sequence_max_frequency', max)]:
self_freq = getattr(self, attr)
other_freq = getattr(other, attr)
if (self_freq is not None and other_freq is not None):
setattr(self, attr, fn(self_freq, other_freq))
elif self_freq is None:
setattr(self, attr, other_freq)
self.positions += other.positions
return self
# TODO(b/175875824): Determine if we should remove NL features from the default
# Top-K computation which is largely redundant.
class _PartialNLStats(object):
"""Partial feature stats for natural language."""
def __init__(self,
invalidate=False,
num_in_vocab_tokens: int = 0,
total_num_tokens: int = 0,
sum_in_vocab_token_lengths: int = 0,
num_examples: int = 0) -> None:
    # True only if this feature should never be considered, e.g.: some
# value_lists have inconsistent types or feature doesn't have an
# NL domain.
self.invalidate = invalidate
self.num_in_vocab_tokens = num_in_vocab_tokens
self.total_num_tokens = total_num_tokens
self.sum_in_vocab_token_lengths = sum_in_vocab_token_lengths
self.num_examples = num_examples
self.vocab_token_length_quantiles = sketches.QuantilesSketch(
_QUANTILES_SKETCH_ERROR, _QUANTILES_SKETCH_NUM_ELEMENTS,
_QUANTILES_SKETCH_NUM_STREAMS)
self.min_sequence_length = None
self.max_sequence_length = None
self.sequence_length_quantiles = sketches.QuantilesSketch(
_QUANTILES_SKETCH_ERROR, _QUANTILES_SKETCH_NUM_ELEMENTS,
_QUANTILES_SKETCH_NUM_STREAMS)
self.token_occurrence_counts = sketches.MisraGriesSketch(
_NUM_MISRAGRIES_SKETCH_BUCKETS)
self.token_statistics = collections.defaultdict(_TokenStats)
self.reported_sequences_coverage = []
self.reported_sequences_avg_token_length = []
def __iadd__(self, other: '_PartialNLStats') -> '_PartialNLStats':
"""Merge two partial natual language stats."""
self.invalidate |= other.invalidate
self.num_in_vocab_tokens += other.num_in_vocab_tokens
self.total_num_tokens += other.total_num_tokens
self.sum_in_vocab_token_lengths += other.sum_in_vocab_token_lengths
self.num_examples += other.num_examples
self.vocab_token_length_quantiles.Merge(other.vocab_token_length_quantiles)
if self.min_sequence_length is None:
self.min_sequence_length = other.min_sequence_length
elif other.min_sequence_length is not None:
self.min_sequence_length = min(self.min_sequence_length,
other.min_sequence_length)
if self.max_sequence_length is None:
self.max_sequence_length = other.max_sequence_length
elif other.max_sequence_length is not None:
self.max_sequence_length = max(self.max_sequence_length,
other.max_sequence_length)
self.sequence_length_quantiles.Merge(other.sequence_length_quantiles)
self.token_occurrence_counts.Merge(other.token_occurrence_counts)
for t in other.token_statistics:
if t not in self.token_statistics:
self.token_statistics[t] = other.token_statistics[t]
else:
self.token_statistics[t] += other.token_statistics[t]
for list_name in [
'reported_sequences_coverage', 'reported_sequences_avg_token_length'
]:
cur_list = getattr(self, list_name)
cur_list += getattr(other, list_name)
cur_list = _sort_and_truncate_reported_sequence(cur_list)
setattr(self, list_name, cur_list)
return self
def _update_accumulator_with_in_vocab_string_tokens(
accumulator: _PartialNLStats, token_list: List[Text]):
accumulator.num_in_vocab_tokens += len(token_list)
accumulator.token_occurrence_counts.AddValues(pa.array(token_list))
token_len_list = [len(t) for t in token_list]
accumulator.sum_in_vocab_token_lengths += sum(token_len_list)
accumulator.vocab_token_length_quantiles.AddValues(pa.array(token_len_list))
def _update_accumulator_with_token_statistics(accumulator: _PartialNLStats,
row: List[Union[int, Text]],
tokens: Union[Set[int],
Set[Text]],
num_histogram_buckets):
"""Compute token statistics for a specific row."""
for t in tokens:
norm_indices = [float(i) / len(row) for i, v in enumerate(row) if v == t]
num_occur = len(norm_indices)
accumulator.token_statistics[t].frequency += num_occur
accumulator.token_statistics[t].num_sequences += (1 if num_occur else 0)
for attr, fn in [('per_sequence_min_frequency', min),
('per_sequence_max_frequency', max)]:
accum_freq = getattr(accumulator.token_statistics[t], attr)
if accum_freq is not None:
setattr(accumulator.token_statistics[t], attr,
fn(accum_freq, num_occur))
else:
setattr(accumulator.token_statistics[t], attr, num_occur)
for i in norm_indices:
accumulator.token_statistics[t].positions[int(i *
num_histogram_buckets)] += 1
def _update_accumulator_reported_sequences(accumulator: _PartialNLStats,
resolved_entry: List[Union[Text,
int]],
oov_string_tokens: Set[Text]):
"""Update reported sequences in accumulator."""
token_lens = [
len(i) for i in resolved_entry
if (isinstance(i, str) and i not in oov_string_tokens)
]
coverage = (float(len(token_lens)) / len(resolved_entry))
if token_lens:
avg_token_len = float(sum(token_lens)) / len(token_lens)
else:
avg_token_len = 0
for attr, metric in [('reported_sequences_coverage', coverage),
('reported_sequences_avg_token_length', avg_token_len)]:
cur_list = getattr(accumulator, attr)
cur_list.append(
_ReportedSequence(
sequence=resolved_entry,
hash_value=hash(str(resolved_entry)),
metric=metric))
cur_list = _sort_and_truncate_reported_sequence(cur_list)
setattr(accumulator, attr, cur_list)
def _update_accumulator_with_sequence_lengths(
accumulator: _PartialNLStats, sequence_length_excluded_int_tokens: Set[int],
sequence_length_excluded_string_tokens: Set[Text], max_sequence_length: int,
int_row: Optional[List[Union[int, Text]]],
string_row: Optional[List[Union[Text, int]]]):
"""Update sequence length quantiles in accumulator.
  We expect that int_row and string_row preserve the position of the token
  within the sequence and hence allow the lists to contain both ints and strings.
Args:
accumulator: The accumulator to update.
sequence_length_excluded_int_tokens: The int tokens to not consider when
calculating the length.
sequence_length_excluded_string_tokens: The string tokens to not consider
when calculating the length.
max_sequence_length: The max sequence length to use if no excluded tokens
are present.
int_row: The row of integer tokens. Note: the row can include strings if
there is an incomplete mapping from strings to ints (this preserves the
position).
    string_row: The row of string tokens. Note: the row can include ints if
there is an incomplete mapping from ints to strings (this preserves the
position).
"""
sequence_length = max_sequence_length
if int_row is not None:
matches = [e for e in int_row if e in sequence_length_excluded_int_tokens]
sequence_length -= len(matches)
if string_row is not None:
matches = [
e for e in string_row if e in sequence_length_excluded_string_tokens
]
sequence_length -= len(matches)
accumulator.sequence_length_quantiles.AddValues(pa.array([sequence_length]))
  # Use "is None" checks (matching _PartialNLStats.__iadd__ above) so that a
  # legitimate minimum of 0 is not clobbered by a later, larger value.
  accumulator.min_sequence_length = (
      sequence_length if accumulator.min_sequence_length is None else min(
          accumulator.min_sequence_length, sequence_length))
  accumulator.max_sequence_length = (
      sequence_length if accumulator.max_sequence_length is None else max(
          accumulator.max_sequence_length, sequence_length))
def _compute_int_statistics(
row: List[int], accumulator: _PartialNLStats,
excluded_string_tokens: Set[Text], excluded_int_tokens: Set[int],
oov_string_tokens: Set[Text], unused_vocab: Optional[Dict[Text, int]],
rvocab: Optional[Dict[int, Text]], int_tokens: Set[int],
string_tokens: Set[Text], sequence_length_excluded_int_tokens: Set[int],
sequence_length_excluded_string_tokens: Set[Text],
num_histogram_buckets: int):
"""Compute statistics for an integer entry."""
accumulator.num_examples += 1
if row:
_update_accumulator_with_token_statistics(accumulator, row, int_tokens,
num_histogram_buckets)
string_row = None
if rvocab:
string_row = [rvocab.get(r, r) for r in row]
_update_accumulator_with_token_statistics(accumulator, string_row,
string_tokens,
num_histogram_buckets)
_update_accumulator_reported_sequences(accumulator,
string_row if string_row else row,
oov_string_tokens)
_update_accumulator_with_sequence_lengths(
accumulator, sequence_length_excluded_int_tokens,
sequence_length_excluded_string_tokens, len(row), row, string_row)
filtered_entry_str_list = []
for entry in row:
if entry in excluded_int_tokens:
continue
# Vocabulary exists.
if rvocab is not None:
if entry in rvocab:
entry_str = rvocab[entry]
if entry_str in excluded_string_tokens:
continue
if entry_str not in oov_string_tokens:
filtered_entry_str_list.append(entry_str)
accumulator.total_num_tokens += 1
if filtered_entry_str_list:
_update_accumulator_with_in_vocab_string_tokens(accumulator,
filtered_entry_str_list)
def _compute_str_statistics(
row: List[Text], accumulator: _PartialNLStats,
excluded_string_tokens: Set[Text], excluded_int_tokens: Set[int],
oov_string_tokens: Set[Text], vocab: Optional[Dict[Text, int]],
unused_rvocab: Optional[Dict[int, Text]], int_tokens: Set[int],
string_tokens: Set[Text], sequence_length_excluded_int_tokens: Set[int],
sequence_length_excluded_string_tokens: Set[Text], num_histogram_buckets):
"""Compute statistics for string features."""
accumulator.num_examples += 1
row = [six.ensure_text(e) for e in row]
if row:
_update_accumulator_with_token_statistics(accumulator, row, string_tokens,
num_histogram_buckets)
_update_accumulator_reported_sequences(accumulator, row, oov_string_tokens)
int_row = None
if vocab:
int_row = [vocab.get(r, r) for r in row]
_update_accumulator_with_token_statistics(accumulator, int_row,
int_tokens,
num_histogram_buckets)
_update_accumulator_with_sequence_lengths(
accumulator, sequence_length_excluded_int_tokens,
sequence_length_excluded_string_tokens, len(row), int_row, row)
filtered_entry_list = []
for entry in row:
if entry in excluded_string_tokens:
continue
if (vocab is not None and entry in vocab and
vocab[entry] in excluded_int_tokens):
continue
if entry not in oov_string_tokens:
filtered_entry_list.append(entry)
accumulator.total_num_tokens += 1
if filtered_entry_list:
_update_accumulator_with_in_vocab_string_tokens(accumulator,
filtered_entry_list)
def _populate_token_length_histogram(
nls: statistics_pb2.NaturalLanguageStatistics, accumulator: _PartialNLStats,
num_quantiles_histogram_buckets: int):
"""Populate the token length histogram."""
quantiles, weights = (
accumulator.vocab_token_length_quantiles.GetQuantilesAndCumulativeWeights(
num_quantiles_histogram_buckets))
quantiles = quantiles.flatten().to_numpy(zero_copy_only=False)
weights = weights.flatten().to_numpy(zero_copy_only=False)
if quantiles.size:
quantiles_histogram = quantiles_util.generate_quantiles_histogram(
quantiles, weights)
nls.token_length_histogram.CopyFrom(quantiles_histogram)
def _populate_sequence_length_histogram(
nls: statistics_pb2.NaturalLanguageStatistics, accumulator: _PartialNLStats,
num_quantiles_histogram_buckets: int):
"""Populate sequence length histogram."""
quantiles, weights = (
accumulator.sequence_length_quantiles.GetQuantilesAndCumulativeWeights(
num_quantiles_histogram_buckets))
quantiles = quantiles.flatten().to_numpy(zero_copy_only=False)
weights = weights.flatten().to_numpy(zero_copy_only=False)
if quantiles.size:
quantiles_histogram = quantiles_util.generate_quantiles_histogram(
quantiles, weights)
nls.sequence_length_histogram.CopyFrom(quantiles_histogram)
def _populate_token_rank_histogram(
nls: statistics_pb2.NaturalLanguageStatistics, accumulator: _PartialNLStats,
num_rank_histogram_buckets: int):
"""Populate the token rank histogram."""
entries = accumulator.token_occurrence_counts.Estimate().to_pylist()
for i, e in enumerate(entries[:num_rank_histogram_buckets]):
nls.rank_histogram.buckets.add(
low_rank=i, high_rank=i, label=e['values'], sample_count=e['counts'])
def _populate_token_position_histogram(
token_proto: statistics_pb2.NaturalLanguageStatistics.TokenStatistics,
stats: _TokenStats, num_histogram_buckets: int):
"""Populate the token position histogram."""
positions = list(stats.positions.items())
positions.sort(key=lambda x: x[0])
for k, v in positions:
low_value = float(k) / num_histogram_buckets
high_value = float(k + 1) / num_histogram_buckets
token_proto.positions.buckets.add(
low_value=low_value, high_value=high_value, sample_count=v)
def _populate_token_statistics(
name: Text,
num_histogram_buckets: int,
num_examples: int,
token_proto: statistics_pb2.NaturalLanguageStatistics.TokenStatistics,
stats: _TokenStats):
"""Populates the token statistics for a specified token."""
if isinstance(name, int):
token_proto.int_token = name
else:
token_proto.string_token = name
if stats.num_sequences:
token_proto.frequency = stats.frequency
token_proto.fraction_of_sequences = (
float(stats.num_sequences) / num_examples)
token_proto.per_sequence_min_frequency = stats.per_sequence_min_frequency
token_proto.per_sequence_max_frequency = stats.per_sequence_max_frequency
token_proto.per_sequence_avg_frequency = (
float(stats.frequency) / stats.num_sequences)
_populate_token_position_histogram(token_proto, stats,
num_histogram_buckets)
class NLStatsGenerator(stats_generator.CombinerFeatureStatsGenerator):
"""Generates feature level statistics for natural language stats.
A combiner that computes statistics based on the specified
natural_language_domain.
"""
def __init__(self, schema: Optional[schema_pb2.Schema],
vocab_paths: Optional[Dict[Text, Text]],
num_histogram_buckets: int, num_quantiles_histogram_buckets: int,
num_rank_histogram_buckets: int) -> None:
"""Initializes a NLStatsGenerator.
Args:
schema: An optional schema for the dataset.
      vocab_paths: A dictionary mapping vocab names to vocab paths.
num_histogram_buckets: Number of buckets to use for histograms.
num_quantiles_histogram_buckets: Number of quantiles to use for
histograms.
num_rank_histogram_buckets: Number of buckets to allow for rank
histograms.
"""
self._schema = schema
self._vocab_paths = vocab_paths
self._num_histogram_buckets = num_histogram_buckets
self._num_quantiles_histogram_buckets = num_quantiles_histogram_buckets
assert num_rank_histogram_buckets <= _NUM_MISRAGRIES_SKETCH_BUCKETS, (
'num_rank_histogram_buckets cannot be greater than %d' %
_NUM_MISRAGRIES_SKETCH_BUCKETS)
self._num_rank_histogram_buckets = num_rank_histogram_buckets
self._nld_vocabularies = {}
self._nld_excluded_string_tokens = {}
self._nld_excluded_int_tokens = {}
self._nld_oov_string_tokens = {}
self._nld_specified_int_tokens = collections.defaultdict(set)
self._nld_specified_str_tokens = collections.defaultdict(set)
self._nld_sequence_length_excluded_int_tokens = {}
self._nld_sequence_length_excluded_string_tokens = {}
self._vocabs = {}
self._rvocabs = {}
self._feature_type_fns = {
statistics_pb2.FeatureNameStatistics.INT: _compute_int_statistics,
statistics_pb2.FeatureNameStatistics.STRING: _compute_str_statistics
}
self._valid_feature_paths = set()
def setup(self) -> None:
"""Prepares an instance for combining."""
if self._schema is not None:
for k, v in schema_util.get_all_leaf_features(self._schema):
if v.WhichOneof('domain_info') == _NL_DOMAIN:
nld = v.natural_language_domain
self._nld_vocabularies[k] = nld.vocabulary
coverage_constraints = nld.coverage
self._nld_excluded_string_tokens[k] = set(
coverage_constraints.excluded_string_tokens)
self._nld_excluded_int_tokens[k] = set(
coverage_constraints.excluded_int_tokens)
self._nld_oov_string_tokens[k] = set(
coverage_constraints.oov_string_tokens)
sequence_length_constraints = nld.sequence_length_constraints
self._nld_sequence_length_excluded_int_tokens[k] = set(
sequence_length_constraints.excluded_int_value)
self._nld_sequence_length_excluded_string_tokens[k] = set(
sequence_length_constraints.excluded_string_value)
if (self._nld_vocabularies[k] or
self._nld_excluded_string_tokens[k] or
self._nld_excluded_int_tokens[k] or
self._nld_oov_string_tokens[k]):
self._valid_feature_paths.add(k)
for t in nld.token_constraints:
if t.WhichOneof('value') == _INT_VALUE:
self._nld_specified_int_tokens[k].add(t.int_value)
else:
self._nld_specified_str_tokens[k].add(t.string_value)
if self._vocab_paths is not None:
for k, v in self._vocab_paths.items():
self._vocabs[k], self._rvocabs[k] = vocab_util.load_vocab(v)
def create_accumulator(self) -> _PartialNLStats:
"""Return a fresh, empty accumulator.
Returns:
An empty accumulator.
"""
return _PartialNLStats()
def add_input(self, accumulator: _PartialNLStats,
feature_path: types.FeaturePath,
feature_array: pa.Array) -> _PartialNLStats:
"""Return result of folding a batch of inputs into accumulator.
Args:
accumulator: The current accumulator.
feature_path: The path of the feature.
feature_array: An arrow Array representing a batch of feature values which
should be added to the accumulator.
Returns:
The accumulator after updating the statistics for the batch of inputs.
"""
if feature_path not in self._valid_feature_paths:
accumulator.invalidate = True
return accumulator
feature_type = stats_util.get_feature_type_from_arrow_type(
feature_path, feature_array.type)
# Ignore null array.
if feature_type is None:
return accumulator
if feature_type not in self._feature_type_fns:
accumulator.invalidate = True
return accumulator
feature_type_fn = self._feature_type_fns[feature_type]
vocab = None
rvocab = None
if self._nld_vocabularies[feature_path]:
vocab_name = self._nld_vocabularies[feature_path]
vocab = self._vocabs[vocab_name]
rvocab = self._rvocabs[vocab_name]
excluded_string_tokens = self._nld_excluded_string_tokens[feature_path]
excluded_int_tokens = self._nld_excluded_int_tokens[feature_path]
oov_string_tokens = self._nld_oov_string_tokens[feature_path]
int_tokens = self._nld_specified_int_tokens[feature_path]
string_tokens = self._nld_specified_str_tokens[feature_path]
sequence_length_excluded_int_tokens = (
self._nld_sequence_length_excluded_int_tokens[feature_path])
sequence_length_excluded_string_tokens = (
self._nld_sequence_length_excluded_string_tokens[feature_path])
# TODO(b/175875824): Benchmark and optimize performance.
for row in feature_array.to_pylist():
if row is not None:
feature_type_fn(row, accumulator, excluded_string_tokens,
excluded_int_tokens, oov_string_tokens, vocab, rvocab,
int_tokens, string_tokens,
sequence_length_excluded_int_tokens,
sequence_length_excluded_string_tokens,
self._num_histogram_buckets)
return accumulator
def merge_accumulators(
self, accumulators: Iterable[_PartialNLStats]) -> _PartialNLStats:
"""Merges several accumulators to a single accumulator value.
Args:
accumulators: The accumulators to merge.
Returns:
The merged accumulator.
"""
it = iter(accumulators)
result = next(it)
for accumulator in it:
result += accumulator
return result
def compact(self, accumulator: _PartialNLStats) -> _PartialNLStats:
accumulator.vocab_token_length_quantiles.Compact()
accumulator.sequence_length_quantiles.Compact()
return accumulator
def extract_output(
self,
accumulator: _PartialNLStats) -> statistics_pb2.FeatureNameStatistics:
"""Return result of converting accumulator into the output value.
Args:
accumulator: The final accumulator value.
Returns:
A proto representing the result of this stats generator.
"""
result = statistics_pb2.FeatureNameStatistics()
if accumulator.invalidate:
return result
nls = statistics_pb2.NaturalLanguageStatistics()
if accumulator.total_num_tokens:
nls.feature_coverage = (
float(accumulator.num_in_vocab_tokens) / accumulator.total_num_tokens)
if accumulator.num_in_vocab_tokens:
nls.avg_token_length = (
float(accumulator.sum_in_vocab_token_lengths) /
accumulator.num_in_vocab_tokens)
if accumulator.min_sequence_length:
nls.min_sequence_length = accumulator.min_sequence_length
if accumulator.max_sequence_length:
nls.max_sequence_length = accumulator.max_sequence_length
if self._num_quantiles_histogram_buckets:
_populate_token_length_histogram(nls, accumulator,
self._num_quantiles_histogram_buckets)
_populate_sequence_length_histogram(nls, accumulator,
self._num_quantiles_histogram_buckets)
if self._num_rank_histogram_buckets:
_populate_token_rank_histogram(nls, accumulator,
self._num_rank_histogram_buckets)
if accumulator.token_statistics:
for name, stats in accumulator.token_statistics.items():
_populate_token_statistics(name, self._num_histogram_buckets,
accumulator.num_examples,
nls.token_statistics.add(), stats)
for r in (accumulator.reported_sequences_coverage +
accumulator.reported_sequences_avg_token_length):
str_seq = str(r[0])
nls.reported_sequences.append(str_seq)
custom_nl_stats = result.custom_stats.add(name='nl_statistics')
custom_nl_stats.any.Pack(nls)
return result
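# A minimal, hypothetical construction of the generator above (not part of the
# original file). Argument names match NLStatsGenerator.__init__; the schema,
# vocab path, and bucket counts are illustrative values only.
#
#   generator = NLStatsGenerator(
#       schema=my_schema,  # schema_pb2.Schema with a natural_language_domain feature
#       vocab_paths={'my_vocab': '/tmp/vocab.txt'},
#       num_histogram_buckets=10,
#       num_quantiles_histogram_buckets=10,
#       num_rank_histogram_buckets=128)  # must not exceed _NUM_MISRAGRIES_SKETCH_BUCKETS
#   generator.setup()
#   accumulator = generator.create_accumulator()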
---
blob_id: 803b1a8dc9a30246ab2945a731f3ad2a092d7583
directory_id: 5917ffcb780cfcfe4e2b87b11fca1f68f387b239
path: /plenum/test/checkpoints/test_lagged_checkpoint_completion.py
content_id: 7fa201a86fbf0d7b080baa9edb59a42f1c3e299d
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: hyperledger/indy-plenum
snapshot_id: 6ff9f705af80dfa28d4cb92743683f78bb937aa3
revision_id: 698b9500ad3a7a15993af72a1c35a406c5673262
branch_name: refs/heads/main
visit_date: 2023-08-29T01:32:26.384729
revision_date: 2023-06-20T16:42:11
committer_date: 2023-06-20T16:42:11
github_id: 51,585,028
star_events_count: 171
fork_events_count: 420
gha_license_id: Apache-2.0
gha_event_created_at: 2023-06-20T16:42:14
gha_created_at: 2016-02-12T12:03:16
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,812
extension: py
filename: test_lagged_checkpoint_completion.py
content:
from plenum.test import waits
from plenum.test.checkpoints.helper import check_num_received_checkpoints, \
check_received_checkpoint_votes, check_stable_checkpoint, check_num_unstable_checkpoints
from plenum.test.delayers import cDelay
from plenum.test.helper import sdk_send_random_and_check
from stp_core.loop.eventually import eventually
CHK_FREQ = 5
def test_lagged_checkpoint_completion(chkFreqPatched, looper, txnPoolNodeSet,
sdk_wallet_client, sdk_pool_handle):
"""
One node in a pool lags to order the last 3PC-batch in a checkpoint so that
when it eventually orders this 3PC-batch and thus completes the checkpoint
it has already received and stashed the corresponding checkpoint messages
from all the other nodes. The test verifies that the node successfully
processes the stashed checkpoint messages and stabilizes the checkpoint.
"""
slow_node = txnPoolNodeSet[-1]
    # All the nodes in the pool normally order all the 3PC-batches in a
    # checkpoint except for the last one. The last 3PC-batch in the checkpoint
    # is ordered by all the nodes except one slow node, because that node is
    # delayed in receiving Commits.
sdk_send_random_and_check(looper, txnPoolNodeSet,
sdk_pool_handle, sdk_wallet_client, 4)
slow_node.nodeIbStasher.delay(cDelay())
sdk_send_random_and_check(looper, txnPoolNodeSet,
sdk_pool_handle, sdk_wallet_client, 1)
# All the other nodes complete the checkpoint and send Checkpoint messages
# to others. The slow node receives and stashes these messages because it
# has not completed the checkpoint.
def check():
for replica in slow_node.replicas.values():
check_stable_checkpoint(replica, 0)
check_num_unstable_checkpoints(replica, 0)
check_num_received_checkpoints(replica, 1)
check_received_checkpoint_votes(replica,
pp_seq_no=5,
num_votes=len(txnPoolNodeSet) - 1)
stabilization_timeout = \
waits.expectedTransactionExecutionTime(len(txnPoolNodeSet))
looper.run(eventually(check, timeout=stabilization_timeout))
# Eventually the slow node receives Commits, orders the last 3PC-batch in
# the checkpoint and thus completes it, processes the stashed checkpoint
# messages and stabilizes the checkpoint.
slow_node.nodeIbStasher.reset_delays_and_process_delayeds()
looper.runFor(waits.expectedOrderingTime(len(txnPoolNodeSet)))
for replica in slow_node.replicas.values():
check_stable_checkpoint(replica, 5)
check_num_unstable_checkpoints(replica, 0)
check_num_received_checkpoints(replica, 0)
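# The stasher delay/release pattern used by the test above, distilled as a
# hedged sketch (not part of the original file; helper names are the ones this
# file already imports):
#
#   slow_node.nodeIbStasher.delay(cDelay())  # stash incoming Commits
#   ...send requests; assert checkpoints are received but not yet stable...
#   slow_node.nodeIbStasher.reset_delays_and_process_delayeds()  # replay them
#   ...assert the checkpoint has stabilized...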
---
blob_id: 1c5db60f3bc14f81ff9e18a66c3be97be0590e9c
directory_id: 549270020f6c8724e2ef1b12e38d11b025579f8d
path: /recipes/ninja/all/conanfile.py
content_id: 6fede8df5ba1b52a2b573a4a4c1a77b10b9588ee
detected_licenses: ["MIT"]
license_type: permissive
repo_name: conan-io/conan-center-index
snapshot_id: 1bcec065ccd65aa38b1fed93fbd94d9d5fe6bc43
revision_id: 3b17e69bb4e5601a850b6e006e44775e690bac33
branch_name: refs/heads/master
visit_date: 2023-08-31T11:34:45.403978
revision_date: 2023-08-31T11:13:23
committer_date: 2023-08-31T11:13:23
github_id: 204,671,232
star_events_count: 844
fork_events_count: 1,820
gha_license_id: MIT
gha_event_created_at: 2023-09-14T21:22:42
gha_created_at: 2019-08-27T09:43:58
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,885
extension: py
filename: conanfile.py
content:
from conan import ConanFile, conan_version
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
from conan.tools.files import copy, get
from conan.tools.scm import Version
import os
required_conan_version = ">=1.52.0"
class NinjaConan(ConanFile):
name = "ninja"
package_type = "application"
description = "Ninja is a small build system with a focus on speed"
license = "Apache-2.0"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/ninja-build/ninja"
topics = ("ninja", "build")
settings = "os", "arch", "compiler", "build_type"
def layout(self):
cmake_layout(self, src_folder="src")
def package_id(self):
del self.info.settings.compiler
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
tc = CMakeToolchain(self)
tc.variables["BUILD_TESTING"] = False
if self.settings.os == "Linux" and "libstdc++" in self.settings.compiler.libcxx:
# Link C++ library statically on Linux so that it can run on systems
# with an older C++ runtime
tc.cache_variables["CMAKE_EXE_LINKER_FLAGS"] = "-static-libstdc++ -static-libgcc"
tc.generate()
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
copy(self, "COPYING", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
cmake = CMake(self)
cmake.install()
def package_info(self):
self.cpp_info.includedirs = []
self.cpp_info.libdirs = []
# TODO: to remove in conan v2
if Version(conan_version).major < 2:
self.env_info.PATH.append(os.path.join(self.package_folder, "bin"))
self.env_info.CONAN_CMAKE_GENERATOR = "Ninja"
---
blob_id: 23b3e5b1c999fb7d1b2b3bf461db4535b009dd74
directory_id: e196fe807b2720eb7f08ad9ca914887341bd9b44
path: /src/streamlink/plugins/dogus.py
content_id: 3664a1ecc546e829d20a60c7a3d51f959fd18995
detected_licenses: ["BSD-2-Clause"]
license_type: permissive
repo_name: streamlink/streamlink
snapshot_id: ab2ce4a8d71d2abd67f300628f04ce960e7696d0
revision_id: 561f7ef854e3ec076e5bd3efb3e7f8efe5df32df
branch_name: refs/heads/master
visit_date: 2023-08-29T15:03:17.008502
revision_date: 2023-08-26T19:24:39
committer_date: 2023-08-27T11:02:30
github_id: 68,402,336
star_events_count: 9,529
fork_events_count: 1,385
gha_license_id: BSD-2-Clause
gha_event_created_at: 2023-09-13T13:37:33
gha_created_at: 2016-09-16T17:52:50
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,040
extension: py
filename: dogus.py
content:
"""
$description Turkish live TV channels from Dogus Group, including Euro Star, Star and NTV.
$url eurostartv.com.tr
$url kralmuzik.com.tr
$url ntv.com.tr
$url startv.com.tr
$type live
"""
import re
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream.hls import HLSStream
@pluginmatcher(re.compile(r"https?://(?:www\.)?eurostartv\.com\.tr/canli-izle"))
@pluginmatcher(re.compile(r"https?://(?:www\.)?kralmuzik\.com\.tr/tv/.+"))
@pluginmatcher(re.compile(r"https?://(?:www\.)?ntv\.com\.tr/canli-yayin/ntv"))
@pluginmatcher(re.compile(r"https?://(?:www\.)?startv\.com\.tr/canli-yayin"))
class Dogus(Plugin):
_re_live_hls = re.compile(r"'(https?://[^']+/live/hls/[^']+)'")
_re_yt_script = re.compile(r"youtube\.init\('([\w-]{11})'")
def _get_streams(self):
root = self.session.http.get(self.url, schema=validate.Schema(validate.parse_html()))
# https://www.ntv.com.tr/canli-yayin/ntv?youtube=true
yt_iframe = root.xpath("string(.//iframe[contains(@src,'youtube.com')][1]/@src)")
# https://www.startv.com.tr/canli-yayin
dm_iframe = root.xpath("string(.//iframe[contains(@src,'dailymotion.com')][1]/@src)")
# https://www.kralmuzik.com.tr/tv/kral-tv
# https://www.kralmuzik.com.tr/tv/kral-pop-tv
yt_script = root.xpath("string(.//script[contains(text(), 'youtube.init')][1]/text())")
if yt_script:
m = self._re_yt_script.search(yt_script)
if m:
yt_iframe = f"https://www.youtube.com/watch?v={m.group(1)}"
iframe = yt_iframe or dm_iframe
if iframe:
return self.session.streams(iframe)
# http://eurostartv.com.tr/canli-izle
dd_script = root.xpath("string(.//script[contains(text(), '/live/hls/')][1]/text())")
if dd_script:
m = self._re_live_hls.search(dd_script)
if m:
return HLSStream.parse_variant_playlist(self.session, m.group(1))
__plugin__ = Dogus
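# A minimal, hypothetical way to exercise this plugin through Streamlink's
# Python session API (not part of the original file; the URL comes from the
# pluginmatcher patterns above, and the "best" selection is illustrative):
#
#   from streamlink import Streamlink
#
#   session = Streamlink()
#   streams = session.streams("https://www.startv.com.tr/canli-yayin")
#   if "best" in streams:
#       print(streams["best"].url)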
---
blob_id: fdff9dab427d38d9fc1857b809849ffde41063d8
directory_id: 4daab5ba90185bae65169ebb8183c635385ab3f8
path: /tests/test_ts/test_ts_guess.py
content_id: 9f90d450338483d31499686f7a01c52ced325041
detected_licenses: ["MIT"]
license_type: permissive
repo_name: duartegroup/autodE
snapshot_id: bcf69440bd04411f97d39df0df0ae1f2bf6feb8c
revision_id: 4d6667592f083dfcf38de6b75c4222c0a0e7b60b
branch_name: refs/heads/master
visit_date: 2023-09-01T15:08:16.028378
revision_date: 2023-07-25T08:09:05
committer_date: 2023-07-25T08:09:05
github_id: 196,085,570
star_events_count: 132
fork_events_count: 42
gha_license_id: MIT
gha_event_created_at: 2023-09-12T15:20:54
gha_created_at: 2019-07-09T21:20:27
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 371
extension: py
filename: test_ts_guess.py
content:
from autode.atoms import Atom
from autode.species.molecule import Molecule
from autode.transition_states.ts_guess import TSguess
def test_that_a_molecules_solvent_is_inherited():
mol = Molecule(atoms=[Atom("H")], mult=2, solvent_name="water")
assert mol.solvent.smiles == "O"
ts_guess = TSguess.from_species(mol)
assert ts_guess.solvent.smiles == "O"
---
blob_id: 0328f8dcc3a648b10f39a593d158a00cd2c5ddd5
directory_id: 26cadb387da6dc71f5536b9d74ad44b7b974d26d
path: /test_launch_testing/test/test_launch_testing/actions/test_pytest.py
content_id: 350fef87b8fd52385f3d0ea8d65259efbc2a1993
detected_licenses: ["BSD-3-Clause", "Apache-2.0"]
license_type: permissive
repo_name: ros2/launch
snapshot_id: 84971e86f6131976bdfaf872fca12f1a6a377cd6
revision_id: f2b232555900d62c3cec839a49afd4cdc01cda58
branch_name: refs/heads/rolling
visit_date: 2023-08-24T14:33:18.237122
revision_date: 2023-08-23T17:12:30
committer_date: 2023-08-23T17:12:30
github_id: 32,485,326
star_events_count: 116
fork_events_count: 139
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T12:07:30
gha_created_at: 2015-03-18T21:27:58
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,330
extension: py
filename: test_pytest.py
content:
# Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the PyTest Action."""
from launch import LaunchDescription
from launch import LaunchService
from launch.actions import EmitEvent
from launch.events import Shutdown
from launch_testing.actions import PyTest
def launch_pytest(test_path):
"""Launch a pytest."""
ld = LaunchDescription([
PyTest(path=str(test_path), timeout=5.0, on_exit=[EmitEvent(event=Shutdown())])
])
ls = LaunchService()
ls.include_launch_description(ld)
assert 0 == ls.run()
def test_pytest_locking():
"""Test running a locking pytest with timeout."""
launch_pytest('locking.py')
def test_pytest_non_locking():
"""Test running a non-locking pytest with timeout."""
launch_pytest('dummy.py')
---
blob_id: 9d4b18b641e3fd6bcece14004ee78e4b8490d573
directory_id: 73f4f74f678fadee409560b78ffacb7aec38c545
path: /Tests/test_interactive.py
content_id: 930cec76dad37343457078c76714854068b2191f
detected_licenses: ["LicenseRef-scancode-unknown-license-reference", "Apache-2.0"]
license_type: permissive
repo_name: IronLanguages/ironpython3
snapshot_id: 14ec38566d7c27675215042d72e38f6a979011ab
revision_id: e8ed79bd7f0f33eb2af1a538dd7e98767c86c211
branch_name: refs/heads/master
visit_date: 2023-09-03T03:36:51.590171
revision_date: 2023-09-02T19:02:51
committer_date: 2023-09-02T19:02:51
github_id: 17,266,066
star_events_count: 2,396
fork_events_count: 349
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-09T01:46:11
gha_created_at: 2014-02-27T21:50:49
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 34,241
extension: py
filename: test_interactive.py
content:
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
from iptest.assert_util import *
skiptest("win32")
from iptest.console_util import IronPythonInstance
remove_ironpython_dlls(testpath.public_testdir)
from sys import executable
from System import Environment
from sys import exec_prefix
extraArgs = ""
if "-X:LightweightScopes" in Environment.GetCommandLineArgs():
extraArgs += " -X:LightweightScopes"
def test_strings():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
# String exception
response = ipi.ExecuteLine("raise 'foo'", True)
AreEqual(response.replace("\r\r\n", "\n").replace("\r", ""),
"""Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: exceptions must be classes, or instances, not str""")
# Multi-line string literal
ipi.ExecutePartialLine("\"\"\"Hello")
ipi.ExecutePartialLine("")
ipi.ExecutePartialLine("")
AreEqual("'Hello\\n\\n\\nWorld'", ipi.ExecuteLine("World\"\"\""))
ipi.ExecutePartialLine("if False: print 3")
ipi.ExecutePartialLine("else: print 'hello'")
AreEqual(r'hello', ipi.ExecuteLine(""))
# Empty line
AreEqual("", ipi.ExecuteLine(""))
ipi.End()
def test_exceptions():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
# parameterless exception
response = ipi.ExecuteLine("raise Exception", True)
AreEqual(response,
'''Traceback (most recent call last):
File "<stdin>", line 1, in <module>
Exception'''.replace("\n", "\r\r\n") + "\r")
ipi.End()
def test_exceptions_nested():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
ipi.ExecutePartialLine("def a(): return b()")
ipi.ExecuteLine("")
ipi.ExecutePartialLine("def b(): return 1/0")
ipi.ExecuteLine("")
response = ipi.ExecuteLine("a()", True)
response = response.replace("\r\r\n", "\n").strip()
Assert(response.startswith('''Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<stdin>", line 1, in a
File "<stdin>", line 1, in b
ZeroDivisionError:'''), response)
ipi.End()
###############################################################################
# Test "ipy.exe -i script.py"
def test_interactive_mode():
inputScript = testpath.test_inputs_dir + "\\simpleCommand.py"
ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -i \"" + inputScript + "\"")
AreEqual(ipi.Start(), True)
ipi.EnsureInteractive()
AreEqual("1", ipi.ExecuteLine("x"))
ipi.End()
inputScript = testpath.test_inputs_dir + "\\raise.py"
ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -i \"" + inputScript + "\"")
AreEqual(ipi.Start(), True)
ipi.ReadError()
ipi.EnsureInteractive()
AreEqual("1", ipi.ExecuteLine("x"))
ipi.End()
inputScript = testpath.test_inputs_dir + "\\syntaxError.py"
ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -i \"" + inputScript + "\"")
AreEqual(ipi.Start(), True)
# ipi.EnsureInteractive()
AssertContains(ipi.ExecuteLine("x", True), "NameError")
ipi.End()
inputScript = testpath.test_inputs_dir + "\\exit.py"
ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -i \"" + inputScript + "\"")
(result, output, output2, exitCode) = ipi.StartAndRunToCompletion()
AreEqual(exitCode, 0)
ipi.End()
# interactive + -c
ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -i -c x=2")
AreEqual(ipi.Start(), True)
ipi.EnsureInteractive()
Assert(ipi.ExecuteLine("x", True).find("2") != -1)
ipi.End()
###############################################################################
# Test sys.exitfunc
def test_sys_exitfunc():
import clr
inputScript = testpath.test_inputs_dir + "\\exitFuncRuns.py"
ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " \"" + inputScript + "\"")
(result, output, output2, exitCode) = ipi.StartAndRunToCompletion()
AreEqual(exitCode, 0)
AreEqual(output.find('hello world') > -1, True)
ipi.End()
args = extraArgs
if clr.GetCurrentRuntime().Configuration.DebugMode:
args = "-D " + args
inputScript = testpath.test_inputs_dir + "\\exitFuncRaises.py"
ipi = IronPythonInstance(executable, exec_prefix, args + " \"" + inputScript + "\"")
(result, output, output2, exitCode) = ipi.StartAndRunToCompletion()
AreEqual(exitCode, 0)
AreEqual(output2.find('Error in sys.exitfunc:') > -1, True)
AreEqual(output2.find('exitFuncRaises.py", line 19, in foo') > -1, True)
ipi.End()
# verify sys.exit(True) and sys.exit(False) return 1 and 0
ipi = IronPythonInstance(executable, exec_prefix, '-c "import sys; sys.exit(False)"')
res = ipi.StartAndRunToCompletion()
AreEqual(res[0], True) # should have started
AreEqual(res[1], '') # no std out
AreEqual(res[2], '') # no std err
AreEqual(res[3], 0) # should return 0
ipi = IronPythonInstance(executable, exec_prefix, '-c "import sys; sys.exit(True)"')
res = ipi.StartAndRunToCompletion()
AreEqual(res[0], True) # should have started
AreEqual(res[1], '') # no std out
AreEqual(res[2], '') # no std err
    AreEqual(res[3], 1) # should return 1
# and verify it works at the interactive console as well
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
# parameterless exception
ipi.ExecuteLine("import sys")
AreEqual(ipi.ExecuteAndExit("sys.exit(False)"), 0)
# and verify it works at the interactive console as well
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
# parameterless exception
ipi.ExecuteLine("import sys")
AreEqual(ipi.ExecuteAndExit("sys.exit(True)"), 1)
#############################################################################
# verify we need to dedent to a previous valid indentation level
def test_indentation():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
ipi.ExecutePartialLine("if False:")
ipi.ExecutePartialLine(" print 'hello'")
response = ipi.ExecuteLine(" print 'goodbye'", True)
AreEqual(response.find('IndentationError') > 1, True)
ipi.End()
#############################################################################
# verify we dump exception details
def test_dump_exception():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -X:ExceptionDetail")
AreEqual(ipi.Start(), True)
response = ipi.ExecuteLine("raise 'goodbye'", True)
AreEqual(response.count("IronPython.Hosting") >= 1, True)
ipi.End()
#############################################################################
# make sure we can enter try/except blocks
def test_try_except():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
ipi.ExecutePartialLine("try:")
ipi.ExecutePartialLine(" raise Exception('foo')")
ipi.ExecutePartialLine("except Exception, e:")
ipi.ExecutePartialLine(" if e.message=='foo':")
ipi.ExecutePartialLine(" print 'okay'")
response = ipi.ExecuteLine("")
Assert(response.find('okay') > -1)
ipi.End()
###########################################################
# Throw on "complete" incomplete syntax bug #864
def test_incomplete_syntax():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
ipi.ExecutePartialLine("class K:")
response = ipi.ExecuteLine("", True)
Assert("IndentationError:" in response)
ipi.End()
def test_incomplete_syntax_backslash():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
for i in range(4):
for j in range(i):
ipi.ExecutePartialLine("\\")
ipi.ExecutePartialLine("1 + \\")
for j in range(i):
ipi.ExecutePartialLine("\\")
response = ipi.ExecuteLine("2", True)
Assert("3" in response)
ipi.End()
###########################################################
# if, while, try, for and then EOF.
def test_missing_test():
for x in ['if', 'while', 'for', 'try']:
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
response = ipi.ExecuteLine(x, True)
Assert("SyntaxError:" in response)
ipi.End()
##########################################################
# Support multiple-levels of indentation
def test_indentation_levels():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
ipi.ExecutePartialLine("class K:")
ipi.ExecutePartialLine(" def M(self):")
ipi.ExecutePartialLine(" if 1:")
ipi.ExecutePartialLine(" pass")
response = ipi.ExecuteLine("")
ipi.End()
##########################################################
# Support partial lists
def test_partial_lists():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
ipi.ExecutePartialLine("[1")
ipi.ExecutePartialLine(" ,")
ipi.ExecutePartialLine(" 2")
response = ipi.ExecuteLine("]")
Assert("[1, 2]" in response)
ipi.ExecutePartialLine("[")
ipi.ExecutePartialLine("")
ipi.ExecutePartialLine("")
response = ipi.ExecuteLine("]")
Assert("[]" in response)
ipi.End()
def test_partial_lists_cp3530():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
try:
ipi.ExecutePartialLine("[{'a':None},")
response = ipi.ExecuteLine("]")
Assert("[{'a': None}]" in response, response)
ipi.ExecutePartialLine("[{'a'")
response = ipi.ExecutePartialLine(":None},")
response = ipi.ExecuteLine("]")
Assert("[{'a': None}]" in response, response)
ipi.ExecutePartialLine("[{'a':None},")
ipi.ExecutePartialLine("1,")
response = ipi.ExecuteLine("2]")
Assert("[{'a': None}, 1, 2]" in response, response)
finally:
ipi.End()
##########################################################
# Support partial tuples
def test_partial_tuples():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
ipi.ExecutePartialLine("(2")
ipi.ExecutePartialLine(" ,")
ipi.ExecutePartialLine(" 3")
response = ipi.ExecuteLine(")")
Assert("(2, 3)" in response)
ipi.ExecutePartialLine("(")
response = ipi.ExecuteLine(")")
Assert("()" in response)
ipi.ExecutePartialLine("'abc %s %s %s %s %s' % (")
ipi.ExecutePartialLine(" 'def'")
ipi.ExecutePartialLine(" ,'qrt',")
ipi.ExecutePartialLine(" 'jkl'")
ipi.ExecutePartialLine(",'jkl'")
ipi.ExecutePartialLine("")
ipi.ExecutePartialLine(",")
ipi.ExecutePartialLine("")
ipi.ExecutePartialLine("")
ipi.ExecutePartialLine("'123'")
response = ipi.ExecuteLine(")")
Assert("'abc def qrt jkl jkl 123'" in response)
ipi.ExecutePartialLine("a = (")
ipi.ExecutePartialLine(" 1")
ipi.ExecutePartialLine(" , ")
ipi.ExecuteLine(")")
response = ipi.ExecuteLine("a")
Assert("(1,)" in response)
ipi.ExecutePartialLine("(")
ipi.ExecutePartialLine("'joe'")
ipi.ExecutePartialLine(" ")
ipi.ExecutePartialLine(" #")
ipi.ExecutePartialLine(",")
ipi.ExecutePartialLine("2")
response = ipi.ExecuteLine(")")
Assert("('joe', 2)" in response)
ipi.ExecutePartialLine("(")
ipi.ExecutePartialLine("")
ipi.ExecutePartialLine("")
response = ipi.ExecuteLine(")")
Assert("()" in response)
ipi.End()
##########################################################
# Support partial dicts
def test_partial_dicts():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
ipi.ExecutePartialLine("{2:2")
ipi.ExecutePartialLine(" ,")
ipi.ExecutePartialLine(" 2:2")
response = ipi.ExecuteLine("}")
Assert("{2: 2}" in response)
ipi.ExecutePartialLine("{")
response = ipi.ExecuteLine("}")
Assert("{}" in response)
ipi.ExecutePartialLine("a = {")
ipi.ExecutePartialLine(" None:2")
ipi.ExecutePartialLine(" , ")
ipi.ExecuteLine("}")
response = ipi.ExecuteLine("a")
Assert("{None: 2}" in response)
ipi.ExecutePartialLine("{")
ipi.ExecutePartialLine("'joe'")
ipi.ExecutePartialLine(": ")
ipi.ExecutePartialLine(" 42")
ipi.ExecutePartialLine(",")
ipi.ExecutePartialLine("3:45")
response = ipi.ExecuteLine("}")
Assert(repr({'joe':42, 3:45}) in response)
ipi.ExecutePartialLine("{")
ipi.ExecutePartialLine("")
ipi.ExecutePartialLine("")
response = ipi.ExecuteLine("}")
Assert("{}" in response)
ipi.End()
###########################################################
# Some whitespace wackiness
def test_whitespace():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
ipi.ExecuteLine(" ")
response = ipi.ExecuteLine("")
ipi.End()
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
ipi.ExecuteLine(" ")
response = ipi.ExecuteLine("2")
Assert("2" in response)
ipi.End()
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
ipi.ExecuteLine(" ")
response = ipi.ExecuteLine(" 2", True)
Assert("SyntaxError:" in response)
ipi.End()
###########################################################
# test the indentation error in the interactive mode
def test_indentation_interactive():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
ipi.ExecutePartialLine("class C:pass")
response = ipi.ExecuteLine("")
AreEqual(response, "")
ipi.ExecutePartialLine("class D(C):")
response = ipi.ExecuteLine("", True)
Assert("IndentationError:" in response)
ipi.End()
###########################################################
# test /mta w/ no other args
def test_mta_no_other_args():
ipi = IronPythonInstance(executable, exec_prefix, '-X:MTA')
AreEqual(ipi.Start(), True)
ipi.ExecutePartialLine("class C:pass")
response = ipi.ExecuteLine("")
AreEqual(response, "")
ipi.ExecutePartialLine("class D(C):")
response = ipi.ExecuteLine("", True)
Assert("IndentationError:" in response)
ipi.End()
###########################################################
# test for comments in interactive input
def test_comments():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
response = ipi.ExecuteLine("# this is some comment line")
AreEqual(response, "")
response = ipi.ExecuteLine(" # this is some comment line")
AreEqual(response, "")
response = ipi.ExecuteLine("# this is some more comment line")
AreEqual(response, "")
ipi.ExecutePartialLine("if 100:")
ipi.ExecutePartialLine(" print 100")
ipi.ExecutePartialLine("# this is some more comment line inside if")
ipi.ExecutePartialLine("# this is some indented comment line inside if")
ipi.ExecutePartialLine(" print 200")
response = ipi.ExecuteLine("")
AreEqual(response, "100" + newline + "200")
ipi.End()
def test_global_values():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
ipi.ExecuteLine("import clr")
response = ipi.ExecuteLine("[x for x in globals().values()]")
Assert(response.startswith('['))
d = eval(ipi.ExecuteLine("globals().fromkeys(['a', 'b'], 'c')"))
AreEqual(d, {'a':'c', 'b':'c'})
def test_globals8961():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
response = ipi.ExecuteLine("print globals().keys()")
res = set(eval(response))
AreEqual(res, set(['__builtins__', '__name__', '__doc__']))
ipi.ExecuteLine("a = None")
response = ipi.ExecuteLine("print globals().keys()")
res = set(eval(response))
AreEqual(res, set(['__builtins__', '__name__', '__doc__', 'a']))
response = ipi.ExecuteLine("print globals().values()")
l = eval(response.replace("<module '__builtin__' (built-in)>", '"builtin"'))
res = set(l)
AreEqual(len(l), 4)
AreEqual(res, set(['builtin', '__main__', None]))
ipi.ExecuteLine("b = None")
response = ipi.ExecuteLine("print globals().values()")
l = eval(response.replace("<module '__builtin__' (built-in)>", '"builtin"'))
res = set(l)
AreEqual(len(l), 5)
AreEqual(res, set(['builtin', '__main__', None]))
def test_console_input_output():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
input_output = [
("x=100",""),
("x=200\n",""),
("\nx=300",""),
("\nx=400\n",""),
("500","500"),
("600\n\n\n\n\n\n\n\n\n\n\n","600"),
("valid=3;more_valid=4;valid","3"),
("valid=5;more_valid=6;more_valid\n\n\n\n\n","6"),
("valid=7;more_valid=8;#valid",""),
("valid=9;valid;# more_valid\n","9"),
("valid=11;more_valid=12;more_valid# should be valid input\n\n\n\n","12"),
]
for x in input_output:
AreEqual(ipi.Start(), True)
AreEqual(ipi.ExecuteLine(x[0]),x[1])
ipi.End()
# expect a clean exception message/stack from thread
def test_thrown_from_thread():
inputScript = path_combine(testpath.temporary_dir, "throwingfromthread.py")
write_to_file(inputScript, '''
def f(): raise AssertionError, 'hello'
import thread, time
thread.start_new_thread(f, tuple())
time.sleep(2)
''')
ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " " + inputScript)
(result, output, output2, exitCode) = ipi.StartAndRunToCompletion()
AreEqual(exitCode, 0)
Assert("AssertionError: hello" in output2)
Assert("IronPython." not in output2) # '.' is necessary here
ipi.End()
def test_form_feeds():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
response = ipi.ExecuteLine("\fprint 'hello'")
AreEqual(response, "hello")
response = ipi.ExecuteLine(" \fprint 'hello'")
AreEqual(response, "hello")
ipi.ExecutePartialLine("def f():")
ipi.ExecutePartialLine("\f print 'hello'")
ipi.ExecuteLine('')
response = ipi.ExecuteLine('f()')
AreEqual(response, "hello")
# \f resets indent to 0
ipi.ExecutePartialLine("def f():")
ipi.ExecutePartialLine(" \f x = 'hello'")
ipi.ExecutePartialLine("\f print x")
ipi.ExecuteLine('')
response = ipi.ExecuteLine('f()')
AreEqual(response, "hello")
# \f resets indent to 0
ipi.ExecutePartialLine("def f():")
ipi.ExecutePartialLine(" \f x = 'hello'")
ipi.ExecutePartialLine(" print x")
ipi.ExecuteLine('')
response = ipi.ExecuteLine('f()')
AreEqual(response, "hello")
def test_ipy_dash_S():
"""ipy -S should still install Lib into sys.path"""
ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -S")
AreEqual(ipi.Start(), True)
response = ipi.ExecuteLine("import sys")
response = ipi.ExecuteLine("print sys.path")
Assert(response.find('Lib') != -1)
def test_startup_dir():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
response = ipi.ExecuteLine("print dir()")
AreEqual(sorted(eval(response)), sorted(['__builtins__', '__doc__', '__name__']))
def test_ipy_dash_m():
import sys
for path in sys.path:
if path.find('Lib') != -1:
filename = System.IO.Path.Combine(path, 'somemodule.py')
break
try:
f = file(filename, 'w')
f.write('print "hello"\n')
f.write('import sys\n')
f.write('print sys.argv')
f.close()
# need to run these tests where we have access to runpy.py
path = System.IO.FileInfo(__file__).DirectoryName
# simple case works
ipi = IronPythonInstance(executable, path, extraArgs + " -m somemodule")
res, output, err, exit = ipi.StartAndRunToCompletion()
AreEqual(res, True) # run should have worked
AreEqual(exit, 0) # should have returned 0
output = output.replace('\r\n', '\n')
lines = output.split('\n')
AreEqual(lines[0], 'hello')
Assert(samefile(eval(lines[1])[0],
filename))
# we receive any arguments in sys.argv
ipi = IronPythonInstance(executable, path, extraArgs + " -m somemodule foo bar")
res, output, err, exit = ipi.StartAndRunToCompletion()
AreEqual(res, True) # run should have worked
AreEqual(exit, 0) # should have returned 0
output = output.replace('\r\n', '\n')
lines = output.split('\n')
AreEqual(lines[0], 'hello')
AreEqual(eval(lines[1]), [filename, 'foo', 'bar'])
f = file(filename, 'w')
f.write('print "hello"\n')
f.write('import sys\n')
f.write('sys.exit(1)')
f.close()
# sys.exit works
ipi = IronPythonInstance(executable, path, extraArgs + " -m somemodule")
res, output, err, exit = ipi.StartAndRunToCompletion()
AreEqual(res, True) # run should have worked
        AreEqual(exit, 1) # should have returned 1
output = output.replace('\r\n', '\n')
lines = output.split('\n')
AreEqual(lines[0], 'hello')
finally:
nt.unlink(filename)
@disabled("CodePlex Work Item 10925")
def test_ipy_dash_m_negative():
# builtin modules should not work
for modname in [ "sys", "datetime" ]:
ipi = IronPythonInstance(executable, exec_prefix,
extraArgs + " -m " + modname)
res, output, err, exit = ipi.StartAndRunToCompletion()
AreEqual(exit, -1)
# Modules within packages should not work
ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -m testpkg1.mod1")
res, output, err, exit = ipi.StartAndRunToCompletion()
AreEqual(res, True) # run should have worked
    AreEqual(exit, 1) # should have returned 1
Assert("SyntaxError: invalid syntax" in err,
"stderr is:" + str(err))
def test_ipy_dash_m_pkgs():
# Python packages work
import nt
Assert("testpkg1" in [x.lower() for x in nt.listdir(nt.getcwd())], nt.getcwd())
old_ipy_path = get_environ_variable("IRONPYTHONPATH")
try:
nt.environ["IRONPYTHONPATH"] = nt.getcwd()
ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -m testpkg1")
res, output, err, exit = ipi.StartAndRunToCompletion()
AreEqual(res, True) # run should have worked
AreEqual(exit, 0) # should have returned 0
AreEqual(output, "")
# Bad module names should not work
ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -m libxyz")
res, output, err, exit = ipi.StartAndRunToCompletion()
AreEqual(res, True) # run should have worked
        AreEqual(exit, 1) # should have returned 1
Assert("ImportError: No module named libxyz" in err,
"stderr is:" + str(err))
finally:
nt.environ["IRONPYTHONPATH"] = old_ipy_path
def test_ipy_dash_c():
"""verify ipy -c cmd doesn't print expression statements"""
ipi = IronPythonInstance(executable, exec_prefix, "-c True;False")
res = ipi.StartAndRunToCompletion()
AreEqual(res[0], True) # should have started
AreEqual(res[1], '') # no std out
AreEqual(res[2], '') # no std err
AreEqual(res[3], 0) # should return 0
#############################################################################
# CP11924 - verify 'from __future__ import division' works
def test_future_division():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
ipi.ExecuteLine("from __future__ import division")
response = ipi.ExecuteLine("11/4")
AreEqual(response, "2.75")
ipi.End()
#############################################################################
# CP2206
def test_future_with():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
ipi.ExecutePartialLine("class K(object):")
ipi.ExecutePartialLine(" def __enter__(self): return 3.14")
ipi.ExecutePartialLine(" def __exit__(self, type, value, tb): return False")
ipi.ExecuteLine("")
ipi.ExecutePartialLine("with K() as d:")
ipi.ExecutePartialLine(" print d")
response = ipi.ExecuteLine("")
AreEqual(response, "3.14")
ipi.End()
#############################################################################
# Merlin 148481
def test_ipy_dash():
    # Verify that passing "-" as an argument starts an interactive session
ipi = IronPythonInstance(executable, exec_prefix, "-")
AreEqual(ipi.Start(), True)
response = ipi.ExecuteLine("42")
AreEqual(response, "42")
ipi.End()
#############################################################################
def test_mta():
ipi = IronPythonInstance(executable, exec_prefix, '-X:MTA')
AreEqual(ipi.Start(), True)
ipi.ExecuteLine("import System")
response = ipi.ExecuteLine("str(System.Threading.Thread.CurrentThread.ApartmentState)")
AreEqual(response, "'MTA'")
ipi.ExecutePartialLine("class C:pass")
response = ipi.ExecuteLine("")
AreEqual(response, "")
response = ipi.ExecuteLine("str(System.Threading.Thread.CurrentThread.ApartmentState)")
AreEqual(response, "'MTA'")
ipi.End()
def test_displayhook():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
# parameterless exception
ipi.ExecuteLine("import sys")
ipi.ExecutePartialLine("def f(x): print 'foo', x")
ipi.ExecuteLine("")
response = ipi.ExecuteLine("sys.displayhook = f")
response = ipi.ExecuteLine("42")
AreEqual(response, "foo 42")
def test_excepthook():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
# parameterless exception
ipi.ExecuteLine("import sys")
ipi.ExecutePartialLine("def f(*args): print 'foo', args")
ipi.ExecuteLine("")
response = ipi.ExecuteLine("sys.excepthook = f")
response = ipi.ExecuteLine("raise Exception", True)
AssertContains(response, "foo (<type 'exceptions.Exception'>, Exception(), <traceback object at")
def test_last_exception():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
# parameterless exception
ipi.ExecuteLine("import sys")
response = ipi.ExecuteLine("hasattr(sys, 'last_value')")
AreEqual(response, 'False')
AssertContains(ipi.ExecuteLine("x", True), "NameError")
response = ipi.ExecuteLine("sys.last_value")
AreEqual(response, "NameError(\"name 'x' is not defined\",)")
response = ipi.ExecuteLine("sys.last_type")
AreEqual(response, "<type 'exceptions.NameError'>")
response = ipi.ExecuteLine("sys.last_traceback")
AssertContains(response, "<traceback object at ")
def test_sta_sleep_Warning():
ipi = IronPythonInstance(executable, exec_prefix, '-c "from System.Threading import Thread;Thread.Sleep(100)"')
retval, stdouttext, stderrtext, exitcode = ipi.StartAndRunToCompletion()
Assert(stderrtext.endswith("RuntimeWarning: Calling Thread.Sleep on an STA thread doesn't pump messages. Use Thread.CurrentThread.Join instead.\r\n"))
def test_newline():
ipi = IronPythonInstance(executable, exec_prefix, "")
ipi.proc.Start()
ipi.reader = ipi.proc.StandardOutput
output = ipi.EatToPrompt()
Assert('\r\r\n' not in output)
Assert('\r\n' in output)
#############################################################################
# Remote console tests
from System.Diagnostics import Process
def get_process_ids(ipi):
ipi.EnsureInteractiveRemote()
ipi.proc.Refresh()
consoleProcessId = ipi.proc.Id
ipi.ExecuteLine("import System")
remoteRuntimeProcessId = ipi.ExecuteLineRemote("System.Diagnostics.Process.GetCurrentProcess().Id")
Assert(remoteRuntimeProcessId.isdigit(), "remoteRuntimeProcessId is '%s'" % remoteRuntimeProcessId)
return consoleProcessId, int(remoteRuntimeProcessId)
def start_remote_console(args = ""):
inputScript = testpath.test_inputs_dir + "\\RemoteConsole.py"
ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " \"" + inputScript + "\" -X:ExceptionDetail " + args)
AreEqual(ipi.Start(), True)
return ipi
# Basic check that the remote console actually uses two processes
def test_remote_console_processes():
# First check that a simple local console uses a single process
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
consoleProcessId, remoteRuntimeProcessId = get_process_ids(ipi)
AreEqual(consoleProcessId, remoteRuntimeProcessId)
ipi.End()
# Now use the remote console
ipi = start_remote_console()
consoleProcessId, remoteRuntimeProcessId = get_process_ids(ipi)
AreNotEqual(consoleProcessId, remoteRuntimeProcessId)
ipi.End()
# The remote runtime should terminate when the console terminates
def test_remote_runtime_normal_exit():
ipi = start_remote_console()
consoleProcessId, remoteRuntimeProcessId = get_process_ids(ipi)
runtimeProcess = Process.GetProcessById(remoteRuntimeProcessId)
Assert(not runtimeProcess.HasExited)
ipi.End()
runtimeProcess.WaitForExit() # The test is that this wait succeeds
# Stress the input-output streams
def test_remote_io():
ipi = start_remote_console()
for i in range(100):
AreEqual(ipi.ExecuteLineRemote("2+2"), "4")
ipi.End()
# Kill the remote runtime and ensure that another process starts up again
def test_remote_server_restart():
ipi = start_remote_console()
consoleProcessId, remoteRuntimeProcessId = get_process_ids(ipi)
runtimeProcess = Process.GetProcessById(remoteRuntimeProcessId)
    AreNotEqual(remoteRuntimeProcessId, consoleProcessId)
runtimeProcess.Kill()
runtimeProcess.WaitForExit()
    # The Process.Exited event is fired asynchronously and might take some time to fire.
# Hence, we need to block for a known marker
ipi.EatToMarker("Remote runtime terminated")
# We need to press Enter to nudge the old console out of the ReadLine...
restartMessage = ipi.ExecuteLine("", True)
ipi.ReadError()
consoleProcessId2, remoteRuntimeProcessId2 = get_process_ids(ipi)
AreEqual(consoleProcessId, consoleProcessId2)
    # This is technically not 100% correct, as there is a small chance that the process id might get reused
AreNotEqual(remoteRuntimeProcessId, remoteRuntimeProcessId2)
ipi.End()
# Check that an exception can be remoted back over the reverse channel
# Note that exceptions are not written to stdout by the remote process
def test_remote_console_exception():
ipi = start_remote_console()
zeroDivisionErrorOutput = ipi.ExecuteLine("1/0", True)
AssertContains(zeroDivisionErrorOutput, "ZeroDivisionError")
ipi.End()
def test_remote_startup_script():
ipi = start_remote_console("-i " + testpath.test_inputs_dir + "\\simpleCommand.py")
AreEqual(ipi.ExecuteLine("x"), "1")
ipi.End()
def get_abort_command_output():
ipi = start_remote_console()
ipi.ExecuteLine("import System")
ipi.ExecutePartialLine ("def Hang():")
ipi.ExecutePartialLine (" print 'ABORT ME!!!' # This string token should trigger an abort...")
ipi.ExecutePartialLine (" infinite = System.Threading.Timeout.Infinite")
ipi.ExecutePartialLine (" System.Threading.Thread.CurrentThread.Join(infinite)")
ipi.ExecuteLine ("")
result = ipi.ExecuteLine("Hang()", True)
ipi.End()
return result
def test_remote_abort_command():
for i in range(10):
output = get_abort_command_output()
if "KeyboardInterrupt" in output:
AssertDoesNotContain(output, "Thread was being aborted.") # ThreadAbortException
return
else:
# Rarely, under stress conditions, ThreadAbortException leaks through.
# Keep retrying until we actually get KeyboardInterrupt
AssertContains(output, "Thread was being aborted.") # ThreadAbortException
continue
Assert(False, "KeyboardInterrupt not thrown. Only KeyboardInterrupt was thrown")
def test_exception_slicing_warning():
ipi = IronPythonInstance(executable, exec_prefix, '-c "print Exception(*range(2))[1]"')
res = ipi.StartAndRunToCompletion()
AreEqual(res[0], True) # should have started
AreEqual(res[1], '1\r\n') # some std out
AreEqual(res[2], '') # no std err
AreEqual(res[3], 0) # should return 0
ipi = IronPythonInstance(executable, exec_prefix,
'-3 -c "import warnings;'
'warnings.filters.reverse();'
'warnings.filters.pop();'
'print Exception(*range(2))[1]"')
res = ipi.StartAndRunToCompletion()
AreEqual(res[0], True) # should have started
AreEqual(res[1], '1\r\n') # std out
Assert(res[2].endswith('DeprecationWarning: __getitem__ not supported for exception classes in 3.x; use args attribute\r\n')) #std err
AreEqual(res[3], 0) # should return 0
#------------------------------------------------------------------------------
run_test(__name__)

# ==== databand-ai/dbnd :: /orchestration/dbnd-spark/benchmark/process.py ====

# © Copyright Databand.ai, an IBM Company 2022
import random
import time
from typing import Tuple
from pyspark.sql import DataFrame, SparkSession
from dbnd import log_dataframe, log_metric, task
@task
def unit_imputations(raw_data: DataFrame, value: int) -> DataFrame:
counter = int(raw_data.describe().first().phone)
noise = random.randint(-counter, counter)
log_metric("Replaced NaNs", counter + noise)
return raw_data.na.fill(value)
@task
def dedup_records(
data: DataFrame,
key_columns: list,
to_pandas: bool,
with_histograms: bool,
sampling_type: str,
sampling_fraction: float,
) -> Tuple[DataFrame, tuple]:
data = data.dropDuplicates(key_columns)
if sampling_type is not None:
if sampling_type == "random":
data = data.sample(False, sampling_fraction)
if sampling_type == "first":
data = data.limit(int(data.count() * sampling_fraction))
inputs_shape = (data.count(), len(data.columns))
if to_pandas:
log_dataframe("data", data.toPandas(), with_histograms=with_histograms)
else:
log_dataframe("data", data, with_histograms=with_histograms)
return data, inputs_shape
@task
def create_report(data: DataFrame) -> DataFrame:
log_metric("Column Count", len(data.columns))
log_metric(
"Avg Score",
int(
data.agg({"score": "sum"}).collect()[0][0]
+ random.randint(-2 * len(data.columns), 2 * len(data.columns))
),
)
return data
@task
def augment_data(data: DataFrame, multiplicator: int) -> DataFrame:
for i in range(multiplicator - 1):
for column in data.columns:
data = data.withColumn(f"{column}_{i}", data[column])
return data
@task
def process_customer_data(
app_name: str,
input_file: str,
output_file: str,
to_pandas: bool,
with_histograms: bool,
sampling_type: str,
sampling_fraction: float,
columns_number_multiplicator: int,
) -> Tuple[str, tuple]:
key_columns = ["name"]
spark = (
SparkSession.builder.appName(app_name)
.master("yarn")
.config("spark.submit.deployMode", "client")
.config("spark.driver.memory", "10g")
.getOrCreate()
)
app_id = spark._jsc.sc().applicationId()
data = spark.read.csv(input_file, inferSchema=True, header=True, sep=",")
data = augment_data(data, columns_number_multiplicator)
imputed = unit_imputations(data, value=0)
clean, inputs_shape = dedup_records(
imputed,
key_columns,
to_pandas,
with_histograms,
sampling_type,
sampling_fraction,
)
report = create_report(clean)
report.write.csv(f"{output_file}/{str(round(time.time()))}")
spark.stop()
return app_id, inputs_shape
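

# A hypothetical local invocation of the benchmark, included as a sketch only;
# the app name, paths, and sampling settings below are illustrative
# assumptions, not values taken from the benchmark itself.
if __name__ == "__main__":
    app_id, shape = process_customer_data(
        app_name="dbnd-spark-benchmark",  # hypothetical app name
        input_file="hdfs:///data/customers.csv",  # hypothetical input path
        output_file="hdfs:///data/reports",  # hypothetical output directory
        to_pandas=False,
        with_histograms=False,
        sampling_type="random",
        sampling_fraction=0.1,
        columns_number_multiplicator=1,
    )
    print(app_id, shape)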

# ==== PaddlePaddle/Paddle :: /test/legacy_test/test_nms_op.py ====

# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from eager_op_test import OpTest
import paddle
def iou(box_a, box_b):
"""Apply intersection-over-union overlap between box_a and box_b"""
xmin_a = min(box_a[0], box_a[2])
ymin_a = min(box_a[1], box_a[3])
xmax_a = max(box_a[0], box_a[2])
ymax_a = max(box_a[1], box_a[3])
xmin_b = min(box_b[0], box_b[2])
ymin_b = min(box_b[1], box_b[3])
xmax_b = max(box_b[0], box_b[2])
ymax_b = max(box_b[1], box_b[3])
area_a = (ymax_a - ymin_a) * (xmax_a - xmin_a)
area_b = (ymax_b - ymin_b) * (xmax_b - xmin_b)
if area_a <= 0 and area_b <= 0:
return 0.0
xa = max(xmin_a, xmin_b)
ya = max(ymin_a, ymin_b)
xb = min(xmax_a, xmax_b)
yb = min(ymax_a, ymax_b)
inter_area = max(xb - xa, 0.0) * max(yb - ya, 0.0)
iou_ratio = inter_area / (area_a + area_b - inter_area)
return iou_ratio
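

# Quick numeric sanity sketch (an addition, not part of the upstream test):
# two unit squares overlapping in a 0.5 x 1.0 strip give
# IoU = 0.5 / (1 + 1 - 0.5) = 1/3.
assert abs(iou([0.0, 0.0, 1.0, 1.0], [0.5, 0.0, 1.5, 1.0]) - 1.0 / 3.0) < 1e-9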
def nms(boxes, nms_threshold):
selected_indices = np.zeros(boxes.shape[0], dtype=np.int64)
keep = np.ones(boxes.shape[0], dtype=int)
io_ratio = np.ones((boxes.shape[0], boxes.shape[0]), dtype=np.float64)
cnt = 0
for i in range(boxes.shape[0]):
if keep[i] == 0:
continue
selected_indices[cnt] = i
cnt += 1
        for j in range(i + 1, boxes.shape[0]):
            overlap = iou(boxes[i], boxes[j])
            io_ratio[i][j] = overlap
            if keep[j]:
                keep[j] = 1 if overlap <= nms_threshold else 0
return selected_indices[:cnt]
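

# Illustrative sketch (an addition, not part of the upstream test): with an
# IoU threshold of 0.5, a box that heavily overlaps an earlier kept box is
# suppressed, while a disjoint box survives.
_demo_boxes = np.array(
    [
        [0.0, 0.0, 1.0, 1.0],  # kept: the first box is always selected
        [0.1, 0.0, 1.1, 1.0],  # suppressed: IoU with box 0 is ~0.82 > 0.5
        [5.0, 5.0, 6.0, 6.0],  # kept: disjoint from every earlier box
    ]
)
assert nms(_demo_boxes, 0.5).tolist() == [0, 2]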
class TestNMSOp(OpTest):
def setUp(self):
self.op_type = 'nms'
self.python_api = paddle.vision.ops.nms
self.dtype = np.float64
self.init_dtype_type()
boxes = np.random.rand(32, 4).astype(self.dtype)
boxes[:, 2] = boxes[:, 0] + boxes[:, 2]
boxes[:, 3] = boxes[:, 1] + boxes[:, 3]
paddle.disable_static()
self.inputs = {'Boxes': boxes}
self.attrs = {'iou_threshold': 0.5}
out_py = nms(boxes, self.attrs['iou_threshold'])
self.outputs = {'KeepBoxesIdxs': out_py}
paddle.enable_static()
def init_dtype_type(self):
pass
def test_check_output(self):
self.check_output()
if __name__ == "__main__":
unittest.main()

# ==== kongzhidea/leetcode :: /Power of Two.py ====

class Solution(object):
def isPowerOfTwo(self, n):
        if n <= 0:
            return False
        count = 0
        while n > 0:
            count += n & 1
            n >>= 1
return count == 1
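

# An equivalent constant-time variant, added as a sketch for comparison (not
# part of the original submission): a positive power of two has exactly one
# set bit, so clearing the lowest set bit with n & (n - 1) must leave zero.
class SolutionBitTrick(object):
    def isPowerOfTwo(self, n):
        return n > 0 and (n & (n - 1)) == 0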

# ==== python/bedevere :: /tests/test___main__.py ====

from aiohttp import web
import pytest
from bedevere import __main__ as main
async def test_ping(aiohttp_client):
app = web.Application()
app.router.add_post("/", main.main)
client = await aiohttp_client(app)
headers = {"x-github-event": "ping",
"x-github-delivery": "1234"}
data = {"zen": "testing is good"}
response = await client.post("/", headers=headers, json=data)
assert response.status == 200
async def test_success(aiohttp_client):
app = web.Application()
app.router.add_post("/", main.main)
client = await aiohttp_client(app)
headers = {"x-github-event": "project",
"x-github-delivery": "1234"}
# Sending a payload that shouldn't trigger any networking, but no errors
# either.
data = {"action": "created"}
response = await client.post("/", headers=headers, json=data)
assert response.status == 200
async def test_failure(aiohttp_client):
"""Even in the face of an exception, the server should not crash."""
app = web.Application()
app.router.add_post("/", main.main)
client = await aiohttp_client(app)
# Missing key headers.
response = await client.post("/", headers={})
assert response.status == 500

# ==== pabigot/pyxb :: /pyxb/bundles/opengis/sos_1_0.py ====

from pyxb.bundles.opengis.raw.sos_1_0 import *

# ==== Chia-Network/chia-blockchain :: /tests/core/daemon/test_daemon.py ====

from __future__ import annotations
import asyncio
import json
import logging
from dataclasses import dataclass, field, replace
from typing import Any, Dict, List, Optional, Tuple, Type, Union, cast
import aiohttp
import pkg_resources
import pytest
from aiohttp.web_ws import WebSocketResponse
from chia.daemon.client import connect_to_daemon
from chia.daemon.keychain_server import (
DeleteLabelRequest,
GetKeyRequest,
GetKeyResponse,
GetKeysResponse,
GetPublicKeyResponse,
GetPublicKeysResponse,
SetLabelRequest,
)
from chia.daemon.server import WebSocketServer, plotter_log_path, service_plotter
from chia.server.outbound_message import NodeType
from chia.simulator.block_tools import BlockTools
from chia.simulator.keyring import TempKeyring
from chia.simulator.time_out_assert import time_out_assert, time_out_assert_custom_interval
from chia.types.peer_info import PeerInfo
from chia.util.config import load_config
from chia.util.ints import uint16
from chia.util.json_util import dict_to_json_str
from chia.util.keychain import Keychain, KeyData, supports_os_passphrase_storage
from chia.util.keyring_wrapper import DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE, KeyringWrapper
from chia.util.ws_message import create_payload, create_payload_dict
from chia.wallet.derive_keys import master_sk_to_farmer_sk, master_sk_to_pool_sk
from tests.core.node_height import node_height_at_least
from tests.util.misc import Marks, datacases
chiapos_version = pkg_resources.get_distribution("chiapos").version
@dataclass
class RouteCase:
route: str
description: str
request: Dict[str, Any]
response: Dict[str, Any]
marks: Marks = ()
@property
def id(self) -> str:
return f"{self.route}: {self.description}"
@dataclass
class WalletAddressCase:
id: str
request: Dict[str, Any]
response: Dict[str, Any]
pubkeys_only: bool = field(default=False)
marks: Marks = ()
@dataclass
class KeysForPlotCase:
id: str
request: Dict[str, Any]
response: Dict[str, Any]
marks: Marks = ()
# Simple class that responds to a poll() call used by WebSocketServer.is_running()
@dataclass
class Service:
running: bool
def poll(self) -> Optional[int]:
return None if self.running else 1
# Mock daemon server that forwards to WebSocketServer
@dataclass
class Daemon:
# Instance variables used by WebSocketServer.is_running()
services: Dict[str, Union[List[Service], Service]]
connections: Dict[str, Optional[List[Any]]]
# Instance variables used by WebSocketServer.get_wallet_addresses()
net_config: Dict[str, Any] = field(default_factory=dict)
def get_command_mapping(self) -> Dict[str, Any]:
return {
"get_routes": None,
"example_one": None,
"example_two": None,
"example_three": None,
}
def is_service_running(self, service_name: str) -> bool:
return WebSocketServer.is_service_running(cast(WebSocketServer, self), service_name)
async def running_services(self) -> Dict[str, Any]:
return await WebSocketServer.running_services(cast(WebSocketServer, self))
async def is_running(self, request: Dict[str, Any]) -> Dict[str, Any]:
return await WebSocketServer.is_running(cast(WebSocketServer, self), request)
async def get_routes(self, request: Dict[str, Any]) -> Dict[str, Any]:
return await WebSocketServer.get_routes(
cast(WebSocketServer, self), websocket=WebSocketResponse(), request=request
)
async def get_wallet_addresses(self, request: Dict[str, Any]) -> Dict[str, Any]:
return await WebSocketServer.get_wallet_addresses(
cast(WebSocketServer, self), websocket=WebSocketResponse(), request=request
)
async def get_keys_for_plotting(self, request: Dict[str, Any]) -> Dict[str, Any]:
return await WebSocketServer.get_keys_for_plotting(
cast(WebSocketServer, self), websocket=WebSocketResponse(), request=request
)
test_key_data = KeyData.from_mnemonic(
"grief lock ketchup video day owner torch young work "
"another venue evidence spread season bright private "
"tomato remind jaguar original blur embody project can"
)
test_key_data_no_secrets = replace(test_key_data, secrets=None)
test_key_data_2 = KeyData.from_mnemonic(
"banana boat fragile ghost fortune beyond aerobic access "
"hammer stable page grunt venture purse canyon discover "
"egg vivid spare immune awake code announce message"
)
success_response_data = {
"success": True,
}
plotter_request_ref = {
"service": "chia_plotter",
"plotter": "chiapos",
"k": 25,
"r": 2,
"u": 128,
"e": True,
"parallel": False,
"n": 1,
"queue": "default",
"d": "unknown",
"t": "unknown",
"t2": "",
"f": "",
"plotNFTContractAddr": "",
"x": True,
"b": 512,
"overrideK": True,
"delay": 0,
"a": 3598820529,
"c": "xxx",
}
def add_private_key_response_data(fingerprint: int) -> Dict[str, object]:
return {
"success": True,
"fingerprint": fingerprint,
}
def fingerprint_missing_response_data(request_type: Type[object]) -> Dict[str, object]:
return {
"success": False,
"error": "malformed request",
"error_details": {"message": f"1 field missing for {request_type.__name__}: fingerprint"},
}
def fingerprint_not_found_response_data(fingerprint: int) -> Dict[str, object]:
return {
"success": False,
"error": "key not found",
"error_details": {
"fingerprint": fingerprint,
},
}
def get_key_response_data(key: KeyData) -> Dict[str, object]:
return {"success": True, **GetKeyResponse(key=key).to_json_dict()}
def get_keys_response_data(keys: List[KeyData]) -> Dict[str, object]:
return {"success": True, **GetKeysResponse(keys=keys).to_json_dict()}
def get_public_key_response_data(key: KeyData) -> Dict[str, object]:
return {"success": True, **GetPublicKeyResponse(key=key).to_json_dict()}
def get_public_keys_response_data(keys: List[KeyData]) -> Dict[str, object]:
return {"success": True, **GetPublicKeysResponse(keys=keys).to_json_dict()}
def label_missing_response_data(request_type: Type[Any]) -> Dict[str, Any]:
return {
"success": False,
"error": "malformed request",
"error_details": {"message": f"1 field missing for {request_type.__name__}: label"},
}
def label_exists_response_data(fingerprint: int, label: str) -> Dict[str, Any]:
return {
"success": False,
"error": "malformed request",
"error_details": {"message": f"label {label!r} already exists for fingerprint {str(fingerprint)!r}"},
}
label_empty_response_data = {
"success": False,
"error": "malformed request",
"error_details": {"message": "label can't be empty or whitespace only"},
}
label_too_long_response_data = {
"success": False,
"error": "malformed request",
"error_details": {"message": "label exceeds max length: 66/65"},
}
label_newline_or_tab_response_data = {
"success": False,
"error": "malformed request",
"error_details": {"message": "label can't contain newline or tab"},
}
def assert_response(
response: aiohttp.http_websocket.WSMessage, expected_response_data: Dict[str, Any], request_id: Optional[str] = None
) -> None:
# Expect: JSON response
assert response.type == aiohttp.WSMsgType.TEXT
message = json.loads(response.data.strip())
# Expect: daemon handled the request
assert message["ack"] is True
if request_id is not None:
assert message["request_id"] == request_id
# Expect: data matches the expected data
assert message["data"] == expected_response_data
def assert_response_success_only(response: aiohttp.http_websocket.WSMessage, request_id: Optional[str] = None) -> None:
# Expect: JSON response
assert response.type == aiohttp.WSMsgType.TEXT
message = json.loads(response.data.strip())
# Expect: {"success": True}
if request_id is not None:
assert message["request_id"] == request_id
assert message["data"]["success"] is True
def assert_running_services_response(response_dict: Dict[str, Any], expected_response_dict: Dict[str, Any]) -> None:
for k, v in expected_response_dict.items():
if k == "running_services":
# Order of services is not guaranteed
assert len(response_dict[k]) == len(v)
assert set(response_dict[k]) == set(v)
else:
assert response_dict[k] == v
@pytest.fixture(scope="session")
def mock_lonely_daemon():
# Mock daemon server without any registered services/connections
return Daemon(services={}, connections={}, net_config={})
@pytest.fixture(scope="session")
def mock_daemon_with_services():
# Mock daemon server with a couple running services, a plotter, and one stopped service
return Daemon(
services={
"my_refrigerator": [Service(True)],
"the_river": [Service(True)],
"your_nose": [Service(False)],
"chia_plotter": [Service(True), Service(True)],
},
connections={},
net_config={},
)
@pytest.fixture(scope="session")
def mock_daemon_with_services_and_connections():
# Mock daemon server with a couple running services, a plotter, and a couple active connections
return Daemon(
services={
"my_refrigerator": [Service(True)],
"chia_plotter": [Service(True), Service(True)],
"apple": [Service(True)],
},
connections={
"apple": [1],
"banana": [1, 2],
},
net_config={},
)
@pytest.fixture(scope="function")
def get_keychain_for_function():
with TempKeyring() as keychain:
yield keychain
KeyringWrapper.cleanup_shared_instance()
@pytest.fixture(scope="function")
def mock_daemon_with_config_and_keys(get_keychain_for_function, root_path_populated_with_config):
root_path = root_path_populated_with_config
config = load_config(root_path, "config.yaml")
keychain = Keychain()
# populate the keychain with some test keys
keychain.add_private_key(test_key_data.mnemonic_str())
keychain.add_private_key(test_key_data_2.mnemonic_str())
# Mock daemon server with net_config set for mainnet
return Daemon(services={}, connections={}, net_config=config)
@pytest.fixture(scope="function")
async def daemon_client_with_config_and_keys(get_keychain_for_function, get_daemon, bt):
keychain = Keychain()
# populate the keychain with some test keys
keychain.add_private_key(test_key_data.mnemonic_str())
keychain.add_private_key(test_key_data_2.mnemonic_str())
daemon = get_daemon
client = await connect_to_daemon(
daemon.self_hostname,
daemon.daemon_port,
50 * 1000 * 1000,
bt.get_daemon_ssl_context(),
heartbeat=daemon.heartbeat,
)
return client
@pytest.mark.asyncio
async def test_daemon_simulation(self_hostname, daemon_simulation):
    daemon_and_nodes, get_b_tools, bt = daemon_simulation
    node1, node2, _, _, _, _, _, _, _, _, daemon1 = daemon_and_nodes
server1 = node1.full_node.server
node2_port = node2.full_node.server.get_port()
await server1.start_client(PeerInfo(self_hostname, uint16(node2_port)))
async def num_connections():
count = len(node2.server.get_connections(NodeType.FULL_NODE))
return count
await time_out_assert_custom_interval(60, 1, num_connections, 1)
await time_out_assert(1500, node_height_at_least, True, node2, 1)
session = aiohttp.ClientSession()
log = logging.getLogger()
log.warning(f"Connecting to daemon on port {daemon1.daemon_port}")
ws = await session.ws_connect(
f"wss://127.0.0.1:{daemon1.daemon_port}",
autoclose=True,
autoping=True,
ssl_context=get_b_tools.get_daemon_ssl_context(),
max_msg_size=100 * 1024 * 1024,
)
service_name = "test_service_name"
data = {"service": service_name}
payload = create_payload("register_service", data, service_name, "daemon")
await ws.send_str(payload)
message_queue = asyncio.Queue()
async def reader(ws, queue):
while True:
            # ClientWebSocketResponse.receive() internally handles PING, PONG, and CLOSE messages
msg = await ws.receive()
if msg.type == aiohttp.WSMsgType.TEXT:
message = msg.data.strip()
message = json.loads(message)
await queue.put(message)
else:
if msg.type == aiohttp.WSMsgType.ERROR:
await ws.close()
elif msg.type == aiohttp.WSMsgType.CLOSED:
pass
break
read_handler = asyncio.create_task(reader(ws, message_queue))
data = {}
payload = create_payload("get_blockchain_state", data, service_name, "chia_full_node")
await ws.send_str(payload)
await asyncio.sleep(5)
blockchain_state_found = False
while not message_queue.empty():
message = await message_queue.get()
if message["command"] == "get_blockchain_state":
blockchain_state_found = True
await ws.close()
read_handler.cancel()
assert blockchain_state_found
@pytest.mark.parametrize(
"service, expected_result",
[
(
"my_refrigerator",
False,
),
(
service_plotter,
False,
),
],
)
def test_is_service_running_no_services(mock_lonely_daemon, service, expected_result):
daemon = mock_lonely_daemon
assert daemon.is_service_running(service) == expected_result
@pytest.mark.parametrize(
"service, expected_result",
[
(
"my_refrigerator",
True,
),
(
service_plotter,
True,
),
(
"your_nose",
False,
),
(
"the_river",
True,
),
(
"the_clock",
False,
),
],
)
def test_is_service_running_with_services(mock_daemon_with_services, service, expected_result):
daemon = mock_daemon_with_services
assert daemon.is_service_running(service) == expected_result
@pytest.mark.parametrize(
"service, expected_result",
[
(
"my_refrigerator",
True,
),
(
service_plotter,
True,
),
(
"apple",
True,
),
(
"banana",
True,
),
(
"orange",
False,
),
],
)
def test_is_service_running_with_services_and_connections(
mock_daemon_with_services_and_connections, service, expected_result
):
daemon = mock_daemon_with_services_and_connections
assert daemon.is_service_running(service) == expected_result
@pytest.mark.asyncio
async def test_running_services_no_services(mock_lonely_daemon):
daemon = mock_lonely_daemon
response = await daemon.running_services()
assert_running_services_response(response, {"success": True, "running_services": []})
@pytest.mark.asyncio
async def test_running_services_with_services(mock_daemon_with_services):
daemon = mock_daemon_with_services
response = await daemon.running_services()
assert_running_services_response(
response, {"success": True, "running_services": ["my_refrigerator", "the_river", service_plotter]}
)
@pytest.mark.asyncio
async def test_running_services_with_services_and_connections(mock_daemon_with_services_and_connections):
daemon = mock_daemon_with_services_and_connections
response = await daemon.running_services()
assert_running_services_response(
response, {"success": True, "running_services": ["my_refrigerator", "apple", "banana", service_plotter]}
)
@pytest.mark.asyncio
async def test_get_routes(mock_lonely_daemon):
daemon = mock_lonely_daemon
response = await daemon.get_routes({})
assert response == {
"success": True,
"routes": ["get_routes", "example_one", "example_two", "example_three"],
}
@datacases(
WalletAddressCase(
id="no params",
request={},
response={
"success": True,
"wallet_addresses": {
test_key_data.fingerprint: [
{
"address": "xch1zze67l3jgxuvyaxhjhu7326sezxxve7lgzvq0497ddggzhff7c9s2pdcwh",
"hd_path": "m/12381/8444/2/0",
},
],
test_key_data_2.fingerprint: [
{
"address": "xch1fra5h0qnsezrxenjyslyxx7y4l268gq52m0rgenh58vn8f577uzswzvk4v",
"hd_path": "m/12381/8444/2/0",
}
],
},
},
),
WalletAddressCase(
id="list of fingerprints",
request={"fingerprints": [test_key_data.fingerprint]},
response={
"success": True,
"wallet_addresses": {
test_key_data.fingerprint: [
{
"address": "xch1zze67l3jgxuvyaxhjhu7326sezxxve7lgzvq0497ddggzhff7c9s2pdcwh",
"hd_path": "m/12381/8444/2/0",
},
],
},
},
),
WalletAddressCase(
id="count and index",
request={"fingerprints": [test_key_data.fingerprint], "count": 2, "index": 1},
response={
"success": True,
"wallet_addresses": {
test_key_data.fingerprint: [
{
"address": "xch16jqcaguq27z8xvpu89j7eaqfzn6k89hdrrlm0rffku85n8n7m7sqqmmahh",
"hd_path": "m/12381/8444/2/1",
},
{
"address": "xch1955vj0gx5tqe7v5tceajn2p4z4pup8d4g2exs0cz4xjqses8ru6qu8zp3y",
"hd_path": "m/12381/8444/2/2",
},
]
},
},
),
WalletAddressCase(
id="hardened derivations",
request={"fingerprints": [test_key_data.fingerprint], "non_observer_derivation": True},
response={
"success": True,
"wallet_addresses": {
test_key_data.fingerprint: [
{
"address": "xch1k996a7h3agygjhqtrf0ycpa7wfd6k5ye2plkf54ukcmdj44gkqkq880l7n",
"hd_path": "m/12381n/8444n/2n/0n",
}
]
},
},
),
WalletAddressCase(
id="invalid fingerprint",
request={"fingerprints": [999999]},
response={
"success": False,
"error": "key(s) not found for fingerprint(s) {999999}",
},
),
WalletAddressCase(
id="missing private key",
request={"fingerprints": [test_key_data.fingerprint]},
response={
"success": False,
"error": f"missing private key for key with fingerprint {test_key_data.fingerprint}",
},
pubkeys_only=True,
),
)
@pytest.mark.asyncio
async def test_get_wallet_addresses(
mock_daemon_with_config_and_keys,
monkeypatch,
case: WalletAddressCase,
):
daemon = mock_daemon_with_config_and_keys
original_get_keys = Keychain.get_keys
def get_keys_no_secrets(self, include_secrets):
return original_get_keys(self, include_secrets=False)
# in the pubkeys_only case, we're ensuring that only pubkeys are returned by get_keys,
# which will have the effect of causing get_wallet_addresses to raise an exception
if case.pubkeys_only:
# monkeypatch Keychain.get_keys() to always call get_keys() with include_secrets=False
monkeypatch.setattr(Keychain, "get_keys", get_keys_no_secrets)
assert case.response == await daemon.get_wallet_addresses(case.request)
@datacases(
KeysForPlotCase(
id="no params",
# When not specifying exact fingerprints, `get_keys_for_plotting` returns
# all farmer_pk/pool_pk data for available fingerprints
request={},
response={
"success": True,
"keys": {
test_key_data.fingerprint: {
"farmer_public_key": bytes(master_sk_to_farmer_sk(test_key_data.private_key).get_g1()).hex(),
"pool_public_key": bytes(master_sk_to_pool_sk(test_key_data.private_key).get_g1()).hex(),
},
test_key_data_2.fingerprint: {
"farmer_public_key": bytes(master_sk_to_farmer_sk(test_key_data_2.private_key).get_g1()).hex(),
"pool_public_key": bytes(master_sk_to_pool_sk(test_key_data_2.private_key).get_g1()).hex(),
},
},
},
),
KeysForPlotCase(
id="list of fingerprints",
request={"fingerprints": [test_key_data.fingerprint]},
response={
"success": True,
"keys": {
test_key_data.fingerprint: {
"farmer_public_key": bytes(master_sk_to_farmer_sk(test_key_data.private_key).get_g1()).hex(),
"pool_public_key": bytes(master_sk_to_pool_sk(test_key_data.private_key).get_g1()).hex(),
},
},
},
),
KeysForPlotCase(
id="invalid fingerprint",
request={"fingerprints": [999999]},
response={
"success": False,
"error": "key(s) not found for fingerprint(s) {999999}",
},
),
)
@pytest.mark.asyncio
async def test_get_keys_for_plotting(
mock_daemon_with_config_and_keys,
monkeypatch,
case: KeysForPlotCase,
):
daemon = mock_daemon_with_config_and_keys
assert case.response == await daemon.get_keys_for_plotting(case.request)
@datacases(
KeysForPlotCase(
id="invalid request format",
request={"fingerprints": test_key_data.fingerprint},
response={},
),
)
@pytest.mark.asyncio
async def test_get_keys_for_plotting_error(
mock_daemon_with_config_and_keys,
monkeypatch,
case: KeysForPlotCase,
):
daemon = mock_daemon_with_config_and_keys
with pytest.raises(ValueError, match="fingerprints must be a list of integer"):
await daemon.get_keys_for_plotting(case.request)
@pytest.mark.asyncio
async def test_get_keys_for_plotting_client(daemon_client_with_config_and_keys):
client = await daemon_client_with_config_and_keys
response = await client.get_keys_for_plotting()
assert response["data"]["success"] is True
assert len(response["data"]["keys"]) == 2
assert str(test_key_data.fingerprint) in response["data"]["keys"]
assert str(test_key_data_2.fingerprint) in response["data"]["keys"]
response = await client.get_keys_for_plotting([test_key_data.fingerprint])
assert response["data"]["success"] is True
assert len(response["data"]["keys"]) == 1
assert str(test_key_data.fingerprint) in response["data"]["keys"]
assert str(test_key_data_2.fingerprint) not in response["data"]["keys"]
await client.close()
@pytest.mark.asyncio
@pytest.mark.parametrize(
"service_request, expected_result, expected_exception",
[
({}, None, KeyError),
(
{"service": "my_refrigerator"},
{"success": True, "service_name": "my_refrigerator", "is_running": False},
None,
),
],
)
async def test_is_running_no_services(mock_lonely_daemon, service_request, expected_result, expected_exception):
daemon = mock_lonely_daemon
if expected_exception is not None:
with pytest.raises(expected_exception):
await daemon.is_running(service_request)
else:
response = await daemon.is_running(service_request)
assert response == expected_result
@pytest.mark.asyncio
@pytest.mark.parametrize(
"service_request, expected_result, expected_exception",
[
({}, None, KeyError),
(
{"service": "my_refrigerator"},
{"success": True, "service_name": "my_refrigerator", "is_running": True},
None,
),
(
{"service": "your_nose"},
{"success": True, "service_name": "your_nose", "is_running": False},
None,
),
(
{"service": "the_river"},
{"success": True, "service_name": "the_river", "is_running": True},
None,
),
(
{"service": service_plotter},
{"success": True, "service_name": service_plotter, "is_running": True},
None,
),
],
)
async def test_is_running_with_services(
mock_daemon_with_services, service_request, expected_result, expected_exception
):
daemon = mock_daemon_with_services
if expected_exception is not None:
with pytest.raises(expected_exception):
await daemon.is_running(service_request)
else:
response = await daemon.is_running(service_request)
assert response == expected_result
@pytest.mark.asyncio
@pytest.mark.parametrize(
"service_request, expected_result, expected_exception",
[
({}, None, KeyError),
(
{"service": "my_refrigerator"},
{"success": True, "service_name": "my_refrigerator", "is_running": True},
None,
),
(
{"service": "your_nose"},
{"success": True, "service_name": "your_nose", "is_running": False},
None,
),
(
{"service": "apple"},
{"success": True, "service_name": "apple", "is_running": True},
None,
),
(
{"service": "banana"},
{"success": True, "service_name": "banana", "is_running": True},
None,
),
(
{"service": "orange"},
{"success": True, "service_name": "orange", "is_running": False},
None,
),
],
)
async def test_is_running_with_services_and_connections(
mock_daemon_with_services_and_connections, service_request, expected_result, expected_exception
):
daemon = mock_daemon_with_services_and_connections
if expected_exception is not None:
with pytest.raises(expected_exception):
await daemon.is_running(service_request)
else:
response = await daemon.is_running(service_request)
assert response == expected_result
@pytest.mark.asyncio
async def test_validate_keyring_passphrase_rpc(daemon_connection_and_temp_keychain):
ws, keychain = daemon_connection_and_temp_keychain
# When: the keychain has a master passphrase set
keychain.set_master_passphrase(
current_passphrase=DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE, new_passphrase="the correct passphrase"
)
bad_passphrase_case_response_data = {
"success": False,
"error": None,
}
missing_passphrase_response_data = {
"success": False,
"error": "missing key",
}
empty_passphrase_response_data = {
"success": False,
"error": None,
}
# When: using the correct passphrase
await ws.send_str(
create_payload("validate_keyring_passphrase", {"key": "the correct passphrase"}, "test", "daemon")
)
# Expect: validation succeeds
# TODO: unify error responses in the server, sometimes we add `error: None` sometimes not.
assert_response(await ws.receive(), {**success_response_data, "error": None})
# When: using the wrong passphrase
await ws.send_str(create_payload("validate_keyring_passphrase", {"key": "the wrong passphrase"}, "test", "daemon"))
# Expect: validation failure
assert_response(await ws.receive(), bad_passphrase_case_response_data)
# When: not including the passphrase in the payload
await ws.send_str(create_payload("validate_keyring_passphrase", {}, "test", "daemon"))
# Expect: validation failure
assert_response(await ws.receive(), missing_passphrase_response_data)
# When: including an empty passphrase in the payload
await ws.send_str(create_payload("validate_keyring_passphrase", {"key": ""}, "test", "daemon"))
# Expect: validation failure
assert_response(await ws.receive(), empty_passphrase_response_data)
@pytest.mark.asyncio
async def test_add_private_key(daemon_connection_and_temp_keychain):
ws, keychain = daemon_connection_and_temp_keychain
mnemonic_with_typo = f"{test_key_data.mnemonic_str()}xyz" # intentional typo: can -> canxyz
    mnemonic_with_missing_word = " ".join(test_key_data.mnemonic_str().split()[:-1])  # missing last word
missing_mnemonic_response_data = {
"success": False,
"error": "malformed request",
"error_details": {"message": "missing mnemonic"},
}
mnemonic_with_typo_response_data = {
"success": False,
"error": "'canxyz' is not in the mnemonic dictionary; may be misspelled",
}
invalid_mnemonic_length_response_data = {
"success": False,
"error": "Invalid mnemonic length",
}
invalid_mnemonic_response_data = {
"success": False,
"error": "Invalid order of mnemonic words",
}
# Expect the key hasn't been added yet
assert keychain.get_private_key_by_fingerprint(test_key_data.fingerprint) is None
await ws.send_str(create_payload("add_private_key", {"mnemonic": test_key_data.mnemonic_str()}, "test", "daemon"))
# Expect: key was added successfully
assert_response(await ws.receive(), add_private_key_response_data(test_key_data.fingerprint))
# When: missing mnemonic
await ws.send_str(create_payload("add_private_key", {}, "test", "daemon"))
# Expect: Failure due to missing mnemonic
assert_response(await ws.receive(), missing_mnemonic_response_data)
    # When: using a mnemonic with an incorrect word (typo)
await ws.send_str(create_payload("add_private_key", {"mnemonic": mnemonic_with_typo}, "test", "daemon"))
# Expect: Failure due to misspelled mnemonic
assert_response(await ws.receive(), mnemonic_with_typo_response_data)
# When: using a mnemonic with an incorrect word count
await ws.send_str(create_payload("add_private_key", {"mnemonic": mnemonic_with_missing_word}, "test", "daemon"))
# Expect: Failure due to invalid mnemonic
assert_response(await ws.receive(), invalid_mnemonic_length_response_data)
    # When: using an incorrect mnemonic
await ws.send_str(create_payload("add_private_key", {"mnemonic": " ".join(["abandon"] * 24)}, "test", "daemon"))
# Expect: Failure due to checksum error
assert_response(await ws.receive(), invalid_mnemonic_response_data)
@pytest.mark.asyncio
async def test_add_private_key_label(daemon_connection_and_temp_keychain):
ws, keychain = daemon_connection_and_temp_keychain
async def assert_add_private_key_with_label(
key_data: KeyData, request: Dict[str, object], add_private_key_response: Dict[str, object]
) -> None:
await ws.send_str(create_payload("add_private_key", request, "test", "daemon"))
assert_response(await ws.receive(), add_private_key_response)
await ws.send_str(
create_payload("get_key", {"fingerprint": key_data.fingerprint, "include_secrets": True}, "test", "daemon")
)
assert_response(await ws.receive(), get_key_response_data(key_data))
# without `label` parameter
key_data_0 = KeyData.generate()
await assert_add_private_key_with_label(
key_data_0,
{"mnemonic": key_data_0.mnemonic_str()},
add_private_key_response_data(key_data_0.fingerprint),
)
# with `label=None`
key_data_1 = KeyData.generate()
await assert_add_private_key_with_label(
key_data_1,
{"mnemonic": key_data_1.mnemonic_str(), "label": None},
add_private_key_response_data(key_data_1.fingerprint),
)
# with `label="key_2"`
key_data_2 = KeyData.generate("key_2")
await assert_add_private_key_with_label(
        key_data_2,
{"mnemonic": key_data_2.mnemonic_str(), "label": key_data_2.label},
add_private_key_response_data(key_data_2.fingerprint),
)
@pytest.mark.asyncio
async def test_get_key(daemon_connection_and_temp_keychain):
ws, keychain = daemon_connection_and_temp_keychain
await ws.send_str(create_payload("get_key", {"fingerprint": test_key_data.fingerprint}, "test", "daemon"))
assert_response(await ws.receive(), fingerprint_not_found_response_data(test_key_data.fingerprint))
keychain.add_private_key(test_key_data.mnemonic_str())
# without `include_secrets`
await ws.send_str(create_payload("get_key", {"fingerprint": test_key_data.fingerprint}, "test", "daemon"))
assert_response(await ws.receive(), get_key_response_data(test_key_data_no_secrets))
# with `include_secrets=False`
await ws.send_str(
create_payload(
"get_key", {"fingerprint": test_key_data.fingerprint, "include_secrets": False}, "test", "daemon"
)
)
assert_response(await ws.receive(), get_key_response_data(test_key_data_no_secrets))
# with `include_secrets=True`
await ws.send_str(
create_payload("get_key", {"fingerprint": test_key_data.fingerprint, "include_secrets": True}, "test", "daemon")
)
assert_response(await ws.receive(), get_key_response_data(test_key_data))
await ws.send_str(create_payload("get_key", {}, "test", "daemon"))
assert_response(await ws.receive(), fingerprint_missing_response_data(GetKeyRequest))
await ws.send_str(create_payload("get_key", {"fingerprint": 123456}, "test", "daemon"))
assert_response(await ws.receive(), fingerprint_not_found_response_data(123456))
@pytest.mark.asyncio
async def test_get_keys(daemon_connection_and_temp_keychain):
ws, keychain = daemon_connection_and_temp_keychain
# empty keychain
await ws.send_str(create_payload("get_keys", {}, "test", "daemon"))
assert_response(await ws.receive(), get_keys_response_data([]))
keys = [KeyData.generate() for _ in range(5)]
keys_added = []
for key_data in keys:
keychain.add_private_key(key_data.mnemonic_str())
keys_added.append(key_data)
get_keys_response_data_without_secrets = get_keys_response_data(
[replace(key, secrets=None) for key in keys_added]
)
# without `include_secrets`
await ws.send_str(create_payload("get_keys", {}, "test", "daemon"))
assert_response(await ws.receive(), get_keys_response_data_without_secrets)
# with `include_secrets=False`
await ws.send_str(create_payload("get_keys", {"include_secrets": False}, "test", "daemon"))
assert_response(await ws.receive(), get_keys_response_data_without_secrets)
# with `include_secrets=True`
await ws.send_str(create_payload("get_keys", {"include_secrets": True}, "test", "daemon"))
assert_response(await ws.receive(), get_keys_response_data(keys_added))
@pytest.mark.asyncio
async def test_get_public_key(daemon_connection_and_temp_keychain):
ws, keychain = daemon_connection_and_temp_keychain
# empty keychain
await ws.send_str(create_payload("get_public_key", {"fingerprint": test_key_data.fingerprint}, "test", "daemon"))
assert_response(await ws.receive(), fingerprint_not_found_response_data(test_key_data.fingerprint))
keychain.add_private_key(test_key_data.mnemonic_str())
await ws.send_str(create_payload("get_public_key", {"fingerprint": test_key_data.fingerprint}, "test", "daemon"))
response = await ws.receive()
assert_response(response, get_public_key_response_data(test_key_data))
# Only allowed_keys are allowed in the key dict
key_dict = json.loads(response.data)["data"]["key"]
keys_in_response = [key for key in key_dict.keys()]
allowed_keys = ["fingerprint", "public_key", "label"]
for key in keys_in_response:
assert key in allowed_keys, f"Unexpected key '{key}' found in response."
@pytest.mark.asyncio
async def test_get_public_keys(daemon_connection_and_temp_keychain):
ws, keychain = daemon_connection_and_temp_keychain
# empty keychain
await ws.send_str(create_payload("get_public_keys", {}, "test", "daemon"))
assert_response(await ws.receive(), get_public_keys_response_data([]))
# populate keychain
keys = [KeyData.generate() for _ in range(5)]
keys_added = []
for key_data in keys:
keychain.add_private_key(key_data.mnemonic_str())
keys_added.append(key_data)
get_public_keys_response = get_public_keys_response_data(keys_added)
await ws.send_str(create_payload("get_public_keys", {}, "test", "daemon"))
response = await ws.receive()
assert_response(response, get_public_keys_response)
# Only allowed_keys are allowed in the key dict
allowed_keys = ["fingerprint", "public_key", "label"]
keys_array = json.loads(response.data)["data"]["keys"]
for key_dict in keys_array:
keys_in_response = [key for key in key_dict.keys()]
for key in keys_in_response:
assert key in allowed_keys, f"Unexpected key '{key}' found in response."
@pytest.mark.asyncio
async def test_key_renaming(daemon_connection_and_temp_keychain):
ws, keychain = daemon_connection_and_temp_keychain
keychain.add_private_key(test_key_data.mnemonic_str())
# Rename the key three times
for i in range(3):
key_data = replace(test_key_data_no_secrets, label=f"renaming_{i}")
await ws.send_str(
create_payload(
"set_label", {"fingerprint": key_data.fingerprint, "label": key_data.label}, "test", "daemon"
)
)
assert_response(await ws.receive(), success_response_data)
await ws.send_str(create_payload("get_key", {"fingerprint": key_data.fingerprint}, "test", "daemon"))
assert_response(
await ws.receive(),
{
"success": True,
"key": key_data.to_json_dict(),
},
)
@pytest.mark.asyncio
async def test_key_label_deletion(daemon_connection_and_temp_keychain):
ws, keychain = daemon_connection_and_temp_keychain
keychain.add_private_key(test_key_data.mnemonic_str(), "key_0")
assert keychain.get_key(test_key_data.fingerprint).label == "key_0"
await ws.send_str(create_payload("delete_label", {"fingerprint": test_key_data.fingerprint}, "test", "daemon"))
assert_response(await ws.receive(), success_response_data)
assert keychain.get_key(test_key_data.fingerprint).label is None
await ws.send_str(create_payload("delete_label", {"fingerprint": test_key_data.fingerprint}, "test", "daemon"))
assert_response(await ws.receive(), fingerprint_not_found_response_data(test_key_data.fingerprint))
@pytest.mark.parametrize(
"method, parameter, response_data_dict",
[
(
"set_label",
{"fingerprint": test_key_data.fingerprint, "label": "new_label"},
success_response_data,
),
(
"set_label",
{"label": "new_label"},
fingerprint_missing_response_data(SetLabelRequest),
),
(
"set_label",
{"fingerprint": test_key_data.fingerprint},
label_missing_response_data(SetLabelRequest),
),
(
"set_label",
{"fingerprint": test_key_data.fingerprint, "label": ""},
label_empty_response_data,
),
(
"set_label",
{"fingerprint": test_key_data.fingerprint, "label": "a" * 66},
label_too_long_response_data,
),
(
"set_label",
{"fingerprint": test_key_data.fingerprint, "label": "a\nb"},
label_newline_or_tab_response_data,
),
(
"set_label",
{"fingerprint": test_key_data.fingerprint, "label": "a\tb"},
label_newline_or_tab_response_data,
),
(
"set_label",
{"fingerprint": test_key_data.fingerprint, "label": "key_0"},
label_exists_response_data(test_key_data.fingerprint, "key_0"),
),
(
"delete_label",
{"fingerprint": test_key_data.fingerprint},
success_response_data,
),
(
"delete_label",
{},
fingerprint_missing_response_data(DeleteLabelRequest),
),
(
"delete_label",
{"fingerprint": 123456},
fingerprint_not_found_response_data(123456),
),
],
)
@pytest.mark.asyncio
async def test_key_label_methods(
daemon_connection_and_temp_keychain, method: str, parameter: Dict[str, Any], response_data_dict: Dict[str, Any]
) -> None:
ws, keychain = daemon_connection_and_temp_keychain
keychain.add_private_key(test_key_data.mnemonic_str(), "key_0")
await ws.send_str(create_payload(method, parameter, "test", "daemon"))
assert_response(await ws.receive(), response_data_dict)
@pytest.mark.asyncio
async def test_bad_json(daemon_connection_and_temp_keychain: Tuple[aiohttp.ClientWebSocketResponse, Keychain]) -> None:
ws, _ = daemon_connection_and_temp_keychain
await ws.send_str("{doo: '12'}") # send some bad json
response = await ws.receive()
# check for error response
assert response.type == aiohttp.WSMsgType.TEXT
message = json.loads(response.data.strip())
assert message["data"]["success"] is False
assert message["data"]["error"].startswith("Expecting property name")
# properly register a service
service_name = "test_service"
data = {"service": service_name}
payload = create_payload("register_service", data, service_name, "daemon")
await ws.send_str(payload)
await ws.receive()
# send some more bad json
await ws.send_str("{doo: '12'}") # send some bad json
response = await ws.receive()
assert response.type == aiohttp.WSMsgType.TEXT
message = json.loads(response.data.strip())
assert message["command"] != "register_service"
assert message["data"]["success"] is False
assert message["data"]["error"].startswith("Expecting property name")
@datacases(
RouteCase(
route="register_service",
description="no service name",
request={
"fred": "barney",
},
response={"success": False},
),
RouteCase(
route="register_service",
description="chia_plotter",
request={
"service": "chia_plotter",
},
response={"success": True, "service": "chia_plotter", "queue": []},
),
RouteCase(
route="unknown_command",
description="non-existant route",
request={},
response={"success": False, "error": "unknown_command unknown_command"},
),
RouteCase(
route="running_services",
description="successful",
request={},
response={"success": True, "running_services": []},
),
RouteCase(
route="keyring_status",
description="successful",
request={},
response={
"can_save_passphrase": supports_os_passphrase_storage(),
"can_set_passphrase_hint": True,
"is_keyring_locked": False,
"passphrase_hint": "",
"passphrase_requirements": {"is_optional": True, "min_length": 8},
"success": True,
"user_passphrase_is_set": False,
},
),
RouteCase(
route="get_status",
description="successful",
request={},
response={"success": True, "genesis_initialized": True},
),
RouteCase(
route="get_plotters",
description="successful",
request={},
response={
"success": True,
"plotters": {
"bladebit": {
"can_install": True,
"cuda_support": False,
"display_name": "BladeBit Plotter",
"installed": False,
},
"chiapos": {"display_name": "Chia Proof of Space", "installed": True, "version": chiapos_version},
"madmax": {"can_install": True, "display_name": "madMAx Plotter", "installed": False},
},
},
),
)
@pytest.mark.asyncio
async def test_misc_daemon_ws(
daemon_connection_and_temp_keychain: Tuple[aiohttp.ClientWebSocketResponse, Keychain],
case: RouteCase,
) -> None:
ws, _ = daemon_connection_and_temp_keychain
payload = create_payload(case.route, case.request, "service_name", "daemon")
await ws.send_str(payload)
response = await ws.receive()
assert_response(response, case.response)
@pytest.mark.asyncio
async def test_unexpected_json(
daemon_connection_and_temp_keychain: Tuple[aiohttp.ClientWebSocketResponse, Keychain]
) -> None:
ws, _ = daemon_connection_and_temp_keychain
await ws.send_str('{"this": "is valid but not expected"}') # send some valid but unexpected json
response = await ws.receive()
# check for error response
assert response.type == aiohttp.WSMsgType.TEXT
message = json.loads(response.data.strip())
assert message["data"]["success"] is False
assert message["data"]["error"].startswith("'command'")
@pytest.mark.parametrize(
"command_to_test",
[("start_service"), ("stop_service"), ("start_plotting"), ("stop_plotting"), ("is_running"), ("register_service")],
)
@pytest.mark.asyncio
async def test_commands_with_no_data(
daemon_connection_and_temp_keychain: Tuple[aiohttp.ClientWebSocketResponse, Keychain], command_to_test: str
) -> None:
ws, _ = daemon_connection_and_temp_keychain
payload = create_payload(command_to_test, {}, "service_name", "daemon")
await ws.send_str(payload)
response = await ws.receive()
assert_response(response, {"success": False, "error": f'{command_to_test} requires "data"'})
@datacases(
RouteCase(
route="set_keyring_passphrase",
description="no passphrase",
request={
"passphrase_hint": "this is a hint",
"save_passphrase": False,
},
response={"success": False, "error": "missing new_passphrase"},
),
RouteCase(
route="set_keyring_passphrase",
description="incorrect type",
request={
"passphrase_hint": "this is a hint",
"save_passphrase": False,
"new_passphrase": True,
},
response={"success": False, "error": "missing new_passphrase"},
),
RouteCase(
route="set_keyring_passphrase",
description="correct",
request={
"passphrase_hint": "this is a hint",
"new_passphrase": "this is a passphrase",
},
response={"success": True, "error": None},
),
)
@pytest.mark.asyncio
async def test_set_keyring_passphrase_ws(
daemon_connection_and_temp_keychain: Tuple[aiohttp.ClientWebSocketResponse, Keychain],
case: RouteCase,
) -> None:
ws, _ = daemon_connection_and_temp_keychain
payload = create_payload(case.route, case.request, "service_name", "daemon")
await ws.send_str(payload)
response = await ws.receive()
assert_response(response, case.response)
@datacases(
RouteCase(
route="remove_keyring_passphrase",
description="wrong current passphrase",
request={"current_passphrase": "wrong passphrase"},
response={"success": False, "error": "current passphrase is invalid"},
),
RouteCase(
route="remove_keyring_passphrase",
description="incorrect type",
request={"current_passphrase": True},
response={"success": False, "error": "missing current_passphrase"},
),
RouteCase(
route="remove_keyring_passphrase",
description="missing current passphrase",
request={},
response={"success": False, "error": "missing current_passphrase"},
),
RouteCase(
route="remove_keyring_passphrase",
description="correct",
request={"current_passphrase": "this is a passphrase"},
response={"success": True, "error": None},
),
RouteCase(
route="unlock_keyring",
description="wrong current passphrase",
request={"key": "wrong passphrase"},
response={"success": False, "error": "bad passphrase"},
),
RouteCase(
route="unlock_keyring",
description="incorrect type",
request={"key": True},
response={"success": False, "error": "missing key"},
),
RouteCase(
route="unlock_keyring",
description="missing data",
request={},
response={"success": False, "error": "missing key"},
),
RouteCase(
route="unlock_keyring",
description="correct",
request={"key": "this is a passphrase"},
response={"success": True, "error": None},
),
RouteCase(
route="set_keyring_passphrase",
description="no current passphrase",
request={
"save_passphrase": False,
"new_passphrase": "another new passphrase",
},
response={"success": False, "error": "missing current_passphrase"},
),
RouteCase(
route="set_keyring_passphrase",
description="incorrect current passphrase",
request={
"save_passphrase": False,
"current_passphrase": "none",
"new_passphrase": "another new passphrase",
},
response={"success": False, "error": "current passphrase is invalid"},
),
RouteCase(
route="set_keyring_passphrase",
description="incorrect type",
request={
"save_passphrase": False,
"current_passphrase": False,
"new_passphrase": "another new passphrase",
},
response={"success": False, "error": "missing current_passphrase"},
),
RouteCase(
route="set_keyring_passphrase",
description="correct",
request={
"save_passphrase": False,
"current_passphrase": "this is a passphrase",
"new_passphrase": "another new passphrase",
},
response={"success": True, "error": None},
),
)
@pytest.mark.asyncio
async def test_passphrase_apis(
daemon_connection_and_temp_keychain: Tuple[aiohttp.ClientWebSocketResponse, Keychain],
case: RouteCase,
) -> None:
ws, keychain = daemon_connection_and_temp_keychain
keychain.set_master_passphrase(
current_passphrase=DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE, new_passphrase="this is a passphrase"
)
payload = create_payload(
case.route,
case.request,
"service_name",
"daemon",
)
await ws.send_str(payload)
response = await ws.receive()
assert_response(response, case.response)
@datacases(
RouteCase(
route="unlock_keyring",
description="exception",
request={"key": "this is a passphrase"},
response={"success": False, "error": "validation exception"},
),
RouteCase(
route="validate_keyring_passphrase",
description="exception",
request={"key": "this is a passphrase"},
response={"success": False, "error": "validation exception"},
),
)
@pytest.mark.asyncio
async def test_keyring_file_deleted(
daemon_connection_and_temp_keychain: Tuple[aiohttp.ClientWebSocketResponse, Keychain],
case: RouteCase,
) -> None:
ws, keychain = daemon_connection_and_temp_keychain
keychain.set_master_passphrase(
current_passphrase=DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE, new_passphrase="this is a passphrase"
)
keychain.keyring_wrapper.keyring.keyring_path.unlink()
payload = create_payload(
case.route,
case.request,
"service_name",
"daemon",
)
await ws.send_str(payload)
response = await ws.receive()
assert_response(response, case.response)
@datacases(
RouteCase(
route="start_plotting",
description="chiapos - missing k",
request={k: v for k, v in plotter_request_ref.items() if k != "k"},
response={"success": False, "error": "'k'"},
),
RouteCase(
route="start_plotting",
description="chiapos - missing d",
request={k: v for k, v in plotter_request_ref.items() if k != "d"},
response={"success": False, "error": "'d'"},
),
RouteCase(
route="start_plotting",
description="chiapos - missing t",
request={k: v for k, v in plotter_request_ref.items() if k != "t"},
response={"success": False, "error": "'t'"},
),
RouteCase(
route="start_plotting",
description="chiapos - both c and p",
request={
**plotter_request_ref,
"c": "hello",
"p": "goodbye",
},
response={
"success": False,
"service_name": "chia_plotter",
"error": "Choose one of pool_contract_address and pool_public_key",
},
),
)
@pytest.mark.asyncio
async def test_plotter_errors(
daemon_connection_and_temp_keychain: Tuple[aiohttp.ClientWebSocketResponse, Keychain], case: RouteCase
) -> None:
ws, keychain = daemon_connection_and_temp_keychain
payload = create_payload(
case.route,
case.request,
"test_service_name",
"daemon",
)
await ws.send_str(payload)
response = await ws.receive()
assert_response(response, case.response)
@datacases(
RouteCase(
route="start_plotting",
description="bladebit - ramplot",
request={
**plotter_request_ref,
"plotter": "bladebit",
"plot_type": "ramplot",
"w": True,
"m": True,
"no_cpu_affinity": True,
"e": False,
},
response={
"success": True,
},
),
RouteCase(
route="start_plotting",
description="bladebit - diskplot",
request={
**plotter_request_ref,
"plotter": "bladebit",
"plot_type": "diskplot",
"w": True,
"m": True,
"no_cpu_affinity": True,
"e": False,
"cache": "cache",
"f1_threads": 5,
"fp_threads": 6,
"c_threads": 4,
"p2_threads": 4,
"p3_threads": 4,
"alternate": True,
"no_t1_direct": True,
"no_t2_direct": True,
},
response={
"success": True,
},
),
RouteCase(
route="start_plotting",
description="madmax",
request={
**plotter_request_ref,
"plotter": "madmax",
"w": True,
"m": True,
"no_cpu_affinity": True,
"t2": "testing",
"v": 128,
},
response={
"success": True,
},
),
)
@pytest.mark.asyncio
async def test_plotter_options(
daemon_connection_and_temp_keychain: Tuple[aiohttp.ClientWebSocketResponse, Keychain],
get_b_tools: BlockTools,
case: RouteCase,
) -> None:
ws, keychain = daemon_connection_and_temp_keychain
# register for chia_plotter events
service_name = "chia_plotter"
data = {"service": service_name}
payload = create_payload("register_service", data, "chia_plotter", "daemon")
await ws.send_str(payload)
response = await ws.receive()
assert_response_success_only(response)
case.request["t"] = str(get_b_tools.root_path)
case.request["d"] = str(get_b_tools.root_path)
payload_rpc = create_payload_dict(
case.route,
case.request,
"test_service_name",
"daemon",
)
payload = dict_to_json_str(payload_rpc)
await ws.send_str(payload)
response = await ws.receive()
assert_response_success_only(response, payload_rpc["request_id"])
def assert_plot_queue_response(
response: aiohttp.http_websocket.WSMessage,
expected_command: str,
expected_message_state: str,
expected_plot_id: str,
expected_plot_state: str,
) -> None:
assert response.type == aiohttp.WSMsgType.TEXT
message = json.loads(response.data.strip())
assert message["command"] == expected_command
assert message["data"]["state"] == expected_message_state
plot_info = message["data"]["queue"][0]
assert plot_info["id"] == expected_plot_id
assert plot_info["state"] == expected_plot_state
def check_plot_queue_log(
response: aiohttp.http_websocket.WSMessage,
expected_command: str,
expected_message_state: str,
expected_plot_id: str,
expected_plot_state: str,
expected_log_entry: str,
) -> bool:
assert_plot_queue_response(
response, expected_command, expected_message_state, expected_plot_id, expected_plot_state
)
message = json.loads(response.data.strip())
plot_info = message["data"]["queue"][0]
return plot_info["log_new"].startswith(expected_log_entry)
@pytest.mark.asyncio
async def test_plotter_roundtrip(
daemon_connection_and_temp_keychain: Tuple[aiohttp.ClientWebSocketResponse, Keychain], get_b_tools: BlockTools
) -> None:
ws, keychain = daemon_connection_and_temp_keychain
# register for chia_plotter events
service_name = "chia_plotter"
data = {"service": service_name}
payload = create_payload("register_service", data, "chia_plotter", "daemon")
await ws.send_str(payload)
response = await ws.receive()
assert_response_success_only(response)
root_path = get_b_tools.root_path
plotting_request: Dict[str, Any] = {
**plotter_request_ref,
"d": str(root_path),
"t": str(root_path),
"p": "xxx",
}
plotting_request.pop("c", None)
payload_rpc = create_payload_dict(
"start_plotting",
plotting_request,
"test_service_name",
"daemon",
)
payload = dict_to_json_str(payload_rpc)
await ws.send_str(payload)
    # should first get response to start_plotting
response = await ws.receive()
assert response.type == aiohttp.WSMsgType.TEXT
message = json.loads(response.data.strip())
assert message["data"]["success"] is True
assert message["request_id"] == payload_rpc["request_id"]
plot_id = message["data"]["ids"][0]
# 1) Submitted
response = await ws.receive()
assert_plot_queue_response(response, "state_changed", "state_changed", plot_id, "SUBMITTED")
# 2) Running
response = await ws.receive()
assert_plot_queue_response(response, "state_changed", "state_changed", plot_id, "RUNNING")
    # Write the chiapos "magic words" to the log file to signal that plotting finished
plot_log_path = plotter_log_path(root_path, plot_id)
with open(plot_log_path, "a") as f:
f.write("Renamed final file")
f.flush()
# 3) log_changed
final_log_entry = False
while not final_log_entry:
response = await ws.receive()
final_log_entry = check_plot_queue_log(
response, "state_changed", "log_changed", plot_id, "RUNNING", "Renamed final file"
)
if not final_log_entry:
with open(plot_log_path, "a") as f:
f.write("Renamed final file")
f.flush()
# 4) Finished
response = await ws.receive()
assert_plot_queue_response(response, "state_changed", "state_changed", plot_id, "FINISHED")
@pytest.mark.asyncio
async def test_plotter_stop_plotting(
daemon_connection_and_temp_keychain: Tuple[aiohttp.ClientWebSocketResponse, Keychain], get_b_tools: BlockTools
) -> None:
ws, keychain = daemon_connection_and_temp_keychain
# register for chia_plotter events
service_name = "chia_plotter"
data = {"service": service_name}
payload = create_payload("register_service", data, "chia_plotter", "daemon")
await ws.send_str(payload)
response = await ws.receive()
assert_response_success_only(response)
root_path = get_b_tools.root_path
plotting_request: Dict[str, Any] = {
**plotter_request_ref,
"d": str(root_path),
"t": str(root_path),
}
payload_rpc = create_payload_dict(
"start_plotting",
plotting_request,
"test_service_name",
"daemon",
)
payload = dict_to_json_str(payload_rpc)
await ws.send_str(payload)
# should first get response to start_plotting
response = await ws.receive()
assert response.type == aiohttp.WSMsgType.TEXT
message = json.loads(response.data.strip())
assert message["data"]["success"] is True
    # make sure it matches the start_plotting request
assert message["request_id"] == payload_rpc["request_id"]
plot_id = message["data"]["ids"][0]
# 1) Submitted
response = await ws.receive()
assert_plot_queue_response(response, "state_changed", "state_changed", plot_id, "SUBMITTED")
# 2) Running
response = await ws.receive()
assert_plot_queue_response(response, "state_changed", "state_changed", plot_id, "RUNNING")
payload_rpc = create_payload_dict(
"stop_plotting",
{"id": plot_id},
"service_name",
"daemon",
)
stop_plotting_request_id = payload_rpc["request_id"]
payload = dict_to_json_str(payload_rpc)
await ws.send_str(payload)
    # 3) Removing
response = await ws.receive()
assert_plot_queue_response(response, "state_changed", "state_changed", plot_id, "REMOVING")
# 4) Finished
response = await ws.receive()
assert_plot_queue_response(response, "state_changed", "state_changed", plot_id, "FINISHED")
# 5) Finally, get the "ack" for the stop_plotting payload
response = await ws.receive()
assert_response(response, {"success": True}, stop_plotting_request_id)
|
7180bb4ca6ca0c11b9d6aab3766c69da05be9a9b
|
a28d672c50faf9632983287d206e8691282cab51
|
/basicsr/train.py
|
e02d98fe07f8c2924dda5b49f95adfa21990fa91
|
[
"Apache-2.0"
] |
permissive
|
XPixelGroup/BasicSR
|
42cf240fbc91bee10cfa12930ab86820969e854c
|
033cd6896d898fdd3dcda32e3102a792efa1b8f4
|
refs/heads/master
| 2023-06-07T15:16:21.940587
| 2023-02-02T07:07:47
| 2023-02-02T07:07:47
| 130,259,654
| 2,088
| 300
|
Apache-2.0
| 2023-09-14T00:50:17
| 2018-04-19T18:58:00
|
Python
|
UTF-8
|
Python
| false
| false
| 9,672
|
py
|
train.py
|
import datetime
import logging
import math
import time
import torch
from os import path as osp
from basicsr.data import build_dataloader, build_dataset
from basicsr.data.data_sampler import EnlargedSampler
from basicsr.data.prefetch_dataloader import CPUPrefetcher, CUDAPrefetcher
from basicsr.models import build_model
from basicsr.utils import (AvgTimer, MessageLogger, check_resume, get_env_info, get_root_logger, get_time_str,
init_tb_logger, init_wandb_logger, make_exp_dirs, mkdir_and_rename, scandir)
from basicsr.utils.options import copy_opt_file, dict2str, parse_options
def init_tb_loggers(opt):
# initialize wandb logger before tensorboard logger to allow proper sync
if (opt['logger'].get('wandb') is not None) and (opt['logger']['wandb'].get('project')
is not None) and ('debug' not in opt['name']):
assert opt['logger'].get('use_tb_logger') is True, ('should turn on tensorboard when using wandb')
init_wandb_logger(opt)
tb_logger = None
if opt['logger'].get('use_tb_logger') and 'debug' not in opt['name']:
tb_logger = init_tb_logger(log_dir=osp.join(opt['root_path'], 'tb_logger', opt['name']))
return tb_logger
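# Illustration (assumed values, mirroring the keys accessed above): a minimal
# opt that enables both backends could look like
#   {'name': 'my_experiment', 'root_path': '/tmp/exp',
#    'logger': {'use_tb_logger': True, 'wandb': {'project': 'my_project'}}}
# 'my_experiment' and 'my_project' are placeholders, not BasicSR defaults.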
def create_train_val_dataloader(opt, logger):
# create train and val dataloaders
train_loader, val_loaders = None, []
for phase, dataset_opt in opt['datasets'].items():
if phase == 'train':
dataset_enlarge_ratio = dataset_opt.get('dataset_enlarge_ratio', 1)
train_set = build_dataset(dataset_opt)
train_sampler = EnlargedSampler(train_set, opt['world_size'], opt['rank'], dataset_enlarge_ratio)
train_loader = build_dataloader(
train_set,
dataset_opt,
num_gpu=opt['num_gpu'],
dist=opt['dist'],
sampler=train_sampler,
seed=opt['manual_seed'])
num_iter_per_epoch = math.ceil(
len(train_set) * dataset_enlarge_ratio / (dataset_opt['batch_size_per_gpu'] * opt['world_size']))
total_iters = int(opt['train']['total_iter'])
            total_epochs = math.ceil(total_iters / num_iter_per_epoch)
logger.info('Training statistics:'
f'\n\tNumber of train images: {len(train_set)}'
f'\n\tDataset enlarge ratio: {dataset_enlarge_ratio}'
f'\n\tBatch size per gpu: {dataset_opt["batch_size_per_gpu"]}'
f'\n\tWorld size (gpu number): {opt["world_size"]}'
f'\n\tRequire iter number per epoch: {num_iter_per_epoch}'
f'\n\tTotal epochs: {total_epochs}; iters: {total_iters}.')
elif phase.split('_')[0] == 'val':
val_set = build_dataset(dataset_opt)
val_loader = build_dataloader(
val_set, dataset_opt, num_gpu=opt['num_gpu'], dist=opt['dist'], sampler=None, seed=opt['manual_seed'])
logger.info(f'Number of val images/folders in {dataset_opt["name"]}: {len(val_set)}')
val_loaders.append(val_loader)
else:
raise ValueError(f'Dataset phase {phase} is not recognized.')
return train_loader, train_sampler, val_loaders, total_epochs, total_iters
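# Worked example of the iteration arithmetic above (all numbers assumed):
# 800 train images, dataset_enlarge_ratio=100, batch_size_per_gpu=16 and
# world_size=4 give num_iter_per_epoch = ceil(800 * 100 / (16 * 4)) = 1250;
# with total_iter=250000 this yields total_epochs = ceil(250000 / 1250) = 200.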
def load_resume_state(opt):
resume_state_path = None
if opt['auto_resume']:
state_path = osp.join('experiments', opt['name'], 'training_states')
if osp.isdir(state_path):
states = list(scandir(state_path, suffix='state', recursive=False, full_path=False))
if len(states) != 0:
states = [float(v.split('.state')[0]) for v in states]
resume_state_path = osp.join(state_path, f'{max(states):.0f}.state')
opt['path']['resume_state'] = resume_state_path
else:
if opt['path'].get('resume_state'):
resume_state_path = opt['path']['resume_state']
if resume_state_path is None:
resume_state = None
else:
device_id = torch.cuda.current_device()
resume_state = torch.load(resume_state_path, map_location=lambda storage, loc: storage.cuda(device_id))
check_resume(opt, resume_state['iter'])
return resume_state
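# Illustration (assumed files): with auto_resume enabled and
# experiments/<name>/training_states/ containing 5000.state and 10000.state,
# `states` becomes [5000.0, 10000.0] and '10000.state' is loaded for resuming.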
def train_pipeline(root_path):
# parse options, set distributed setting, set random seed
opt, args = parse_options(root_path, is_train=True)
opt['root_path'] = root_path
torch.backends.cudnn.benchmark = True
# torch.backends.cudnn.deterministic = True
# load resume states if necessary
resume_state = load_resume_state(opt)
# mkdir for experiments and logger
if resume_state is None:
make_exp_dirs(opt)
if opt['logger'].get('use_tb_logger') and 'debug' not in opt['name'] and opt['rank'] == 0:
mkdir_and_rename(osp.join(opt['root_path'], 'tb_logger', opt['name']))
# copy the yml file to the experiment root
copy_opt_file(args.opt, opt['path']['experiments_root'])
    # WARNING: do not use get_root_logger in the code above (including the
    # functions it calls); otherwise the root logger will not be initialized properly
log_file = osp.join(opt['path']['log'], f"train_{opt['name']}_{get_time_str()}.log")
logger = get_root_logger(logger_name='basicsr', log_level=logging.INFO, log_file=log_file)
logger.info(get_env_info())
logger.info(dict2str(opt))
# initialize wandb and tb loggers
tb_logger = init_tb_loggers(opt)
# create train and validation dataloaders
result = create_train_val_dataloader(opt, logger)
train_loader, train_sampler, val_loaders, total_epochs, total_iters = result
# create model
model = build_model(opt)
if resume_state: # resume training
model.resume_training(resume_state) # handle optimizers and schedulers
logger.info(f"Resuming training from epoch: {resume_state['epoch']}, iter: {resume_state['iter']}.")
start_epoch = resume_state['epoch']
current_iter = resume_state['iter']
else:
start_epoch = 0
current_iter = 0
# create message logger (formatted outputs)
msg_logger = MessageLogger(opt, current_iter, tb_logger)
# dataloader prefetcher
prefetch_mode = opt['datasets']['train'].get('prefetch_mode')
if prefetch_mode is None or prefetch_mode == 'cpu':
prefetcher = CPUPrefetcher(train_loader)
elif prefetch_mode == 'cuda':
prefetcher = CUDAPrefetcher(train_loader, opt)
logger.info(f'Use {prefetch_mode} prefetch dataloader')
if opt['datasets']['train'].get('pin_memory') is not True:
raise ValueError('Please set pin_memory=True for CUDAPrefetcher.')
else:
raise ValueError(f"Wrong prefetch_mode {prefetch_mode}. Supported ones are: None, 'cuda', 'cpu'.")
# training
logger.info(f'Start training from epoch: {start_epoch}, iter: {current_iter}')
data_timer, iter_timer = AvgTimer(), AvgTimer()
start_time = time.time()
for epoch in range(start_epoch, total_epochs + 1):
train_sampler.set_epoch(epoch)
prefetcher.reset()
train_data = prefetcher.next()
while train_data is not None:
data_timer.record()
current_iter += 1
if current_iter > total_iters:
break
# update learning rate
model.update_learning_rate(current_iter, warmup_iter=opt['train'].get('warmup_iter', -1))
# training
model.feed_data(train_data)
model.optimize_parameters(current_iter)
iter_timer.record()
if current_iter == 1:
                # reset the start time in msg_logger for a more accurate eta_time
                # (this does not work in resume mode)
msg_logger.reset_start_time()
# log
if current_iter % opt['logger']['print_freq'] == 0:
log_vars = {'epoch': epoch, 'iter': current_iter}
log_vars.update({'lrs': model.get_current_learning_rate()})
log_vars.update({'time': iter_timer.get_avg_time(), 'data_time': data_timer.get_avg_time()})
log_vars.update(model.get_current_log())
msg_logger(log_vars)
# save models and training states
if current_iter % opt['logger']['save_checkpoint_freq'] == 0:
logger.info('Saving models and training states.')
model.save(epoch, current_iter)
# validation
if opt.get('val') is not None and (current_iter % opt['val']['val_freq'] == 0):
if len(val_loaders) > 1:
logger.warning('Multiple validation datasets are *only* supported by SRModel.')
for val_loader in val_loaders:
model.validation(val_loader, current_iter, tb_logger, opt['val']['save_img'])
data_timer.start()
iter_timer.start()
train_data = prefetcher.next()
# end of iter
# end of epoch
consumed_time = str(datetime.timedelta(seconds=int(time.time() - start_time)))
logger.info(f'End of training. Time consumed: {consumed_time}')
logger.info('Save the latest model.')
model.save(epoch=-1, current_iter=-1) # -1 stands for the latest
if opt.get('val') is not None:
for val_loader in val_loaders:
model.validation(val_loader, current_iter, tb_logger, opt['val']['save_img'])
if tb_logger:
tb_logger.close()
if __name__ == '__main__':
root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir))
train_pipeline(root_path)
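# Usage sketch (assumed invocation; the yml path is a placeholder):
#   python basicsr/train.py -opt options/train/my_model.yml
# where -opt follows the parse_options convention used above.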
|
df8911eda4bca82b68a1db2a1a31b01dca77fd09
|
de2392fb0c9dec8ad5bfc85576ff7f992f15ab72
|
/test/imports_test.py
|
e745ed5baacd0a8a7a141891e3fb497ad537b81c
|
[
"Apache-2.0"
] |
permissive
|
remicres/otbtf
|
a4b9ef16905c7f4a8551e4f59c9302bb6b6636a0
|
2cfff86912ea47ed07185bc7e0289acc4fed2855
|
refs/heads/develop
| 2023-09-01T21:05:24.552844
| 2023-08-26T19:34:10
| 2023-08-26T19:34:10
| 136,026,679
| 156
| 45
|
Apache-2.0
| 2021-11-19T22:53:55
| 2018-06-04T13:09:09
|
C++
|
UTF-8
|
Python
| false
| false
| 944
|
py
|
imports_test.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pytest
import unittest
class ImportsTest(unittest.TestCase):
def test_import_both1(self):
import tensorflow
self.assertTrue(tensorflow.__version__)
import otbApplication
self.assertTrue(otbApplication.Registry_GetAvailableApplications())
def test_import_both2(self):
import otbApplication
self.assertTrue(otbApplication.Registry_GetAvailableApplications())
import tensorflow
self.assertTrue(tensorflow.__version__)
def test_import_all(self):
import otbApplication
self.assertTrue(otbApplication.Registry_GetAvailableApplications())
import tensorflow
self.assertTrue(tensorflow.__version__)
from osgeo import gdal
self.assertTrue(gdal.__version__)
import numpy
self.assertTrue(numpy.__version__)
if __name__ == '__main__':
unittest.main()
|
7b0cdd58cadcdd96fd42874d2bbe78fc117ba732
|
b11113d4e20d5c2dca31a7e9a1fe33c8242c9416
|
/palm-nfts/scripts/deploy_vikings.py
|
da1315e3959c9b208b70956fde0ba20f16827ee0
|
[] |
no_license
|
MoralisWeb3/youtube-tutorials
|
ce182b6b70443af36891ea3f3c25bde6914b9dd9
|
4f6fa634344614840b80f7a3942976f09837a77d
|
refs/heads/main
| 2023-07-06T10:22:32.342053
| 2023-07-04T22:26:39
| 2023-07-04T22:26:39
| 346,786,715
| 639
| 1,821
| null | 2023-05-21T12:00:47
| 2021-03-11T17:40:31
|
C#
|
UTF-8
|
Python
| false
| false
| 254
|
py
|
deploy_vikings.py
|
from brownie import PalmNFT
from scripts.helpful_scripts import get_account
def deploy_random_nft():
account = get_account()
nft_contract = PalmNFT.deploy({"from": account})
print(nft_contract.address)
def main():
deploy_random_nft()
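# Typical invocation (assumed project layout; standard Brownie CLI):
#   brownie run scripts/deploy_vikings.py --network <your-network>
# get_account() is expected to resolve a funded account for that network.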
|
54edbb470f3d4ce64920a670b5b02ed1fcbd6e48
|
dd221d1ab80a49190a0c93277e2471debaa2db95
|
/hanlp/components/parsers/ud/ud_parser.py
|
704d137211dcc07c11066a996e348ba675692435
|
[
"Apache-2.0",
"CC-BY-NC-SA-4.0"
] |
permissive
|
hankcs/HanLP
|
29a22d4e240617e4dc67929c2f9760a822402cf7
|
be2f04905a12990a527417bd47b79b851874a201
|
refs/heads/doc-zh
| 2023-08-18T12:48:43.533453
| 2020-02-15T17:19:28
| 2023-03-14T02:46:03
| 24,976,755
| 32,454
| 9,770
|
Apache-2.0
| 2023-08-13T03:11:39
| 2014-10-09T06:36:16
|
Python
|
UTF-8
|
Python
| false
| false
| 15,674
|
py
|
ud_parser.py
|
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-12-14 20:34
import logging
from copy import deepcopy
from typing import Union, List, Callable
import torch
from torch.utils.data import DataLoader
from hanlp_common.constant import IDX
from hanlp.common.dataset import PadSequenceDataLoader, SortingSamplerBuilder
from hanlp.common.structure import History
from hanlp.common.torch_component import TorchComponent
from hanlp.common.transform import FieldLength, PunctuationMask
from hanlp.common.vocab import Vocab
from hanlp.components.classifiers.transformer_classifier import TransformerComponent
from hanlp.components.parsers.biaffine.biaffine_dep import BiaffineDependencyParser
from hanlp_common.conll import CoNLLUWord, CoNLLSentence
from hanlp.components.parsers.ud.ud_model import UniversalDependenciesModel
from hanlp.components.parsers.ud.util import generate_lemma_rule, append_bos, sample_form_missing
from hanlp.components.parsers.ud.lemma_edit import apply_lemma_rule
from hanlp.datasets.parsing.loaders.conll_dataset import CoNLLParsingDataset
from hanlp.layers.embeddings.contextual_word_embedding import ContextualWordEmbedding
from hanlp.metrics.accuracy import CategoricalAccuracy
from hanlp.metrics.metric import Metric
from hanlp.metrics.mtl import MetricDict
from hanlp.metrics.parsing.attachmentscore import AttachmentScore
from hanlp.utils.time_util import CountdownTimer
from hanlp.utils.torch_util import clip_grad_norm, lengths_to_mask
from hanlp_common.util import merge_locals_kwargs, merge_dict, reorder
class UniversalDependenciesParser(TorchComponent):
def __init__(self, **kwargs) -> None:
"""Universal Dependencies Parsing (lemmatization, features, PoS tagging and dependency parsing) implementation
of "75 Languages, 1 Model: Parsing Universal Dependencies Universally" (:cite:`kondratyuk-straka-2019-75`).
Args:
**kwargs: Predefined config.
"""
super().__init__(**kwargs)
self.model: UniversalDependenciesModel = self.model
def build_dataloader(self,
data,
batch_size,
shuffle=False,
device=None,
logger: logging.Logger = None,
sampler_builder=None,
gradient_accumulation=1,
transformer: ContextualWordEmbedding = None,
**kwargs) -> DataLoader:
transform = [generate_lemma_rule, append_bos, self.vocabs, transformer.transform(), FieldLength('token')]
if not self.config.punct:
transform.append(PunctuationMask('token', 'punct_mask'))
dataset = self.build_dataset(data, transform)
if self.vocabs.mutable:
# noinspection PyTypeChecker
self.build_vocabs(dataset, logger)
lens = [len(x['token_input_ids']) for x in dataset]
if sampler_builder:
sampler = sampler_builder.build(lens, shuffle, gradient_accumulation)
else:
sampler = SortingSamplerBuilder(batch_size).build(lens, shuffle, gradient_accumulation)
return PadSequenceDataLoader(dataset, batch_size, shuffle, device=device, batch_sampler=sampler,
pad={'arc': 0}, )
def build_vocabs(self, trn, logger, **kwargs):
self.vocabs.pos = Vocab(unk_token=None, pad_token=None)
self.vocabs.rel = Vocab(unk_token=None, pad_token=None)
self.vocabs.lemma = Vocab(unk_token=None, pad_token=None)
self.vocabs.feat = Vocab(unk_token=None, pad_token=None)
timer = CountdownTimer(len(trn))
max_seq_len = 0
for each in trn:
max_seq_len = max(max_seq_len, len(each['token']))
timer.log(f'Building vocab [blink][yellow]...[/yellow][/blink] (longest sequence: {max_seq_len})')
for v in self.vocabs.values():
v.set_unk_as_safe_unk()
self.vocabs.lock()
self.vocabs.summary(logger)
def build_dataset(self, data, transform):
dataset = CoNLLParsingDataset(data, transform=transform, prune=sample_form_missing, cache=isinstance(data, str))
return dataset
def build_optimizer(self, trn, **kwargs):
# noinspection PyCallByClass,PyTypeChecker
return TransformerComponent.build_optimizer(self, trn, **kwargs)
def build_criterion(self, **kwargs):
pass
def build_metric(self, **kwargs):
return MetricDict({
'lemmas': CategoricalAccuracy(),
'upos': CategoricalAccuracy(),
'deps': AttachmentScore(),
'feats': CategoricalAccuracy(),
})
def evaluate_dataloader(self,
data: DataLoader,
criterion: Callable,
metric: MetricDict = None,
output=False,
logger=None,
ratio_width=None,
**kwargs):
metric.reset()
self.model.eval()
timer = CountdownTimer(len(data))
total_loss = 0
for idx, batch in enumerate(data):
out, mask = self.feed_batch(batch)
loss = out['loss']
total_loss += loss.item()
self.decode_output(out, mask, batch)
self.update_metrics(metric, batch, out, mask)
report = f'loss: {total_loss / (idx + 1):.4f} {metric.cstr()}'
timer.log(report, logger=logger, ratio_percentage=False, ratio_width=ratio_width)
del loss
del out
del mask
return total_loss / len(data), metric
# noinspection PyMethodOverriding
def build_model(self,
transformer: ContextualWordEmbedding,
n_mlp_arc,
n_mlp_rel,
mlp_dropout,
mix_embedding,
layer_dropout,
training=True,
**kwargs) -> torch.nn.Module:
assert bool(transformer.scalar_mix) == bool(mix_embedding), 'transformer.scalar_mix has to be 1 ' \
'when mix_embedding is non-zero.'
# noinspection PyTypeChecker
return UniversalDependenciesModel(transformer.module(training=training),
n_mlp_arc,
n_mlp_rel,
mlp_dropout,
len(self.vocabs.rel),
len(self.vocabs.lemma),
len(self.vocabs.pos),
len(self.vocabs.feat),
mix_embedding,
layer_dropout)
def predict(self, data: Union[List[str], List[List[str]]], batch_size: int = None, **kwargs):
if not data:
return []
flat = self.input_is_flat(data)
if flat:
data = [data]
samples = self.build_samples(data)
if not batch_size:
batch_size = self.config.batch_size
dataloader = self.build_dataloader(samples,
device=self.devices[0], shuffle=False,
**merge_dict(self.config,
batch_size=batch_size,
overwrite=True,
**kwargs))
order = []
outputs = []
for batch in dataloader:
out, mask = self.feed_batch(batch)
self.decode_output(out, mask, batch)
outputs.extend(self.prediction_to_human(out, batch))
order.extend(batch[IDX])
outputs = reorder(outputs, order)
if flat:
return outputs[0]
return outputs
def build_samples(self, data: List[List[str]]):
return [{'FORM': x} for x in data]
def fit(self,
trn_data,
dev_data,
save_dir,
transformer: ContextualWordEmbedding,
sampler_builder=None,
mix_embedding: int = 13,
            layer_dropout: float = 0.1,
n_mlp_arc=768,
n_mlp_rel=256,
mlp_dropout=.33,
lr=1e-3,
transformer_lr=2.5e-5,
patience=0.1,
batch_size=32,
epochs=30,
gradient_accumulation=1,
adam_epsilon=1e-8,
weight_decay=0,
warmup_steps=0.1,
grad_norm=1.0,
tree=False,
proj=False,
punct=False,
logger=None,
verbose=True,
devices: Union[float, int, List[int]] = None, **kwargs):
return super().fit(**merge_locals_kwargs(locals(), kwargs))
def execute_training_loop(self, trn: DataLoader, dev: DataLoader, epochs, criterion, optimizer, metric, save_dir,
logger: logging.Logger, devices, ratio_width=None, patience=0.5, eval_trn=True, **kwargs):
if isinstance(patience, float):
patience = int(patience * epochs)
best_epoch, best_metric = 0, -1
timer = CountdownTimer(epochs)
history = History()
for epoch in range(1, epochs + 1):
logger.info(f"[yellow]Epoch {epoch} / {epochs}:[/yellow]")
self.fit_dataloader(trn, criterion, optimizer, metric, logger, history=history, ratio_width=ratio_width,
eval_trn=eval_trn, **self.config)
loss, dev_metric = self.evaluate_dataloader(dev, criterion, metric, logger=logger, ratio_width=ratio_width)
timer.update()
report = f"{timer.elapsed_human} / {timer.total_time_human} ETA: {timer.eta_human}"
if dev_metric > best_metric:
best_epoch, best_metric = epoch, deepcopy(dev_metric)
self.save_weights(save_dir)
report += ' [red](saved)[/red]'
else:
report += f' ({epoch - best_epoch})'
if epoch - best_epoch >= patience:
report += ' early stop'
logger.info(report)
if epoch - best_epoch >= patience:
break
if not best_epoch:
self.save_weights(save_dir)
elif best_epoch != epoch:
self.load_weights(save_dir)
logger.info(f"Max score of dev is {best_metric.cstr()} at epoch {best_epoch}")
logger.info(f"Average time of each epoch is {timer.elapsed_average_human}")
logger.info(f"{timer.elapsed_human} elapsed")
# noinspection PyMethodOverriding
def fit_dataloader(self,
trn: DataLoader,
criterion,
optimizer,
metric: MetricDict,
logger: logging.Logger,
history: History,
gradient_accumulation=1,
grad_norm=None,
ratio_width=None,
eval_trn=True,
**kwargs):
optimizer, scheduler = optimizer
metric.reset()
self.model.train()
timer = CountdownTimer(history.num_training_steps(len(trn), gradient_accumulation=gradient_accumulation))
total_loss = 0
for idx, batch in enumerate(trn):
out, mask = self.feed_batch(batch)
loss = out['loss']
if gradient_accumulation and gradient_accumulation > 1:
loss /= gradient_accumulation
loss.backward()
total_loss += loss.item()
if eval_trn:
self.decode_output(out, mask, batch)
self.update_metrics(metric, batch, out, mask)
if history.step(gradient_accumulation):
self._step(optimizer, scheduler, grad_norm)
report = f'loss: {total_loss / (idx + 1):.4f} {metric.cstr()}' if eval_trn \
else f'loss: {total_loss / (idx + 1):.4f}'
timer.log(report, logger=logger, ratio_percentage=False, ratio_width=ratio_width)
del loss
del out
del mask
def decode_output(self, outputs, mask, batch):
arc_scores, rel_scores = outputs['class_probabilities']['deps']['s_arc'], \
outputs['class_probabilities']['deps']['s_rel']
arc_preds, rel_preds = BiaffineDependencyParser.decode(self, arc_scores, rel_scores, mask, batch)
outputs['arc_preds'], outputs['rel_preds'] = arc_preds, rel_preds
return outputs
def update_metrics(self, metrics, batch, outputs, mask):
arc_preds, rel_preds, puncts = outputs['arc_preds'], outputs['rel_preds'], batch.get('punct_mask', None)
BiaffineDependencyParser.update_metric(self, arc_preds, rel_preds, batch['arc'], batch['rel_id'], mask, puncts,
metrics['deps'], batch)
for task, key in zip(['lemmas', 'upos', 'feats'], ['lemma_id', 'pos_id', 'feat_id']):
metric: Metric = metrics[task]
pred = outputs['class_probabilities'][task]
gold = batch[key]
metric(pred.detach(), gold, mask=mask)
return metrics
def feed_batch(self, batch: dict):
mask = self.compute_mask(batch)
output_dict = self.model(batch, mask)
if self.model.training:
mask = mask.clone()
mask[:, 0] = 0
return output_dict, mask
def compute_mask(self, batch):
lens = batch['token_length']
mask = lengths_to_mask(lens)
return mask
def _step(self, optimizer, scheduler, grad_norm):
clip_grad_norm(self.model, grad_norm)
optimizer.step()
scheduler.step()
optimizer.zero_grad()
def input_is_flat(self, data):
# noinspection PyCallByClass,PyTypeChecker
return BiaffineDependencyParser.input_is_flat(self, data, False)
def prediction_to_human(self, outputs: dict, batch):
arcs, rels = outputs['arc_preds'], outputs['rel_preds']
upos = outputs['class_probabilities']['upos'][:, 1:, :].argmax(-1).tolist()
feats = outputs['class_probabilities']['feats'][:, 1:, :].argmax(-1).tolist()
lemmas = outputs['class_probabilities']['lemmas'][:, 1:, :].argmax(-1).tolist()
lem_vocab = self.vocabs['lemma'].idx_to_token
pos_vocab = self.vocabs['pos'].idx_to_token
feat_vocab = self.vocabs['feat'].idx_to_token
# noinspection PyCallByClass,PyTypeChecker
for tree, form, lemma, pos, feat in zip(BiaffineDependencyParser.prediction_to_head_rel(
self, arcs, rels, batch), batch['token'], lemmas, upos, feats):
form = form[1:]
assert len(form) == len(tree)
lemma = [apply_lemma_rule(t, lem_vocab[r]) for t, r in zip(form, lemma)]
pos = [pos_vocab[x] for x in pos]
feat = [feat_vocab[x] for x in feat]
yield CoNLLSentence(
[CoNLLUWord(id=i + 1, form=fo, lemma=l, upos=p, feats=fe, head=a, deprel=r) for
i, (fo, (a, r), l, p, fe) in enumerate(zip(form, tree, lemma, pos, feat))])
def __call__(self, data, batch_size=None, **kwargs) -> Union[CoNLLSentence, List[CoNLLSentence]]:
return super().__call__(data, batch_size, **kwargs)
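# Usage sketch (illustrative only; the path and tokens below are assumptions):
#
#     parser = UniversalDependenciesParser()
#     parser.load('/path/to/trained_component')  # a directory produced by fit()
#     sent = parser(['The', 'quick', 'brown', 'fox'])
#     print(sent)  # CoNLLSentence with lemma, upos, feats, head and deprel filled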
|
5d38c86e833ba1815a3f3f66450fdaf85e81e79d
|
5ef6c8d47864f471e26b9902d61f8c687e941f05
|
/src/genie/libs/parser/iosxr/show_cdp.py
|
f56c57d8b9c6df850129c6a2a6794cdbca366138
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genieparser
|
169c196558f1c1a0f0d10650876096f993224917
|
b531eff760b2e44cd69d7a2716db6f866907c239
|
refs/heads/master
| 2023-09-03T08:56:18.831340
| 2023-08-29T22:32:02
| 2023-08-29T22:32:02
| 131,621,824
| 247
| 409
|
Apache-2.0
| 2023-08-29T22:32:04
| 2018-04-30T16:51:50
|
Python
|
UTF-8
|
Python
| false
| false
| 18,482
|
py
|
show_cdp.py
|
""" show_cdp.py
IOSXR parsers for the following commands:
* 'show cdp neighbors'
* 'show cdp neighbors detail'
* 'show cdp'
* 'show cdp interface'
* 'show cdp interface {interface}'
"""
# Python
import re
# Metaparser
from genie.libs.parser.utils.common import Common
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Any, Optional
class ShowCdpNeighborsSchema(MetaParser):
''' Schema for:
* 'show cdp neighbors'
'''
schema = {
'cdp':
{Optional('index'):
{Any():
{'device_id': str,
'local_interface': str,
'hold_time': int,
Optional('capability'): str,
Optional('platform'): str,
'port_id': str, }, }, },
}
# ================================
# Parser for 'show cdp neighbors'
# ================================
class ShowCdpNeighbors(ShowCdpNeighborsSchema):
exclude = ['hold_time']
cli_command = 'show cdp neighbors'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# Capability Codes: R - Router, T - Trans Bridge, B - Source Route Bridge
# S - Switch, H - Host, I - IGMP, r - Repeater
#
# Specifically for situations when Platform and Port Id are
# concatenated
p1 = re.compile(
r'^(?P<device_id>\S+) +'
r'(?P<local_interface>[a-zA-Z]+[\s]*[\d\/\.]+) +'
r'(?P<hold_time>\d+) +(?P<capability>[RTBSHIrPDCM\s]+) +'
r'(?P<platform>\S+)'
            r'(?P<port_id>(Fa|Gi|GE).\s*\d*\/*\d*)$')
# No platform
p2 = re.compile(
r'^(?P<device_id>\S+) +'
r'(?P<local_interface>[a-zA-Z]+[\s]*[\d\/\.]+) +'
r'(?P<hold_time>\d+) +(?P<capability>[RTBSHIrPDCM\s]+)'
r'(?: +(?P<platform>[\w\-]+) )? +'
            r'(?P<port_id>[a-zA-Z0-9\/\s]+)$')
# device6 Gig 0 157 R S I C887VA-W-W Gi 0
p3 = re.compile(
r'^(?P<device_id>\S+) +'
r'(?P<local_interface>[a-zA-Z]+[\s]*[\d\/\.]+) +'
r'(?P<hold_time>\d+) +(?P<capability>[RTBSHIrPDCM\s]+) +'
            r'(?P<platform>\S+) (?P<port_id>[a-zA-Z0-9\/\s]+)$')
# p4 and p5 for two-line output, where device id is on a separate line
p4 = re.compile(r'^(?P<device_id>\S+)$')
p5 = re.compile(
r'(?P<local_interface>[a-zA-Z]+[\s]*[\d/.]+) +'
r'(?P<hold_time>\d+) +(?P<capability>[RTBSHIrPDCM\s]+) +'
r'(?P<platform>\S+) (?P<port_id>[a-zA-Z0-9/\s]+)$')
device_id_index = 0
parsed_dict = {}
devices_dict_info = {}
for line in out.splitlines():
line = line.strip()
result = p1.match(line)
if not result:
result = p2.match(line)
if not result:
result = p3.match(line)
if result:
device_id_index += 1
device_dict = devices_dict_info.setdefault(device_id_index, {})
group = result.groupdict()
device_dict['device_id'] = group['device_id'].strip()
device_dict['local_interface'] = Common.convert_intf_name(
intf=group['local_interface'].strip())
device_dict['hold_time'] = int(group['hold_time'])
device_dict['capability'] = group['capability'].strip()
if group['platform']:
device_dict['platform'] = group['platform'].strip()
elif not group['platform']:
device_dict['platform'] = ''
device_dict['port_id'] = Common.convert_intf_name(
intf=group['port_id'].strip())
continue
result = p4.match(line)
if result:
group = result.groupdict()
if 'Eth' not in group['device_id']:
device_id_index += 1
device_dict = parsed_dict.setdefault(
'cdp',
{}).setdefault(
'index',
{}).setdefault(
device_id_index,
{})
device_dict['device_id'] = group['device_id'].strip()
else:
device_dict['port_id'] = Common \
.convert_intf_name(intf=group['device_id'].strip())
continue
result = p5.match(line)
if result:
group = result.groupdict()
device_dict = parsed_dict.setdefault('cdp', {}) \
.setdefault('index', {}).setdefault(device_id_index, {})
device_dict['local_interface'] = Common \
.convert_intf_name(intf=group['local_interface'].strip())
device_dict['hold_time'] = int(group['hold_time'])
device_dict['capability'] = group['capability'].strip()
if group['platform']:
device_dict['platform'] = group['platform'].strip()
elif not group['platform']:
device_dict['platform'] = ''
device_dict['port_id'] = Common \
.convert_intf_name(intf=group['port_id'].strip())
continue
if device_id_index:
parsed_dict.setdefault('cdp', {}). \
setdefault('index', devices_dict_info)
return parsed_dict
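# Example (assumed device output): a neighbor line such as
#   'device6         Gig 0          157        R S I      C887VA-W-W    Gi 0'
# matches p3 and, after Common.convert_intf_name normalization, contributes an
# index entry with device_id='device6', hold_time=157 and platform='C887VA-W-W'.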
class ShowCdpNeighborsDetailSchema(MetaParser):
""" Schema for:
* 'show cdp neighbors detail'
"""
schema = {
'total_entries_displayed': int,
Optional('index'): {
Any():
{'device_id': str,
Optional('system_name'): str,
'platform': str,
'capabilities': str,
'local_interface': str,
'port_id': str,
'hold_time': int,
'software_version': str,
'entry_addresses':
{Any():
{Optional('type'): str, }, },
Optional('duplex_mode'): str,
Optional('advertisement_ver'): int,
Optional('native_vlan'): str},
},
}
# =======================================
# Parser for 'show cdp neighbors details'
# =======================================
class ShowCdpNeighborsDetail(ShowCdpNeighborsDetailSchema):
cli_command = 'show cdp neighbors detail'
exclude = ['hold_time']
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# Device ID: R3_nx.cisco.com(972ZZK4REQK)
deviceid_re = re.compile(r'Device\s+ID:\s*(?P<device_id>\S+)')
# SysName:R3_nx
        system_name_re = re.compile(r'SysName\s*:\s*(?P<system_name>\S+)')
# Entry address(es):
        entry_address_re = re.compile(
            r'Entry\s*address\s*\(\w+\)\s*\:\s*')
# IPv4 address: 172.16.1.204
        ipv4_address_re = re.compile(
            r'\S*IPv4\s*'
            r'address:\s*(?P<ip_address>\S*)')
# Platform: N9K-9000v, Capabilities: Router Switch
platf_cap_re = re.compile(
r'Platform:\s*(?P<platform>[a-zA-Z\d +\-\/]+)'
r'\s*\,\s*Capabilities:\s*'
            r'(?P<capabilities>[a-zA-Z\d\s*\-\/]+)')
# Interface: GigabitEthernet0/0/0/5
interface_re = re.compile(
r'Interface:\s*'
r'(?P<local_interface>[\w\s\-\/\/]+)\s*')
# Port ID (outgoing port): Ethernet1/2
port_re = re.compile(
r'Port\s*ID\s*[\(\w\)\s]+:\s*'
r'(?P<port_id>\S+)')
# Holdtime : 126 sec
hold_time_re = re.compile(r'Holdtime\s*:\s*\s*(?P<hold_time>\d+)')
# Version: Cisco IOS Software, IOSv Software (VIOS-ADVENTERPRISEK9-M),
# Version 15.7(3)M3, RELEASE SOFTWARE (fc2)
software_version_re = re.compile(r'(?P<software_version>[\s\S]+)')
# Regexes for Flags:
# Version:
software_version_flag_re = re.compile(r'Version\s*:\s*')
# advertisement version: 2
advertver_re = re.compile(r'advertisement\s*version:\s*'
                                  r'(?P<advertisement_ver>\d+)')
# Native VLAN: 42
native_vlan_re = re.compile(r'Native\s*VLAN\s*:\s*'
                                    r'(?P<native_vlan>\d+)')
# Duplex: full
# Duplex Mode: half
duplex_re = re.compile(r'Duplex\s*(Mode)*:\s*(?P<duplex_mode>\w+)')
# 0 or 1 flags
entry_address_flag = 0
software_version_flag = 0
parsed_dict = {}
index_device = 0
sw_version = []
out = re.sub(r'\r', '', out)
for line in out.splitlines():
line = line.strip()
result = deviceid_re.match(line)
if result:
index_device += 1
parsed_dict['total_entries_displayed'] = index_device
devices_dict = parsed_dict.setdefault('index', {}) \
.setdefault(index_device, {})
device_id = result.group('device_id')
devices_dict['device_id'] = device_id
# Init keys
devices_dict['duplex_mode'] = ''
devices_dict['system_name'] = ''
devices_dict['native_vlan'] = ''
devices_dict['entry_addresses'] = {}
continue
result = system_name_re.match(line)
if result:
devices_dict['system_name'] = result.group('system_name')
continue
result = platf_cap_re.match(line)
if result:
platf_cap_dict = result.groupdict()
devices_dict['capabilities'] = \
platf_cap_dict['capabilities']
devices_dict['platform'] = \
platf_cap_dict['platform']
entry_address_flag = 0
continue
result = interface_re.match(line)
if result:
devices_dict['local_interface'] = result.group(
'local_interface')
continue
result = port_re.match(line)
if result:
devices_dict['port_id'] = result.group('port_id')
continue
result = hold_time_re.match(line)
if result:
devices_dict['hold_time'] = \
int(result.group('hold_time'))
continue
if entry_address_re.match(line):
entry_address_flag = 1
result = ipv4_address_re.match(line)
if result:
ip_address = result.group('ip_address')
if entry_address_flag:
devices_dict['entry_addresses'][ip_address] = {}
continue
result = advertver_re.match(line)
if result:
devices_dict['advertisement_ver'] = \
int(result.group('advertisement_ver'))
continue
if software_version_flag_re.match(line):
software_version_flag = 1
continue
if software_version_flag:
if line and not advertver_re.match(line):
sw_version.append(line)
continue
elif not line or advertver_re.match(line):
parsed_sw_ver = '\n'.join(sw_version)
result = software_version_re.match(parsed_sw_ver)
devices_dict['software_version'] = \
result.group('software_version')
software_version_flag = 0
sw_version.clear()
result = advertver_re.match(line)
if result:
devices_dict['advertisement_ver'] = \
int(result.group('advertisement_ver'))
continue
result = native_vlan_re.match(line)
if result:
devices_dict['native_vlan'] = \
result.group('native_vlan')
continue
result = duplex_re.match(line)
if result:
devices_dict['duplex_mode'] = \
result.group('duplex_mode')
continue
return parsed_dict
class ShowCdpSchema(MetaParser):
"""Schema for show cdp"""
schema = {
'enabled': bool,
Optional('cdp_packets'): int,
Optional('hold_timer'): int,
Optional('cdpv2_advertisements'): str
}
# =======================================
# Parser for 'show cdp'
# =======================================
class ShowCdp(ShowCdpSchema):
"""Parser for show cdp"""
cli_command = 'show cdp'
def cli(self, output=None):
if output is None:
output = self.device.execute(self.cli_command)
# initial return dictionary
ret_dict = {}
# Global CDP information:
p1 = re.compile(r'^Global\s+CDP\s+information:$')
# Sending CDP packets every 60 seconds
p2 = re.compile(r'^Sending\s+CDP\s+packets\s+every\s+(?P<cdp_packets>\d+)\s+seconds$')
# Sending a holdtime value of 180 seconds
p3 = re.compile(r'^Sending\s+a\s+holdtime\s+value\s+of\s+(?P<hold_timer>\d+)\s+seconds$')
# Sending CDPv2 advertisements is not enabled
# Sending CDPv2 advertisements is enabled
p4 = re.compile(r'^Sending\s+CDPv2\s+advertisements\s+is\s+(?P<cdpv2_advertisements>[\w ]+)$')
# % CDP is not enabled
p5 = re.compile(r'^%\s+CDP\s+is\s+not\s+enabled$')
for line in output.splitlines():
line = line.strip()
# Global CDP information:
m = p1.match(line)
if m:
ret_dict['enabled'] = True
continue
# Sending CDP packets every 60 seconds
m = p2.match(line)
if m:
group = m.groupdict()
ret_dict['cdp_packets'] = int(group['cdp_packets'])
continue
# Sending a holdtime value of 180 seconds
m = p3.match(line)
if m:
group = m.groupdict()
ret_dict['hold_timer'] = int(group['hold_timer'])
continue
# Sending CDPv2 advertisements is not enabled
# Sending CDPv2 advertisements is enabled
m = p4.match(line)
if m:
group = m.groupdict()
ret_dict['cdpv2_advertisements'] = group['cdpv2_advertisements']
continue
# % CDP is not enabled
m = p5.match(line)
if m:
ret_dict['enabled'] = False
continue
return ret_dict
class ShowCdpInterfaceSchema(MetaParser):
""" Schema for:
* 'show cdp interface {interface}'
* 'show cdp interface'
"""
schema = {
'interfaces': {
Any(): {
'interface': str,
'status': str,
'encapsulation': str,
'cdp_packets': int,
'hold_timer': int
}
}
}
# =======================================
# Parser for 'show cdp interface {interface}'
# Parser for 'show cdp interface'
# =======================================
class ShowCdpInterface(ShowCdpInterfaceSchema):
''' Parser for commands:
* 'show cdp interface {interface}'
* 'show cdp interface'
'''
cli_command = ['show cdp interface {interface}',
'show cdp interface']
def cli(self, interface=None, output=None):
if output is None:
if interface:
command = self.cli_command[0].format(interface=interface)
else:
command = self.cli_command[1]
output = self.device.execute(command)
# initial return dictionary
ret_dict = {}
# GigabitEthernet0/0/0/16 is Up
# GigabitEthernet0/0/0/17 is Down
# GigabitEthernet0/0/0/18 is Administratively Down
# TenGigE0/0/2/0 is Down
# TenGigE0/0/2/1 is Up
p1 = re.compile(r'^(?P<interface_name>\S+)\s+is\s+(?P<status>[a-zA-Z ]+)$')
# Encapsulation ether
p2 = re.compile(r'^Encapsulation\s+(?P<encapsulation>\w+)$')
# Sending CDP packets every 60 seconds
p3 = re.compile(r'^Sending\s+CDP\s+packets\s+every\s+(?P<cdp_packets>\d+)\s+seconds$')
# Holdtime is 180 seconds
p4 = re.compile(r'^Holdtime\s+is\s+(?P<hold_timer>\d+)\s+seconds$')
for line in output.splitlines():
line = line.strip() # strip whitespace from beginning and end
# GigabitEthernet0/0/0/16 is Up
# GigabitEthernet0/0/0/17 is Down
# GigabitEthernet0/0/0/18 is Administratively Down
# TenGigE0/0/2/0 is Down
# TenGigE0/0/2/1 is Up
m = p1.match(line)
if m:
group = m.groupdict()
intf = Common.convert_intf_name(group['interface_name'])
int_dict = ret_dict.setdefault('interfaces', {}).setdefault(intf, {})
int_dict.update({'interface': intf})
int_dict.update({'status': group['status']})
continue
# Encapsulation ether
m = p2.match(line)
if m:
group = m.groupdict()
int_dict.update({'encapsulation': group['encapsulation']})
continue
# Sending CDP packets every 60 seconds
m = p3.match(line)
if m:
group = m.groupdict()
int_dict.update({'cdp_packets': int(group['cdp_packets'])})
continue
# Holdtime is 180 seconds
m = p4.match(line)
if m:
group = m.groupdict()
int_dict.update({'hold_timer': int(group['hold_timer'])})
continue
return ret_dict
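# The following self-check is an illustrative addition (the sample text is an
# assumption modelled on the commented example lines above); it only runs when
# the module is executed directly, never during normal parser use.
if __name__ == '__main__':
    _sample_output = (
        'Global CDP information:\n'
        '  Sending CDP packets every 60 seconds\n'
        '  Sending a holdtime value of 180 seconds\n'
        '  Sending CDPv2 advertisements is enabled\n'
    )
    _patterns = {
        'cdp_packets': re.compile(r'Sending\s+CDP\s+packets\s+every\s+(\d+)\s+seconds'),
        'hold_timer': re.compile(r'Sending\s+a\s+holdtime\s+value\s+of\s+(\d+)\s+seconds'),
    }
    for _line in _sample_output.splitlines():
        for _key, _pat in _patterns.items():
            _m = _pat.search(_line)
            if _m:
                print(_key, '=', int(_m.group(1)))
    # Expected output:
    #   cdp_packets = 60
    #   hold_timer = 180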
|
0b37c5c626037a9ca68bdb8889d6aba442f1a925
|
474c281c47aed69036b2a13e9a60d150d8ecddc5
|
/tests/test_path.py
|
af3b85761438036afb1fe7e8b07731b3575cd3ab
|
[
"MIT"
] |
permissive
|
ramonhagenaars/jsons
|
c2445eb7c002544abdfde4ac63d42f5a93e4d776
|
9abbf3a3bd32435ac74bc98c3554ad3c71086036
|
refs/heads/master
| 2023-07-23T22:08:10.093119
| 2022-06-09T19:50:52
| 2022-06-09T19:50:52
| 140,337,655
| 286
| 52
|
MIT
| 2023-07-14T15:20:59
| 2018-07-09T20:18:08
|
Python
|
UTF-8
|
Python
| false
| false
| 8,923
|
py
|
test_path.py
|
from pathlib import Path, PureWindowsPath, PurePosixPath
from unittest import TestCase
import jsons
class TestPath(TestCase):
def test_dump_singlepart_relative_path(self):
self.assertEqual('abc', jsons.dump(Path('abc')))
def test_dump_singlepart_pure_windows_path(self):
self.assertEqual('abc', jsons.dump(PureWindowsPath('abc')))
def test_dump_singlepart_pure_posix_path(self):
self.assertEqual('abc', jsons.dump(PurePosixPath('abc')))
def test_dump_multipart_relative_path(self):
self.assertEqual(
'abc/def/ghi',
jsons.dump(Path('abc', 'def', 'ghi'))
)
self.assertEqual(
'abc/def/ghi',
jsons.dump(Path('abc/def/ghi'))
)
def test_dump_multipart_pure_windows_path(self):
self.assertEqual(
'abc/def/ghi',
jsons.dump(PureWindowsPath('abc', 'def', 'ghi'))
)
self.assertEqual(
'abc/def/ghi',
jsons.dump(PureWindowsPath('abc/def/ghi'))
)
self.assertEqual(
'abc/def/ghi',
jsons.dump(PureWindowsPath('abc\\def\\ghi'))
)
def test_dump_multipart_pure_posix_path(self):
self.assertEqual(
'abc/def/ghi',
jsons.dump(PurePosixPath('abc', 'def', 'ghi'))
)
self.assertEqual(
'abc/def/ghi',
jsons.dump(PurePosixPath('abc/def/ghi'))
)
self.assertEqual(
'abc\\def\\ghi',
jsons.dump(PurePosixPath('abc\\def\\ghi'))
)
def test_dump_multipart_drived_pure_windows_path(self):
self.assertEqual(
'Z:/abc/def/ghi',
jsons.dump(PureWindowsPath('Z:\\', 'abc', 'def', 'ghi'))
)
self.assertEqual(
'Z:/abc/def/ghi',
jsons.dump(PureWindowsPath('Z:/abc/def/ghi'))
)
self.assertEqual(
'Z:/abc/def/ghi',
jsons.dump(PureWindowsPath('Z:\\abc\\def\\ghi'))
)
def test_dump_multipart_drived_pure_posix_path(self):
self.assertEqual(
'Z:/abc/def/ghi',
jsons.dump(PurePosixPath('Z:', 'abc', 'def', 'ghi'))
)
self.assertEqual(
'Z:/abc/def/ghi',
jsons.dump(PurePosixPath('Z:/abc/def/ghi'))
)
self.assertEqual(
'Z:\\abc\\def\\ghi',
jsons.dump(PurePosixPath('Z:\\abc\\def\\ghi'))
)
#################
def test_load_singlepart_relative_path(self):
self.assertEqual(
Path('abc'),
jsons.load('abc', Path)
)
def test_load_singlepart_pure_windows_path(self):
self.assertEqual(
PureWindowsPath('abc'),
jsons.load('abc', PureWindowsPath)
)
def test_load_singlepart_pure_posix_path(self):
self.assertEqual(
PurePosixPath('abc'),
jsons.load('abc', PurePosixPath)
)
def test_load_multipart_relative_path(self):
self.assertEqual(
Path('abc', 'def', 'ghi'),
jsons.load('abc/def/ghi', Path)
)
self.assertEqual(
Path('abc/def/ghi'),
jsons.load('abc/def/ghi', Path)
)
def test_load_multipart_pure_windows_path(self):
# We should be able to load Posix-style paths on Windows.
self.assertEqual(
PureWindowsPath('abc', 'def', 'ghi'),
jsons.load('abc/def/ghi', PureWindowsPath)
)
self.assertEqual(
PureWindowsPath('abc/def/ghi'),
jsons.load('abc/def/ghi', PureWindowsPath)
)
self.assertEqual(
PureWindowsPath('abc\\def\\ghi'),
jsons.load('abc/def/ghi', PureWindowsPath)
)
# We should be able to load Windows-style paths on Windows.
self.assertEqual(
PureWindowsPath('abc', 'def', 'ghi'),
jsons.load('abc\\def\\ghi', PureWindowsPath)
)
self.assertEqual(
PureWindowsPath('abc/def/ghi'),
jsons.load('abc\\def\\ghi', PureWindowsPath)
)
self.assertEqual(
PureWindowsPath('abc\\def\\ghi'),
jsons.load('abc\\def\\ghi', PureWindowsPath)
)
def test_load_multipart_pure_posix_path(self):
# We should be able to load Posix-style paths on Posix systems.
self.assertEqual(
PurePosixPath('abc', 'def', 'ghi'),
jsons.load('abc/def/ghi', PurePosixPath)
)
self.assertEqual(
PurePosixPath('abc/def/ghi'),
jsons.load('abc/def/ghi', PurePosixPath)
)
self.assertNotEqual(
PurePosixPath('abc\\def\\ghi'),
jsons.load('abc/def/ghi', PurePosixPath)
)
        # On Posix systems, backslashes are ordinary characters, not path separators.
self.assertNotEqual(
PurePosixPath('abc', 'def', 'ghi'),
jsons.load('abc\\def\\ghi', PurePosixPath)
)
self.assertNotEqual(
PurePosixPath('abc/def/ghi'),
jsons.load('abc\\def\\ghi', PurePosixPath)
)
self.assertEqual(
PurePosixPath('abc\\def\\ghi'),
jsons.load('abc\\def\\ghi', PurePosixPath)
)
def test_load_multipart_drived_pure_windows_path(self):
# We should be able to load Posix-style paths on Windows.
self.assertEqual(
PureWindowsPath('Z:\\', 'abc', 'def', 'ghi'),
jsons.load('Z:/abc/def/ghi', PureWindowsPath)
)
self.assertEqual(
PureWindowsPath('Z:/abc/def/ghi'),
jsons.load('Z:/abc/def/ghi', PureWindowsPath)
)
self.assertEqual(
PureWindowsPath('Z:\\abc\\def\\ghi'),
jsons.load('Z:/abc/def/ghi', PureWindowsPath)
)
# We should be able to load Windows-style paths on Windows.
self.assertEqual(
PureWindowsPath('Z:\\', 'abc', 'def', 'ghi'),
jsons.load('Z:\\abc\\def\\ghi', PureWindowsPath)
)
self.assertEqual(
PureWindowsPath('Z:/abc/def/ghi'),
jsons.load('Z:\\abc\\def\\ghi', PureWindowsPath)
)
self.assertEqual(
PureWindowsPath('Z:\\abc\\def\\ghi'),
jsons.load('Z:\\abc\\def\\ghi', PureWindowsPath)
)
def test_load_multipart_drived_pure_posix_path(self):
        # We should be able to load Posix-style paths on Posix systems.
self.assertEqual(
PurePosixPath('Z:', 'abc', 'def', 'ghi'),
jsons.load('Z:/abc/def/ghi', PurePosixPath)
)
self.assertEqual(
PurePosixPath('Z:/abc/def/ghi'),
jsons.load('Z:/abc/def/ghi', PurePosixPath)
)
self.assertNotEqual(
PurePosixPath('Z:\\abc\\def\\ghi'),
jsons.load('Z:/abc/def/ghi', PurePosixPath)
)
        # On Posix systems, backslashes are ordinary characters, not path separators.
self.assertNotEqual(
PurePosixPath('Z:', 'abc', 'def', 'ghi'),
jsons.load('Z:\\abc\\def\\ghi', PurePosixPath)
)
self.assertNotEqual(
PurePosixPath('Z:/abc/def/ghi'),
jsons.load('Z:\\abc\\def\\ghi', PurePosixPath)
)
self.assertEqual(
PurePosixPath('Z:\\abc\\def\\ghi'),
jsons.load('Z:\\abc\\def\\ghi', PurePosixPath)
)
def test_dump_posix_load_windows(self):
dump_result = jsons.dump(PurePosixPath('abc', 'def', 'ghi'))
self.assertEqual(
'abc/def/ghi',
dump_result
)
load_result = jsons.load(dump_result, PureWindowsPath)
self.assertEqual(
PureWindowsPath('abc', 'def', 'ghi'),
load_result
)
def test_dump_windows_load_posix(self):
dump_result = jsons.dump(PureWindowsPath('abc', 'def', 'ghi'))
self.assertEqual(
'abc/def/ghi',
dump_result
)
load_result = jsons.load(dump_result, PurePosixPath)
self.assertEqual(
PurePosixPath('abc', 'def', 'ghi'),
load_result
)
def test_dump_posix_load_posix(self):
dump_result = jsons.dump(PurePosixPath('abc', 'def', 'ghi'))
self.assertEqual(
'abc/def/ghi',
dump_result
)
load_result = jsons.load(dump_result, PurePosixPath)
self.assertEqual(
PurePosixPath('abc', 'def', 'ghi'),
load_result
)
def test_dump_windows_load_windows(self):
dump_result = jsons.dump(PureWindowsPath('abc', 'def', 'ghi'))
self.assertEqual(
'abc/def/ghi',
dump_result
)
load_result = jsons.load(dump_result, PureWindowsPath)
self.assertEqual(
PureWindowsPath('abc', 'def', 'ghi'),
load_result
)
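
# --- Round-trip sketch (illustrative, mirroring the tests above; `jsons` is
# the PyPI package under test, everything else is stdlib). jsons dumps any
# pure path to its POSIX string form, so a relative path survives a round
# trip into a different path flavour:
#
#   import jsons
#   from pathlib import PurePosixPath, PureWindowsPath
#
#   dumped = jsons.dump(PureWindowsPath('abc', 'def'))   # -> 'abc/def'
#   assert jsons.load(dumped, PurePosixPath) == PurePosixPath('abc', 'def')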
# ============================================================================
# Source file: pulumi/pulumi-azure-native
#   /sdk/python/pulumi_azure_native/databox/v20230301/get_job.py
#   License: BSD-3-Clause, Apache-2.0 (permissive) | Language: Python
# ============================================================================
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetJobResult',
'AwaitableGetJobResult',
'get_job',
'get_job_output',
]
@pulumi.output_type
class GetJobResult:
"""
Job Resource.
"""
def __init__(__self__, cancellation_reason=None, delayed_stage=None, delivery_info=None, delivery_type=None, details=None, error=None, id=None, identity=None, is_cancellable=None, is_cancellable_without_fee=None, is_deletable=None, is_prepare_to_ship_enabled=None, is_shipping_address_editable=None, location=None, name=None, reverse_shipping_details_update=None, reverse_transport_preference_update=None, sku=None, start_time=None, status=None, system_data=None, tags=None, transfer_type=None, type=None):
if cancellation_reason and not isinstance(cancellation_reason, str):
raise TypeError("Expected argument 'cancellation_reason' to be a str")
pulumi.set(__self__, "cancellation_reason", cancellation_reason)
if delayed_stage and not isinstance(delayed_stage, str):
raise TypeError("Expected argument 'delayed_stage' to be a str")
pulumi.set(__self__, "delayed_stage", delayed_stage)
if delivery_info and not isinstance(delivery_info, dict):
raise TypeError("Expected argument 'delivery_info' to be a dict")
pulumi.set(__self__, "delivery_info", delivery_info)
if delivery_type and not isinstance(delivery_type, str):
raise TypeError("Expected argument 'delivery_type' to be a str")
pulumi.set(__self__, "delivery_type", delivery_type)
if details and not isinstance(details, dict):
raise TypeError("Expected argument 'details' to be a dict")
pulumi.set(__self__, "details", details)
if error and not isinstance(error, dict):
raise TypeError("Expected argument 'error' to be a dict")
pulumi.set(__self__, "error", error)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if is_cancellable and not isinstance(is_cancellable, bool):
raise TypeError("Expected argument 'is_cancellable' to be a bool")
pulumi.set(__self__, "is_cancellable", is_cancellable)
if is_cancellable_without_fee and not isinstance(is_cancellable_without_fee, bool):
raise TypeError("Expected argument 'is_cancellable_without_fee' to be a bool")
pulumi.set(__self__, "is_cancellable_without_fee", is_cancellable_without_fee)
if is_deletable and not isinstance(is_deletable, bool):
raise TypeError("Expected argument 'is_deletable' to be a bool")
pulumi.set(__self__, "is_deletable", is_deletable)
if is_prepare_to_ship_enabled and not isinstance(is_prepare_to_ship_enabled, bool):
raise TypeError("Expected argument 'is_prepare_to_ship_enabled' to be a bool")
pulumi.set(__self__, "is_prepare_to_ship_enabled", is_prepare_to_ship_enabled)
if is_shipping_address_editable and not isinstance(is_shipping_address_editable, bool):
raise TypeError("Expected argument 'is_shipping_address_editable' to be a bool")
pulumi.set(__self__, "is_shipping_address_editable", is_shipping_address_editable)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if reverse_shipping_details_update and not isinstance(reverse_shipping_details_update, str):
raise TypeError("Expected argument 'reverse_shipping_details_update' to be a str")
pulumi.set(__self__, "reverse_shipping_details_update", reverse_shipping_details_update)
if reverse_transport_preference_update and not isinstance(reverse_transport_preference_update, str):
raise TypeError("Expected argument 'reverse_transport_preference_update' to be a str")
pulumi.set(__self__, "reverse_transport_preference_update", reverse_transport_preference_update)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if start_time and not isinstance(start_time, str):
raise TypeError("Expected argument 'start_time' to be a str")
pulumi.set(__self__, "start_time", start_time)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if transfer_type and not isinstance(transfer_type, str):
raise TypeError("Expected argument 'transfer_type' to be a str")
pulumi.set(__self__, "transfer_type", transfer_type)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="cancellationReason")
def cancellation_reason(self) -> str:
"""
Reason for cancellation.
"""
return pulumi.get(self, "cancellation_reason")
@property
@pulumi.getter(name="delayedStage")
def delayed_stage(self) -> str:
"""
Name of the stage where delay might be present.
"""
return pulumi.get(self, "delayed_stage")
@property
@pulumi.getter(name="deliveryInfo")
def delivery_info(self) -> Optional['outputs.JobDeliveryInfoResponse']:
"""
Delivery Info of Job.
"""
return pulumi.get(self, "delivery_info")
@property
@pulumi.getter(name="deliveryType")
def delivery_type(self) -> Optional[str]:
"""
Delivery type of Job.
"""
return pulumi.get(self, "delivery_type")
@property
@pulumi.getter
def details(self) -> Optional[Any]:
"""
Details of a job run. This field will only be sent for expand details filter.
"""
return pulumi.get(self, "details")
@property
@pulumi.getter
def error(self) -> 'outputs.CloudErrorResponse':
"""
Top level error for the job.
"""
return pulumi.get(self, "error")
@property
@pulumi.getter
def id(self) -> str:
"""
Id of the object.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.ResourceIdentityResponse']:
"""
Msi identity of the resource
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter(name="isCancellable")
def is_cancellable(self) -> bool:
"""
Describes whether the job is cancellable or not.
"""
return pulumi.get(self, "is_cancellable")
@property
@pulumi.getter(name="isCancellableWithoutFee")
def is_cancellable_without_fee(self) -> bool:
"""
Flag to indicate cancellation of scheduled job.
"""
return pulumi.get(self, "is_cancellable_without_fee")
@property
@pulumi.getter(name="isDeletable")
def is_deletable(self) -> bool:
"""
Describes whether the job is deletable or not.
"""
return pulumi.get(self, "is_deletable")
@property
@pulumi.getter(name="isPrepareToShipEnabled")
def is_prepare_to_ship_enabled(self) -> bool:
"""
Is Prepare To Ship Enabled on this job
"""
return pulumi.get(self, "is_prepare_to_ship_enabled")
@property
@pulumi.getter(name="isShippingAddressEditable")
def is_shipping_address_editable(self) -> bool:
"""
Describes whether the shipping address is editable or not.
"""
return pulumi.get(self, "is_shipping_address_editable")
@property
@pulumi.getter
def location(self) -> str:
"""
The location of the resource. This will be one of the supported and registered Azure Regions (e.g. West US, East US, Southeast Asia, etc.). The region of a resource cannot be changed once it is created, but if an identical region is specified on update the request will succeed.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the object.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="reverseShippingDetailsUpdate")
def reverse_shipping_details_update(self) -> str:
"""
The Editable status for Reverse Shipping Address and Contact Info
"""
return pulumi.get(self, "reverse_shipping_details_update")
@property
@pulumi.getter(name="reverseTransportPreferenceUpdate")
def reverse_transport_preference_update(self) -> str:
"""
The Editable status for Reverse Transport preferences
"""
return pulumi.get(self, "reverse_transport_preference_update")
@property
@pulumi.getter
def sku(self) -> 'outputs.SkuResponse':
"""
The sku type.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="startTime")
def start_time(self) -> str:
"""
Time at which the job was started in UTC ISO 8601 format.
"""
return pulumi.get(self, "start_time")
@property
@pulumi.getter
def status(self) -> str:
"""
Name of the stage which is in progress.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
The list of key value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups).
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="transferType")
def transfer_type(self) -> str:
"""
Type of the data transfer.
"""
return pulumi.get(self, "transfer_type")
@property
@pulumi.getter
def type(self) -> str:
"""
Type of the object.
"""
return pulumi.get(self, "type")
class AwaitableGetJobResult(GetJobResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetJobResult(
cancellation_reason=self.cancellation_reason,
delayed_stage=self.delayed_stage,
delivery_info=self.delivery_info,
delivery_type=self.delivery_type,
details=self.details,
error=self.error,
id=self.id,
identity=self.identity,
is_cancellable=self.is_cancellable,
is_cancellable_without_fee=self.is_cancellable_without_fee,
is_deletable=self.is_deletable,
is_prepare_to_ship_enabled=self.is_prepare_to_ship_enabled,
is_shipping_address_editable=self.is_shipping_address_editable,
location=self.location,
name=self.name,
reverse_shipping_details_update=self.reverse_shipping_details_update,
reverse_transport_preference_update=self.reverse_transport_preference_update,
sku=self.sku,
start_time=self.start_time,
status=self.status,
system_data=self.system_data,
tags=self.tags,
transfer_type=self.transfer_type,
type=self.type)
def get_job(expand: Optional[str] = None,
job_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetJobResult:
"""
Gets information about the specified job.
:param str expand: $expand is supported on details parameter for job, which provides details on the job stages.
:param str job_name: The name of the job Resource within the specified resource group. job names must be between 3 and 24 characters in length and use any alphanumeric and underscore only
:param str resource_group_name: The Resource Group Name
"""
__args__ = dict()
__args__['expand'] = expand
__args__['jobName'] = job_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:databox/v20230301:getJob', __args__, opts=opts, typ=GetJobResult).value
return AwaitableGetJobResult(
cancellation_reason=pulumi.get(__ret__, 'cancellation_reason'),
delayed_stage=pulumi.get(__ret__, 'delayed_stage'),
delivery_info=pulumi.get(__ret__, 'delivery_info'),
delivery_type=pulumi.get(__ret__, 'delivery_type'),
details=pulumi.get(__ret__, 'details'),
error=pulumi.get(__ret__, 'error'),
id=pulumi.get(__ret__, 'id'),
identity=pulumi.get(__ret__, 'identity'),
is_cancellable=pulumi.get(__ret__, 'is_cancellable'),
is_cancellable_without_fee=pulumi.get(__ret__, 'is_cancellable_without_fee'),
is_deletable=pulumi.get(__ret__, 'is_deletable'),
is_prepare_to_ship_enabled=pulumi.get(__ret__, 'is_prepare_to_ship_enabled'),
is_shipping_address_editable=pulumi.get(__ret__, 'is_shipping_address_editable'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
reverse_shipping_details_update=pulumi.get(__ret__, 'reverse_shipping_details_update'),
reverse_transport_preference_update=pulumi.get(__ret__, 'reverse_transport_preference_update'),
sku=pulumi.get(__ret__, 'sku'),
start_time=pulumi.get(__ret__, 'start_time'),
status=pulumi.get(__ret__, 'status'),
system_data=pulumi.get(__ret__, 'system_data'),
tags=pulumi.get(__ret__, 'tags'),
transfer_type=pulumi.get(__ret__, 'transfer_type'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_job)
def get_job_output(expand: Optional[pulumi.Input[Optional[str]]] = None,
job_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetJobResult]:
"""
Gets information about the specified job.
:param str expand: $expand is supported on details parameter for job, which provides details on the job stages.
:param str job_name: The name of the job Resource within the specified resource group. job names must be between 3 and 24 characters in length and use any alphanumeric and underscore only
:param str resource_group_name: The Resource Group Name
"""
...
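
# --- Usage sketch (not generated; the names below are placeholders): reading
# an existing Data Box job from a Pulumi program, assuming this provider
# module is installed as pulumi_azure_native.
#
#   import pulumi
#   from pulumi_azure_native.databox import v20230301 as databox
#
#   job = databox.get_job_output(
#       job_name="my-databox-job",
#       resource_group_name="my-rg",
#       expand="details",
#   )
#   pulumi.export("jobStatus", job.status)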
# ============================================================================
# Source file: cyrusimap/cyrus-imapd
#   /docsrc/exts/sphinxlocal/builders/gitstamp.py
#   License: BSD-2-Clause (permissive) | Language: Python
# ============================================================================
# -*- coding: utf-8 -*-
"""
    sphinxlocal.builders.gitstamp
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Inserts a git datestamp into the context as 'gitstamp',
to make it available for template use. Only runs for builders that
generate html. (not manpage)
Adds itself as a page context handler: gets invoked after source
is read but before html is output.
:version: 0.1
:author: Nicola Nye <nicolan@fastmail.com>
:copyright: Copyright 2007-2016 by the Cyrus team,
:license: BSD, see LICENSE for details.
"""
from sphinx import errors
import datetime
import os
import sys

# Module-level handle to the git repo; set in what_build_am_i() once we know
# the build output format is html.
g = None
# Gets the datestamp of the latest commit on the given file
# Converts the datestamp into something more readable
# Skips files whose datestamp we can't parse.
# Expected git datestamp format: 2017-06-07 11:57:38 +1000
# Output to June 7, 2017
# Use the DOCSRC environment variable to determine the root of the
# tree in git where the rst lives. Used if you are invoking this extension
# from a makefile external to the conf.py directory
def page_context_handler(app, pagename, templatename, context, doctree):
import git
global g
    if g is None:
        # We have already errored about this
        return
fullpagename = pagename
docsrc = ''
try:
docsrc = os.environ['DOCSRC'] + "/"
if docsrc != "/":
fullpagename = docsrc + pagename
except KeyError:
pass
# Don't barf on "genindex", "search", etc
if not os.path.isfile("%s.rst" % fullpagename):
return
try:
updated = g.log('--pretty=format:%ai','-n 1',"%s.rst" % fullpagename)
updated = updated[:10]
if updated == "":
# Don't datestamp generated rst's (e.g. imapd.conf.rst)
# Ideally want to check their source - lib/imapoptions, etc, but
# that involves getting their source/output pair into the extension.
return
context['gitstamp'] = datetime.datetime.strptime(updated, "%Y-%m-%d").strftime(app.config.gitstamp_fmt)
except git.exc.GitCommandError:
# File doesn't exist or something else went wrong.
raise errors.ExtensionError("Can't fetch git history for %s.rst. Is DOCSRC set correctly? (DOCSRC=%s)" % (fullpagename, docsrc))
except ValueError:
# Datestamp can't be parsed.
        app.info("%s: Can't parse datestamp ( %s ) for gitstamp; output won't have a last-updated time." % (pagename, updated))
# Only add the page context handler if we're generating html
def what_build_am_i(app):
global g
    if app.builder.format != 'html':
        return
try:
import git
except ImportError:
raise errors.ExtensionError("gitpython package not installed. Required to generate html. Please run: pip install gitpython")
    try:
        g = git.Git('.')
    except Exception:
        app.info(str(sys.exc_info()[0]))
        app.warn("gitstamp extension enabled, but no git repository found. No git datestamps will be generated.")
else:
app.add_config_value('gitstamp_fmt', "%b %d %Y", 'html')
app.connect('html-page-context', page_context_handler)
# We can't immediately add a page context handler: we need to wait until we
# know what the build output format is.
def setup(app):
app.connect('builder-inited', what_build_am_i)
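
# --- Wiring sketch (illustrative, not part of this module): enabling the
# extension from a Sphinx conf.py, assuming docsrc/exts is on sys.path and
# the gitpython package is installed.
#
#   import os, sys
#   sys.path.insert(0, os.path.abspath('exts'))
#
#   extensions = ['sphinxlocal.builders.gitstamp']
#   gitstamp_fmt = "%B %d, %Y"   # strftime format used for the page datestamp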
# ============================================================================
# Source file: wikimedia/pywikibot
#   /scripts/category_redirect.py
#   License: MIT (permissive) | Language: Python
# ============================================================================
#!/usr/bin/env python3
"""This bot will move pages out of redirected categories.
The bot will look for categories that are marked with a category redirect
template, take the first parameter of the template as the target of the
redirect, and move all pages and subcategories of the category there. It
also changes hard redirects into soft redirects, and fixes double redirects.
A log is written under <userpage>/category_redirect_log. Only category pages
that haven't been edited for a certain cooldown period (currently 7 days)
are taken into account.
The following parameters are supported:
-always           If used, the bot won't ask for confirmation before saving
                  its changes
-delay:#          Set an amount of days. If the category was edited more
                  recently than the given number of days, ignore it.
                  Default is 7.
-tiny Only loops over Category:Non-empty_category_redirects and
moves all images, pages and categories in redirect categories
to the target category.
Usage:
python pwb.py category_redirect [options]
.. note:: This script is a
:py:obj:`ConfigParserBot <bot.ConfigParserBot>`. All options
can be set within a settings file which is scripts.ini by default.
"""
#
# (C) Pywikibot team, 2008-2022
#
# Distributed under the terms of the MIT license.
#
import pickle
import re
import time
from contextlib import suppress
from datetime import timedelta
import pywikibot
from pywikibot import config, i18n, pagegenerators
from pywikibot.backports import Tuple, removeprefix
from pywikibot.bot import ConfigParserBot, SingleSiteBot
from pywikibot.exceptions import CircularRedirectError, Error, NoPageError
LOG_SIZE = 7 # Number of items to keep in active log
class CategoryRedirectBot(ConfigParserBot, SingleSiteBot):
"""Page category update bot.
.. versionchanged:: 7.0
CategoryRedirectBot is a ConfigParserBot
"""
update_options = {
'tiny': False, # use Non-empty category redirects only
'delay': 7, # cool down delay in days
}
def __init__(self, **kwargs) -> None:
"""Initializer."""
super().__init__(**kwargs)
self.catprefix = self.site.namespace(14) + ':'
self.log_text = []
self.edit_requests = []
self.problems = []
self.template_list = []
self.cat = None
self.log_page = pywikibot.Page(self.site,
'User:{}/category redirect log'
.format(self.site.username()))
# Localization:
# Category that contains all redirected category pages
self.cat_redirect_cat = {
'commons': 'Category:Category redirects',
'meta': 'Category:Maintenance of categories/Soft redirected '
'categories',
'ar': 'تصنيف:تحويلات تصنيفات ويكيبيديا',
'ary': 'تصنيف:Wikipedia soft redirected categories',
'arz': 'تصنيف:تحويلات تصانيف ويكيبيديا',
'ckb': 'پۆل:پۆلە ڕەوانەکراوە نەرمەکان',
'cs': 'Kategorie:Údržba:Zastaralé kategorie',
'da': 'Kategori:Omdirigeringskategorier',
'en': 'Category:Wikipedia soft redirected categories',
'es': 'Categoría:Wikipedia:Categorías redirigidas',
'fa': 'رده:ردههای منتقلشده',
'hi': 'श्रेणी:विकिपीडिया श्रेणी अनुप्रेषित',
'hu': 'Kategória:Kategóriaátirányítások',
'ja': 'Category:移行中のカテゴリ',
'ko': '분류:비어 있지 않은 분류 넘겨주기',
'no': 'Kategori:Wikipedia omdirigertekategorier',
'pl': 'Kategoria:Przekierowania kategorii',
'pt': 'Categoria:!Redirecionamentos de categorias',
'sco': 'Category:Wikipaedia soft redirectit categories',
'simple': 'Category:Category redirects',
'sh': 'Kategorija:Preusmjerene kategorije Wikipedije',
'sr': 'Категорија:Википедијине меко преусмерене категорије',
'ur': 'زمرہ:منتقل شدہ زمرہ جات',
'vi': 'Thể loại:Thể loại đổi hướng',
'zh': 'Category:已重定向的分类',
'ro': 'Categorie:Categorii de redirecționare',
}
# Category that contains non-empty redirected category pages
self.tiny_cat_redirect_cat = 'Q8099903'
self.move_comment = 'category_redirect-change-category'
self.redir_comment = 'category_redirect-add-template'
self.dbl_redir_comment = 'category_redirect-fix-double'
self.maint_comment = 'category_redirect-comment'
self.edit_request_text = i18n.twtranslate(
self.site, 'category_redirect-edit-request') + '\n~~~~'
self.edit_request_item = i18n.twtranslate(
self.site, 'category_redirect-edit-request-item')
def get_cat(self):
"""Specify the category page."""
if self.opt.tiny:
self.cat = self.site.page_from_repository(
self.tiny_cat_redirect_cat)
else:
cat_title = pywikibot.translate(self.site, self.cat_redirect_cat)
if cat_title:
self.cat = pywikibot.Category(pywikibot.Link(cat_title,
self.site))
return self.cat is not None
def move_contents(self, old_cat_title: str, new_cat_title: str,
edit_summary: str) -> Tuple[int, int]:
"""The worker function that moves pages out of oldCat into newCat."""
old_cat = pywikibot.Category(self.site, self.catprefix + old_cat_title)
new_cat = pywikibot.Category(self.site, self.catprefix + new_cat_title)
param = {
'oldCatLink': old_cat.title(),
'oldCatTitle': old_cat_title,
'newCatLink': new_cat.title(),
'newCatTitle': new_cat_title,
}
summary = edit_summary % param
# Move articles
found, moved = 0, 0
for article in old_cat.members():
found += 1
moved += article.change_category(old_cat, new_cat, summary=summary)
if article.namespace() != 10:
continue
# pass 2: look for template doc pages
for subpage in self.site.doc_subpage:
doc = pywikibot.Page(self.site, article.title() + subpage)
try:
doc.get()
except Error:
pass
else:
moved += doc.change_category(old_cat, new_cat,
summary=summary)
if found:
pywikibot.info(f'{old_cat}: {found} found, {moved} moved')
return found, moved
def ready_to_edit(self, cat):
"""Return True if cat not edited during cooldown period, else False."""
today = pywikibot.Timestamp.now()
deadline = today + timedelta(days=-self.opt.delay)
return deadline > cat.latest_revision.timestamp
def get_log_text(self):
"""Rotate log text and return the most recent text."""
try:
log_text = self.log_page.get()
except NoPageError:
log_text = ''
log_items = {}
header = None
for line in log_text.splitlines():
if line.startswith('==') and line.endswith('=='):
header = line[2:-2].strip()
if header is not None:
log_items.setdefault(header, [])
log_items[header].append(line)
if len(log_items) < LOG_SIZE:
return log_text
# sort by keys and keep the first (LOG_SIZE-1) values
keep = [text for (key, text) in
sorted(log_items.items(), reverse=True)[:LOG_SIZE - 1]]
log_text = '\n'.join('\n'.join(line for line in text) for text in keep)
# get permalink to older logs
history = list(self.log_page.revisions(total=LOG_SIZE))
# get the id of the newest log being archived
rotate_revid = history[-1].revid
# append permalink
message = i18n.twtranslate(
self.site,
'category_redirect-older-logs',
{'oldlogs': self.log_page.permalink(oldid=rotate_revid)})
log_text += ('\n\n' + message)
return log_text
def check_hard_redirect(self) -> None:
"""
Check for hard-redirected categories.
Check categories that are not already marked with an appropriate
softredirect template.
"""
pywikibot.info('Checking hard-redirect category pages.')
comment = i18n.twtranslate(self.site, self.redir_comment)
# generator yields all hard redirect pages in namespace 14
for page in self.site.allpages(namespace=14, filterredir=True,
content=True):
if page.isCategoryRedirect():
# this is already a soft-redirect, so skip it (for now)
continue
try:
target = page.getRedirectTarget()
except CircularRedirectError:
target = page
message = i18n.twtranslate(
self.site, 'category_redirect-problem-self-linked',
{'oldcat': page.title(as_link=True, textlink=True)})
self.problems.append(message)
except RuntimeError:
# race condition: someone else removed the redirect while we
# were checking for it
continue
if not target.is_categorypage():
message = i18n.twtranslate(
self.site, 'category_redirect-problem-hard', {
'oldcat': page.title(as_link=True, textlink=True),
'page': target.title(as_link=True, textlink=True)
})
self.problems.append(message)
continue
# this is a hard-redirect to a category page
newtext = ('{{%(template)s|%(cat)s}}'
% {'cat': target.title(with_ns=False),
'template': self.template_list[0]})
params = {
'ns': self.site.namespaces.TEMPLATE.custom_prefix(),
'template': self.template_list[0],
'oldcat': page.title(as_link=True, textlink=True)
}
try:
page.text = newtext
page.save(comment)
message = i18n.twtranslate(
self.site, 'category_redirect-log-added', params)
self.log_text.append(message)
except Error as e:
pywikibot.error(e)
message = i18n.twtranslate(
self.site, 'category_redirect-log-add-failed', params)
self.log_text.append(message)
def run(self) -> None:
"""Run the bot."""
# validate L10N
self.template_list = self.site.category_redirects()
if not self.template_list:
pywikibot.warning(f'No redirect templates defined for {self.site}')
return
if not self.get_cat():
pywikibot.warning(f'No redirect category found for {self.site}')
return
self.user = self.site.user() # invokes login()
self.newredirs = []
localtime = time.localtime()
today = '{:04d}-{:02d}-{:02d}'.format(*localtime[:3])
self.datafile = pywikibot.config.datafilepath(
f'{self.site.dbName()}-catmovebot-data')
try:
with open(self.datafile, 'rb') as inp:
self.record = pickle.load(inp)
except OSError:
self.record = {}
if self.record:
with open(self.datafile + '.bak', 'wb') as f:
pickle.dump(self.record, f, protocol=config.pickle_protocol)
# regex to match soft category redirects
# TODO: enhance and use textlib.MultiTemplateMatchBuilder
# note that any templates containing optional "category:" are
# incorrect and will be fixed by the bot
template_regex = re.compile(
r"""{{{{\s*(?:{prefix}\s*:\s*)? # optional "template:"
(?:{template})\s*\| # catredir template name
(\s*{catns}\s*:\s*)? # optional "category:"
([^|}}]+) # redirect target cat
(?:\|[^|}}]*)*}}}} # optional arguments 2+, ignored
""".format(prefix=self.site.namespace(10).lower(),
template='|'.join(item.replace(' ', '[ _]+')
for item in self.template_list),
catns=self.site.namespace(14)),
re.I | re.X)
self.check_hard_redirect()
comment = i18n.twtranslate(self.site, self.move_comment)
counts = {}
nonemptypages = []
redircat = self.cat
pywikibot.info('\nChecking {} category redirect pages'
.format(redircat.categoryinfo['subcats']))
catpages = set()
for cat in redircat.subcategories():
catpages.add(cat)
cat_title = cat.title(with_ns=False)
if 'category redirect' in cat_title:
message = i18n.twtranslate(
self.site, 'category_redirect-log-ignoring',
{'oldcat': cat.title(as_link=True, textlink=True)})
self.log_text.append(message)
continue
if hasattr(cat, '_catinfo'):
# skip empty categories that don't return a "categoryinfo" key
catdata = cat.categoryinfo
if 'size' in catdata and int(catdata['size']):
# save those categories that have contents
nonemptypages.append(cat)
if cat_title not in self.record:
# make sure every redirect has a self.record entry
self.record[cat_title] = {today: None}
with suppress(Error):
self.newredirs.append('*# {} → {}'.format(
cat.title(as_link=True, textlink=True),
cat.getCategoryRedirectTarget().title(
as_link=True, textlink=True)))
# do a null edit on cat
with suppress(Exception):
cat.save()
# delete self.record entries for non-existent categories
for cat_name in list(self.record):
if pywikibot.Category(self.site,
self.catprefix + cat_name) not in catpages:
del self.record[cat_name]
pywikibot.info('\nMoving pages out of {} redirected categories.'
.format(len(nonemptypages)))
for cat in pagegenerators.PreloadingGenerator(nonemptypages):
i18n_param = {'oldcat': cat.title(as_link=True, textlink=True)}
try:
if not cat.isCategoryRedirect():
message = i18n.twtranslate(
self.site,
'category_redirect-log-false-positive',
i18n_param
)
self.log_text.append(message)
continue
except Error:
message = i18n.twtranslate(self.site,
'category_redirect-log-not-loaded',
i18n_param)
self.log_text.append(message)
continue
cat_title = cat.title(with_ns=False)
if not self.ready_to_edit(cat):
counts[cat_title] = None
message = i18n.twtranslate(self.site,
'category_redirect-log-skipping',
i18n_param)
self.log_text.append(message)
continue
dest = cat.getCategoryRedirectTarget()
if not dest.exists():
message = i18n.twtranslate(
self.site, 'category_redirect-problem-redirects', {
'oldcat': cat.title(as_link=True, textlink=True),
'redpage': dest.title(as_link=True, textlink=True)
})
self.problems.append(message)
# do a null edit on cat to update any special redirect
# categories this wiki might maintain
with suppress(Exception):
cat.save()
continue
if dest.isCategoryRedirect():
double = dest.getCategoryRedirectTarget()
if double in (dest, cat):
message = i18n.twtranslate(self.site,
'category_redirect-log-loop',
i18n_param)
self.log_text.append(message)
# do a null edit on cat
with suppress(Exception):
cat.save()
else:
message = i18n.twtranslate(
self.site, 'category_redirect-log-double', {
'oldcat': cat.title(as_link=True, textlink=True),
'newcat': dest.title(as_link=True, textlink=True),
'targetcat': double.title(
as_link=True, textlink=True)
})
self.log_text.append(message)
oldtext = cat.text
# remove the old redirect from the old text,
# leaving behind any non-redirect text
oldtext = template_regex.sub('', oldtext)
newtext = ('{{%(redirtemp)s|%(ncat)s}}'
% {'redirtemp': self.template_list[0],
'ncat': double.title(with_ns=False)})
newtext += oldtext.strip()
try:
cat.text = newtext
cat.save(i18n.twtranslate(self.site,
self.dbl_redir_comment))
except Error as e:
message = i18n.twtranslate(
self.site, 'category_redirect-log-failed',
{'error': e})
self.log_text.append(message)
continue
found, moved = self.move_contents(
cat_title, dest.title(with_ns=False), comment)
if found:
self.record[cat_title][today] = found
message = i18n.twtranslate(
self.site, 'category_redirect-log-moved', {
'oldcat': cat.title(as_link=True, textlink=True),
'found': found,
'moved': moved
})
self.log_text.append(message)
counts[cat_title] = found
# do a null edit on cat
with suppress(Exception):
cat.save()
self.teardown()
def teardown(self) -> None:
"""Write self.record to file and save logs."""
with open(self.datafile, 'wb') as f:
pickle.dump(self.record, f, protocol=config.pickle_protocol)
self.log_text.sort()
self.problems.sort()
self.newredirs.sort()
comment = i18n.twtranslate(self.site, self.maint_comment)
message = i18n.twtranslate(self.site, 'category_redirect-log-new')
date_line = '\n== {}-{:02d}-{:02d}T{:02d}:{:02d}:{:02d}Z ==\n' \
.format(*time.gmtime()[:6])
self.log_page.text = (date_line
+ '\n'.join(self.log_text)
+ '\n* ' + message + '\n'
+ '\n'.join(self.newredirs)
+ '\n' + '\n'.join(self.problems)
+ '\n' + self.get_log_text())
self.log_page.save(comment)
if self.edit_requests:
edit_request_page = pywikibot.Page(
self.site, f'User:{self.user}/category edit requests')
edit_request_page.text = (self.edit_request_text
% {'itemlist': '\n' + '\n'.join(
(self.edit_request_item % item)
for item in self.edit_requests)})
edit_request_page.save(comment)
def main(*args: str) -> None:
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
:param args: command line arguments
"""
options = {}
for arg in pywikibot.handle_args(args):
if arg.startswith('-delay:'):
options['delay'] = int(removeprefix(arg, '-delay:'))
else:
            # generic handling of boolean options
options[arg[1:]] = True
bot = CategoryRedirectBot(**options)
bot.run()
if __name__ == '__main__':
main()
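
# --- Illustration (hedged; mirrors the soft-redirect pattern built in run(),
# with the template name "Category redirect" and English namespace names
# assumed). The redirect target ends up in the second capture group:
#
#   import re
#
#   demo_regex = re.compile(
#       r"""{{\s*(?:template\s*:\s*)?        # optional "template:"
#           (?:Category[ _]+redirect)\s*\|   # catredir template name
#           (\s*Category\s*:\s*)?            # optional "category:"
#           ([^|}]+)                         # redirect target cat
#           (?:\|[^|}]*)*}}                  # optional arguments 2+, ignored
#       """, re.I | re.X)
#
#   m = demo_regex.search("{{Category redirect|Category:New name|reason}}")
#   assert m and m.group(2).strip() == "New name"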
# ============================================================================
# Source file: comic/grand-challenge.org
#   /app/grandchallenge/challenges/models.py
#   License: Apache-2.0 (permissive) | Language: Python
# ============================================================================
import datetime
import logging
import math
from itertools import chain, product
from actstream.actions import follow, unfollow
from actstream.models import Follow
from django.conf import settings
from django.contrib.auth.models import Group
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres.fields import ArrayField, CICharField
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.core.validators import (
MaxValueValidator,
MinValueValidator,
validate_slug,
)
from django.db import models
from django.db.models.signals import post_delete, pre_delete
from django.db.transaction import on_commit
from django.dispatch import receiver
from django.template.loader import render_to_string
from django.utils.functional import cached_property
from django.utils.html import format_html
from django.utils.text import get_valid_filename
from django.utils.translation import gettext_lazy as _
from guardian.models import GroupObjectPermissionBase, UserObjectPermissionBase
from guardian.shortcuts import assign_perm
from guardian.utils import get_anonymous_user
from machina.apps.forum.models import Forum
from machina.apps.forum_permission.models import (
ForumPermission,
GroupForumPermission,
UserForumPermission,
)
from stdimage import JPEGField
from grandchallenge.anatomy.models import BodyStructure
from grandchallenge.challenges.emails import (
send_challenge_requested_email_to_requester,
send_challenge_requested_email_to_reviewers,
)
from grandchallenge.challenges.utils import ChallengeTypeChoices
from grandchallenge.core.models import UUIDModel
from grandchallenge.core.storage import (
get_banner_path,
get_logo_path,
get_social_image_path,
protected_s3_storage,
public_s3_storage,
)
from grandchallenge.core.utils.access_requests import (
AccessRequestHandlingOptions,
)
from grandchallenge.core.validators import (
ExtensionValidator,
MimeTypeValidator,
)
from grandchallenge.evaluation.tasks import assign_evaluation_permissions
from grandchallenge.evaluation.utils import (
StatusChoices,
SubmissionKindChoices,
)
from grandchallenge.modalities.models import ImagingModality
from grandchallenge.organizations.models import Organization
from grandchallenge.pages.models import Page
from grandchallenge.publications.fields import IdentifierField
from grandchallenge.publications.models import Publication
from grandchallenge.subdomains.utils import reverse
from grandchallenge.task_categories.models import TaskType
logger = logging.getLogger(__name__)
class ChallengeManager(models.Manager):
    def non_hidden(self):
        """Exclude hidden challenges."""
        return self.filter(hidden=False)
def validate_nounderscores(value):
if "_" in value:
raise ValidationError("Underscores (_) are not allowed.")
def validate_short_name(value):
if value.lower() in settings.DISALLOWED_CHALLENGE_NAMES:
raise ValidationError("That name is not allowed.")
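
# --- Behaviour sketch (illustrative, not part of the models): both helpers
# above raise django.core.exceptions.ValidationError on bad input.
#
#   validate_nounderscores("my_challenge")   # raises: underscores not allowed
#   validate_short_name("challenge")         # raises if the lower-cased name
#                                            # is in settings.DISALLOWED_CHALLENGE_NAMES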
class ChallengeSeries(models.Model):
name = CICharField(max_length=64, blank=False, unique=True)
url = models.URLField(blank=True)
class Meta:
ordering = ("name",)
verbose_name_plural = "Challenge Series"
def __str__(self):
return f"{self.name}"
@property
def badge(self):
return format_html(
(
'<span class="badge badge-info above-stretched-link" '
'title="Associated with {0}"><i class="fas fa-globe fa-fw">'
"</i> {0}</span>"
),
self.name,
)
class ChallengeBase(models.Model):
creator = models.ForeignKey(
settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL
)
short_name = CICharField(
max_length=50,
blank=False,
help_text=(
"short name used in url, specific css, files etc. "
"No spaces allowed"
),
validators=[
validate_nounderscores,
validate_slug,
validate_short_name,
],
unique=True,
)
title = models.CharField(
max_length=64,
blank=True,
default="",
help_text=(
"The name of the challenge that is displayed on the All Challenges"
" page. If this is blank the short name of the challenge will be "
"used."
),
)
task_types = models.ManyToManyField(
TaskType, blank=True, help_text="What type of task is this challenge?"
)
modalities = models.ManyToManyField(
ImagingModality,
blank=True,
help_text="What imaging modalities are used in this challenge?",
)
structures = models.ManyToManyField(
BodyStructure,
blank=True,
help_text="What structures are used in this challenge?",
)
class Meta:
abstract = True
class Challenge(ChallengeBase):
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
description = models.CharField(
max_length=1024,
default="",
blank=True,
help_text="Short summary of this project, max 1024 characters.",
)
logo = JPEGField(
upload_to=get_logo_path,
storage=public_s3_storage,
blank=True,
help_text="A logo for this challenge. Should be square with a resolution of 640x640 px or higher.",
variations=settings.STDIMAGE_LOGO_VARIATIONS,
)
social_image = JPEGField(
upload_to=get_social_image_path,
storage=public_s3_storage,
blank=True,
help_text="An image for this challenge which is displayed when you post the link on social media. Should have a resolution of 640x320 px (1280x640 px for best display).",
variations=settings.STDIMAGE_SOCIAL_VARIATIONS,
)
hidden = models.BooleanField(
default=True,
help_text="Do not display this Challenge in any public overview",
)
educational = models.BooleanField(
default=False, help_text="It is an educational challenge"
)
workshop_date = models.DateField(
null=True,
blank=True,
help_text=(
"Date on which the workshop belonging to this project will be held"
),
)
event_name = models.CharField(
max_length=1024,
default="",
blank=True,
null=True,
help_text="The name of the event the workshop will be held at",
)
event_url = models.URLField(
blank=True,
null=True,
help_text="Website of the event which will host the workshop",
)
publications = models.ManyToManyField(
Publication,
blank=True,
help_text="Which publications are associated with this challenge?",
)
data_license_agreement = models.TextField(
blank=True,
help_text="What is the data license agreement for this challenge?",
)
series = models.ManyToManyField(
ChallengeSeries,
blank=True,
help_text="Which challenge series is this associated with?",
)
organizations = models.ManyToManyField(
Organization,
blank=True,
help_text="The organizations associated with this challenge",
related_name="%(class)ss",
)
number_of_training_cases = models.IntegerField(blank=True, null=True)
number_of_test_cases = models.IntegerField(blank=True, null=True)
filter_classes = ArrayField(
CICharField(max_length=32), default=list, editable=False
)
highlight = models.BooleanField(
default=False,
help_text="Should this challenge be advertised on the home page?",
)
banner = JPEGField(
upload_to=get_banner_path,
storage=public_s3_storage,
blank=True,
help_text=(
"Image that gets displayed at the top of each page. "
"Recommended resolution 2200x440 px."
),
variations=settings.STDIMAGE_BANNER_VARIATIONS,
)
disclaimer = models.CharField(
max_length=2048,
default="",
blank=True,
null=True,
help_text=(
"Optional text to show on each page in the project. "
"For showing 'under construction' type messages"
),
)
access_request_handling = models.CharField(
max_length=25,
choices=AccessRequestHandlingOptions.choices,
default=AccessRequestHandlingOptions.MANUAL_REVIEW,
help_text=("How would you like to handle access requests?"),
)
use_registration_page = models.BooleanField(
default=True,
help_text="If true, show a registration page on the challenge site.",
)
registration_page_text = models.TextField(
default="",
blank=True,
help_text=(
"The text to use on the registration page, you could include "
"a data usage agreement here. You can use HTML markup here."
),
)
use_workspaces = models.BooleanField(default=False)
use_teams = models.BooleanField(
default=False,
help_text=(
"If true, users are able to form teams to participate in "
"this challenge together."
),
)
admins_group = models.OneToOneField(
Group,
editable=False,
on_delete=models.PROTECT,
related_name="admins_of_challenge",
)
participants_group = models.OneToOneField(
Group,
editable=False,
on_delete=models.PROTECT,
related_name="participants_of_challenge",
)
forum = models.OneToOneField(
Forum, editable=False, on_delete=models.PROTECT
)
display_forum_link = models.BooleanField(
default=False,
help_text="Display a link to the challenge forum in the nav bar.",
)
cached_num_participants = models.PositiveIntegerField(
editable=False, default=0
)
cached_num_results = models.PositiveIntegerField(editable=False, default=0)
cached_latest_result = models.DateTimeField(
editable=False, blank=True, null=True
)
contact_email = models.EmailField(
blank=True,
default="",
help_text="This email will be listed as the contact email for the challenge and will be visible to all users of Grand Challenge.",
)
accumulated_compute_cost_in_cents = models.IntegerField(
default=0, blank=True
)
accumulated_docker_storage_cost_in_cents = models.IntegerField(
default=0, blank=True
)
objects = ChallengeManager()
class Meta:
verbose_name = "challenge"
verbose_name_plural = "challenges"
ordering = ("pk",)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._hidden_orig = self.hidden
def __str__(self):
return self.short_name
@property
def public(self):
"""Helper property for consistency with other objects"""
return not self.hidden
@property
def year(self):
if self.workshop_date:
return self.workshop_date.year
else:
return self.created.year
@property
def upcoming_workshop_date(self):
if self.workshop_date and self.workshop_date > datetime.date.today():
return self.workshop_date
def save(self, *args, **kwargs):
adding = self._state.adding
if adding:
self.create_groups()
self.create_forum()
super().save(*args, **kwargs)
if adding:
if self.creator:
self.add_admin(user=self.creator)
self.update_permissions()
self.create_forum_permissions()
self.create_default_pages()
if adding or self.hidden != self._hidden_orig:
on_commit(
lambda: assign_evaluation_permissions.apply_async(
kwargs={
"phase_pks": list(
self.phase_set.values_list("id", flat=True)
)
}
)
)
self.update_user_forum_permissions()
def update_permissions(self):
assign_perm("change_challenge", self.admins_group, self)
def create_forum_permissions(self):
participant_group_perms = {
"can_see_forum",
"can_read_forum",
"can_start_new_topics",
"can_reply_to_topics",
"can_delete_own_posts",
"can_edit_own_posts",
"can_post_without_approval",
"can_create_polls",
"can_vote_in_polls",
}
admin_group_perms = {
"can_lock_topics",
"can_edit_posts",
"can_delete_posts",
"can_approve_posts",
"can_reply_to_locked_topics",
"can_post_announcements",
"can_post_stickies",
*participant_group_perms,
}
permissions = ForumPermission.objects.filter(
codename__in=admin_group_perms
).values_list("codename", "pk")
permissions = {codename: pk for codename, pk in permissions}
GroupForumPermission.objects.bulk_create(
chain(
(
GroupForumPermission(
permission_id=permissions[codename],
group=self.participants_group,
forum=self.forum,
has_perm=True,
)
for codename in participant_group_perms
),
(
GroupForumPermission(
permission_id=permissions[codename],
group=self.admins_group,
forum=self.forum,
has_perm=True,
)
for codename in admin_group_perms
),
)
)
UserForumPermission.objects.bulk_create(
UserForumPermission(
permission_id=permissions[codename],
**{user: True},
forum=self.forum,
has_perm=not self.hidden,
)
for codename, user in product(
["can_see_forum", "can_read_forum"],
["anonymous_user", "authenticated_user"],
)
)
def update_user_forum_permissions(self):
perms = UserForumPermission.objects.filter(
permission__codename__in=["can_see_forum", "can_read_forum"],
forum=self.forum,
)
for p in perms:
p.has_perm = not self.hidden
UserForumPermission.objects.bulk_update(perms, ["has_perm"])
def create_groups(self):
# Create the groups only on first save
admins_group = Group.objects.create(name=f"{self.short_name}_admins")
participants_group = Group.objects.create(
name=f"{self.short_name}_participants"
)
self.admins_group = admins_group
self.participants_group = participants_group
def create_forum(self):
f, created = Forum.objects.get_or_create(
name=settings.FORUMS_CHALLENGE_CATEGORY_NAME, type=Forum.FORUM_CAT
)
if created:
UserForumPermission.objects.bulk_create(
UserForumPermission(
permission_id=perm_id,
**{user: True},
forum=f,
has_perm=True,
)
for perm_id, user in product(
ForumPermission.objects.filter(
codename__in=["can_see_forum", "can_read_forum"]
).values_list("pk", flat=True),
["anonymous_user", "authenticated_user"],
)
)
self.forum = Forum.objects.create(
name=self.title if self.title else self.short_name,
parent=f,
type=Forum.FORUM_POST,
)
def create_default_pages(self):
Page.objects.create(
display_title=self.short_name,
html=render_to_string(
"pages/defaults/home.html", {"challenge": self}
),
challenge=self,
permission_level=Page.ALL,
)
def is_admin(self, user) -> bool:
"""Determines if this user is an admin of this challenge."""
return (
user.is_superuser
or user.groups.filter(pk=self.admins_group.pk).exists()
)
def is_participant(self, user) -> bool:
"""Determines if this user is a participant of this challenge."""
return (
user.is_superuser
or user.groups.filter(pk=self.participants_group.pk).exists()
)
def get_admins(self):
"""Return all admins of this challenge."""
return self.admins_group.user_set.all()
def get_participants(self):
"""Return all participants of this challenge."""
return self.participants_group.user_set.all()
def get_absolute_url(self):
return reverse(
"pages:home", kwargs={"challenge_short_name": self.short_name}
)
def add_participant(self, user):
if user != get_anonymous_user():
user.groups.add(self.participants_group)
follow(
user=user, obj=self.forum, actor_only=False, send_action=False
)
else:
raise ValueError("You cannot add the anonymous user to this group")
def remove_participant(self, user):
user.groups.remove(self.participants_group)
unfollow(user=user, obj=self.forum, send_action=False)
def add_admin(self, user):
if user != get_anonymous_user():
user.groups.add(self.admins_group)
follow(
user=user, obj=self.forum, actor_only=False, send_action=False
)
else:
raise ValueError("You cannot add the anonymous user to this group")
def remove_admin(self, user):
user.groups.remove(self.admins_group)
unfollow(user=user, obj=self.forum, send_action=False)
@property
def status(self):
phase_status = {phase.status for phase in self.phase_set.all()}
if StatusChoices.OPEN in phase_status:
status = StatusChoices.OPEN
elif {StatusChoices.COMPLETED} == phase_status:
status = StatusChoices.COMPLETED
elif StatusChoices.OPENING_SOON in phase_status:
status = StatusChoices.OPENING_SOON
else:
status = StatusChoices.CLOSED
return status
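
    # --- Example (illustrative): with phase statuses {OPEN, COMPLETED} the
    # challenge reports OPEN; it reports COMPLETED only when *every* phase is
    # COMPLETED, OPENING_SOON when no phase is open but at least one will
    # open, and CLOSED otherwise.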
@property
def challenge_type(self):
phase_types = {phase.submission_kind for phase in self.phase_set.all()}
# as long as one of the phases is type 2,
# the challenge is classified as type 2
if SubmissionKindChoices.ALGORITHM in phase_types:
challenge_type = ChallengeTypeChoices.T2
else:
challenge_type = ChallengeTypeChoices.T1
return challenge_type
@property
def status_badge_string(self):
if self.status == StatusChoices.OPEN:
detail = [
phase.submission_status_string
for phase in self.phase_set.all()
if phase.status == StatusChoices.OPEN
]
if len(detail) > 1:
# if there are multiple open phases it is unclear which
# status to print, so stay vague
detail = ["Accepting submissions"]
elif self.status == StatusChoices.COMPLETED:
detail = ["Challenge completed"]
elif self.status == StatusChoices.CLOSED:
detail = ["Not accepting submissions"]
elif self.status == StatusChoices.OPENING_SOON:
start_date = min(
(
phase.submissions_open_at
for phase in self.phase_set.all()
if phase.status == StatusChoices.OPENING_SOON
),
default=None,
)
phase = (
self.phase_set.filter(submissions_open_at=start_date)
.order_by("-created")
.first()
)
detail = [phase.submission_status_string]
else:
raise NotImplementedError(f"{self.status} not handled")
return detail[0]
@cached_property
def visible_phases(self):
return self.phase_set.filter(public=True)
@property
def exceeds_total_number_of_submissions_allowed(self):
return any(
phase.exceeds_total_number_of_submissions_allowed
for phase in self.phase_set.all()
)
@property
def exceeds_70_percent_of_submission_allowed(self):
return any(
phase.percent_of_total_submissions_allowed > 70
for phase in self.phase_set.all()
if phase.percent_of_total_submissions_allowed
)
@property
def total_number_of_submissions_defined(self):
return any(
phase.total_number_of_submissions_allowed
for phase in self.phase_set.all()
)
class ChallengeUserObjectPermission(UserObjectPermissionBase):
content_object = models.ForeignKey(Challenge, on_delete=models.CASCADE)
class ChallengeGroupObjectPermission(GroupObjectPermissionBase):
content_object = models.ForeignKey(Challenge, on_delete=models.CASCADE)
@receiver(post_delete, sender=Challenge)
def delete_challenge_groups_hook(*_, instance: Challenge, using, **__):
"""
Deletes the related groups.
We use a signal rather than overriding delete() to catch usages of
bulk_delete.
"""
try:
instance.admins_group.delete(using=using)
except ObjectDoesNotExist:
pass
try:
instance.participants_group.delete(using=using)
except ObjectDoesNotExist:
pass
@receiver(pre_delete, sender=Challenge)
def delete_challenge_follows(*_, instance: Challenge, **__):
ct = ContentType.objects.filter(
app_label=instance._meta.app_label, model=instance._meta.model_name
).get()
Follow.objects.filter(object_id=instance.pk, content_type=ct).delete()
def submission_pdf_path(instance, filename):
return (
f"{instance._meta.app_label.lower()}/"
f"{instance._meta.model_name.lower()}/"
f"{instance.pk}/"
f"{get_valid_filename(filename)}"
)
class ChallengeRequest(UUIDModel, ChallengeBase):
class ChallengeRequestStatusChoices(models.TextChoices):
ACCEPTED = "ACPT", _("Accepted")
REJECTED = "RJCT", _("Rejected")
PENDING = "PEND", _("Pending")
status = models.CharField(
max_length=4,
choices=ChallengeRequestStatusChoices.choices,
default=ChallengeRequestStatusChoices.PENDING,
)
abstract = models.TextField(
help_text="Provide a summary of the challenge purpose.",
)
contact_email = models.EmailField(
help_text="Please provide an email that our team can use to contact "
"you should there be any questions about your request.",
)
start_date = models.DateField(
help_text="Estimated start date for this challenge.",
)
end_date = models.DateField(
help_text="Estimated end date for this challenge. Please note that we aim to "
"keep challenges open for submission for at least 3 years after "
"the official end date if possible.",
)
organizers = models.TextField(
help_text="Provide information about the organizing team (names and affiliations)",
)
affiliated_event = models.CharField(
blank=True,
max_length=50,
help_text="Is this challenge part of a workshop or conference? If so, which one?",
)
structured_challenge_submission_form = models.FileField(
null=True,
blank=True,
upload_to=submission_pdf_path,
storage=protected_s3_storage,
validators=[
ExtensionValidator(allowed_extensions=(".pdf",)),
MimeTypeValidator(allowed_types=("application/pdf",)),
],
)
challenge_setup = models.TextField(
help_text="Describe the challenge set-up."
)
data_set = models.TextField(
help_text="Describe the training and test datasets you are planning to use."
)
submission_assessment = models.TextField(
help_text="Define the metrics you will use to assess and rank "
"participants’ submissions."
)
challenge_publication = models.TextField(
help_text="Please indicate if you plan to coordinate a publication "
"of the challenge results."
)
code_availability = models.TextField(
help_text="Will the participants’ code be accessible after the challenge?"
)
expected_number_of_teams = models.PositiveIntegerField(
help_text="How many teams do you expect to participate in your challenge?",
validators=[MinValueValidator(limit_value=1)],
)
average_algorithm_container_size_in_gb = models.PositiveIntegerField(
default=6,
help_text="Average algorithm container size in GB.",
validators=[MinValueValidator(limit_value=1)],
)
average_number_of_containers_per_team = models.PositiveIntegerField(
default=5,
help_text="Average number of algorithm containers per team.",
validators=[MinValueValidator(limit_value=1)],
)
inference_time_limit_in_minutes = models.PositiveIntegerField(
blank=True,
null=True,
help_text="Average run time per algorithm job in minutes.",
validators=[
MinValueValidator(limit_value=1),
MaxValueValidator(limit_value=60),
],
)
average_size_of_test_image_in_mb = models.PositiveIntegerField(
null=True,
blank=True,
help_text="Average size of a test image in MB.",
validators=[
MinValueValidator(limit_value=1),
MaxValueValidator(limit_value=10000),
],
)
phase_1_number_of_submissions_per_team = models.PositiveIntegerField(
null=True,
blank=True,
help_text="How many submissions do you expect per team in this phase?",
)
phase_2_number_of_submissions_per_team = models.PositiveIntegerField(
null=True,
blank=True,
help_text="How many submissions do you expect per team in this phase?",
)
phase_1_number_of_test_images = models.PositiveIntegerField(
null=True,
blank=True,
help_text="Number of test images for this phase.",
)
phase_2_number_of_test_images = models.PositiveIntegerField(
null=True,
blank=True,
help_text="Number of test images for this phase.",
)
number_of_tasks = models.PositiveIntegerField(
default=1,
help_text="If your challenge has multiple tasks, we multiply the "
"phase 1 and 2 cost estimates by the number of tasks.",
validators=[MinValueValidator(limit_value=1)],
)
budget_for_hosting_challenge = models.PositiveIntegerField(
default=0,
null=True,
blank=True,
help_text="What is your budget for hosting this challenge? Please be reminded of our <a href='/challenge-policy-and-pricing/'>challenge pricing policy</a>.",
)
long_term_commitment = models.BooleanField(
null=True,
blank=True,
)
long_term_commitment_extra = models.CharField(
max_length=2000,
blank=True,
)
data_license = models.BooleanField(
null=True,
blank=True,
)
data_license_extra = models.CharField(
max_length=2000,
blank=True,
)
comments = models.TextField(
blank=True,
help_text="If you have any comments, remarks or questions, please leave them here.",
)
algorithm_inputs = models.TextField(
blank=True,
help_text="What are the inputs to the algorithms submitted as solutions to "
"your challenge going to be? "
"Please describe in detail "
"what the input(s) reflect(s), for example, "
"MRI scan of the brain, or chest X-ray. Grand Challenge only "
"supports .mha and .tiff image files and json files for algorithms.",
)
algorithm_outputs = models.TextField(
blank=True,
help_text="What are the outputs to the algorithms submitted as solutions to "
"your challenge going to be? "
"Please describe in detail what the output(s) "
"reflect(s), for example, probability of a positive PCR result, or "
"stroke lesion segmentation. ",
)
structured_challenge_submission_doi = IdentifierField(
blank=True,
help_text="The DOI, e.g., 10.5281/zenodo.6362337, or the arXiv id, e.g., 2006.12449 of your challenge submission PDF.",
)
challenge_fee_agreement = models.BooleanField(
blank=False,
default=False,
)
def __str__(self):
return self.title
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._orig_status = self.status
def get_absolute_url(self):
return reverse("challenges:requests-detail", kwargs={"pk": self.pk})
def save(self, *args, **kwargs):
adding = self._state.adding
super().save(*args, **kwargs)
if adding:
send_challenge_requested_email_to_reviewers(self)
send_challenge_requested_email_to_requester(self)
def create_challenge(self):
challenge = Challenge(
title=self.title,
short_name=self.short_name,
creator=self.creator,
hidden=True,
contact_email=self.contact_email,
)
challenge.full_clean()
challenge.save()
challenge.task_types.set(self.task_types.all())
challenge.modalities.set(self.modalities.all())
challenge.structures.set(self.structures.all())
challenge.save()
return challenge
@property
def budget_fields(self):
budget_fields = (
"expected_number_of_teams",
"number_of_tasks",
"inference_time_limit_in_minutes",
"average_size_of_test_image_in_mb",
"phase_1_number_of_submissions_per_team",
"phase_1_number_of_test_images",
"phase_2_number_of_submissions_per_team",
"phase_2_number_of_test_images",
)
return {
field.verbose_name: field.value_to_string(self)
for field in self._meta.fields
if field.name in budget_fields
}
@cached_property
def budget(self):
if (
self.inference_time_limit_in_minutes is not None
and self.phase_1_number_of_test_images is not None
and self.phase_1_number_of_submissions_per_team is not None
and self.average_size_of_test_image_in_mb is not None
and self.phase_2_number_of_test_images is not None
and self.phase_2_number_of_submissions_per_team is not None
):
compute_costs = settings.CHALLENGES_COMPUTE_COST_CENTS_PER_HOUR
s3_storage_costs = (
settings.CHALLENGES_S3_STORAGE_COST_CENTS_PER_TB_PER_YEAR
)
ecr_storage_costs = (
settings.CHALLENGES_ECR_STORAGE_COST_CENTS_PER_TB_PER_YEAR
)
budget = {
"Base cost": settings.CHALLENGE_BASE_COST_IN_EURO,
"Data storage cost for phase 1": None,
"Compute costs for phase 1": None,
"Total phase 1": None,
"Data storage cost for phase 2": None,
"Compute costs for phase 2": None,
"Total phase 2": None,
"Docker storage cost": None,
"Total": None,
}
# calculate budget for phase 1
budget["Data storage cost for phase 1"] = (
math.ceil(
self.phase_1_number_of_test_images
* self.average_size_of_test_image_in_mb
* s3_storage_costs
* self.number_of_tasks
/ 1000000
/ 100
/ 10,
)
* 10
)
budget["Compute costs for phase 1"] = (
math.ceil(
self.phase_1_number_of_test_images
* self.phase_1_number_of_submissions_per_team
* self.expected_number_of_teams
* self.inference_time_limit_in_minutes
* compute_costs
* self.number_of_tasks
/ 60
/ 100
/ 10,
)
* 10
)
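            # Worked example of the line above (hypothetical numbers, one
            # task): 100 test images * 10 submissions * 20 teams * a
            # 10-minute limit * 100 cents/hour / 60 / 100 ≈ 3333.33 euro;
            # the math.ceil(value / 10) * 10 pattern used throughout then
            # rounds each line item up to the nearest 10, giving 3340.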
budget["Total phase 1"] = (
math.ceil(
(
budget["Data storage cost for phase 1"]
+ budget["Compute costs for phase 1"]
)
/ 10,
)
* 10
)
# calculate budget for phase 2
budget["Data storage cost for phase 2"] = (
math.ceil(
self.phase_2_number_of_test_images
* self.average_size_of_test_image_in_mb
* s3_storage_costs
* self.number_of_tasks
/ 1000000
/ 100
/ 10,
)
* 10
)
budget["Compute costs for phase 2"] = (
math.ceil(
self.phase_2_number_of_test_images
* self.phase_2_number_of_submissions_per_team
* self.expected_number_of_teams
* self.inference_time_limit_in_minutes
* compute_costs
* self.number_of_tasks
/ 60
/ 100
/ 10,
)
* 10
)
budget["Total phase 2"] = (
math.ceil(
(
budget["Data storage cost for phase 2"]
+ budget["Compute costs for phase 2"]
)
/ 10,
)
* 10
)
budget["Docker storage cost"] = (
math.ceil(
self.average_algorithm_container_size_in_gb
* self.average_number_of_containers_per_team
* self.expected_number_of_teams
* self.number_of_tasks
* ecr_storage_costs
/ 1000
/ 100
/ 10,
)
* 10
)
budget["Total"] = sum(
filter(
None,
[
budget["Total phase 1"],
budget["Total phase 2"],
budget["Docker storage cost"],
budget["Base cost"],
],
)
)
return budget
else:
return None
|
52d8a5eeeb1b3a1a4089c612824f0a21e1e831f9
|
26e8c952b4d7235c35fa4d2b5ad668931d3c0ef8
|
/tests/mock_eventloop.py
|
654071e8ce2a190abe7edf924c365df6071b95df
|
[
"MIT"
] |
permissive
|
ukBaz/python-bluezero
|
fb7a2e1eea4ee4ef8b58dda308884bceaa62ef0d
|
2b0aba891655bae44c1f281852d5669d5dc9db19
|
refs/heads/main
| 2023-08-17T06:33:27.460187
| 2023-08-06T16:19:09
| 2023-08-06T16:19:09
| 49,202,026
| 360
| 134
|
MIT
| 2023-08-06T16:14:33
| 2016-01-07T11:57:01
|
Python
|
UTF-8
|
Python
| false
| false
| 611
|
py
|
mock_eventloop.py
|
import dbus
import dbus.mainloop.glib
from gi.repository import GLib
def run_pending_events():
"""
Iterate event loop until all pending events are cleared
"""
main_context = GLib.MainContext.default()
while main_context.pending():
main_context.iteration(False)
class MockAsync:
def __init__(self):
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
self.mainloop = GLib.MainLoop()
def run(self):
main_context = GLib.MainContext.default()
while main_context.pending():
main_context.iteration(False)
def quit(self):
self.mainloop.quit()
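# Illustrative usage sketch (hypothetical test code, assuming a D-Bus fixture
# has already queued some events):
#
#     loop = MockAsync()
#     loop.run()            # drains pending events, then returns immediately
#     run_pending_events()  # standalone equivalent of MockAsync.run()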
|
023a1302710b62fb741e7c39b600e2864d9a33b6
|
6545abba423bdc1554d3b5ba0f62ed9ee94839a2
|
/channels_graphql_ws/subscription.py
|
62f03d9c72de2959ef17184cf0911da9dee180ba
|
[
"MIT"
] |
permissive
|
datadvance/DjangoChannelsGraphqlWs
|
428ca34f531a82e7b4a7fa42269730d1accca500
|
09a2ffdde45a1553abd09b5b3e595402b6e6c9b1
|
refs/heads/master
| 2023-08-25T15:56:01.861894
| 2023-05-10T09:46:41
| 2023-05-10T09:46:41
| 138,874,616
| 295
| 82
|
MIT
| 2023-08-15T19:55:19
| 2018-06-27T11:50:46
|
Python
|
UTF-8
|
Python
| false
| false
| 16,209
|
py
|
subscription.py
|
# Copyright (C) DATADVANCE, 2010-2023
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Graphene-like subscription class.
The `Subscription` class itself is a "creative" copy of the `Mutation`
class from Graphene (`graphene/types/mutation.py`).
"""
import asyncio
import collections
import hashlib
import logging
from typing import Optional
import asgiref.sync
import channels.db
import channels.layers
import graphene
import graphene.types.objecttype
import graphene.types.utils
import graphene.utils.get_unbound_function
import graphene.utils.props
from .graphql_ws_consumer import GraphqlWsConsumer
from .serializer import Serializer
# Module logger.
LOG = logging.getLogger(__name__)
class Subscription(graphene.ObjectType):
"""Subscription type definition.
    Subclass this Subscription class to define a GraphQL
subscription. The class works with the `GraphqlWsConsumer` which
maintains a WebSocket connection with the client.
The subclass specifies the following methods. You can define each of
them as a `@classmethod`, as a `@staticmethod`, or even as a regular
method (like Graphene typically does). It shall work fine either
way. NOTE, if you define the method as a regular method (not a
classmethod or a staticmethod) you will receive the first argument
    (`payload`/`root`) in the `self` argument.
[async] publish(payload, info, *args, **kwds):
        This method is invoked each time the subscription "triggers".
Raising an exception here will lead to sending the
notification with the error. Technically the WebSocket
message will contain extra field "extensions.code" holding
the classname of the exception raised. To suppress the
notification return `None`.
Can be implemented as both asynchronous (`async def`) or
synchronous (`def`) function. Asynchronous implementation
runs blazingly fast in the main event loop of the main
thread. You must be careful with blocking calls though. You
can offload blocking operations to a thread in such cases.
Synchronous implementation always runs in a worker thread
which comes with a price of extra overhead.
Required.
Args:
payload: The `payload` from the `broadcast` invocation.
info: The value of `info.context` is a Channels
websocket context with all the connection
information.
args, kwds: Values of the GraphQL subscription inputs.
Returns:
The same that any Graphene resolver returns.
[async] subscribe(root, info, *args, **kwds):
Called when client subscribes. Define this to do some extra
work when client subscribes and to group subscriptions into
different subscription groups. Method signature is the same
as in other GraphQL "resolver" methods but it may return the
subscription groups names to put the subscription into.
Can be implemented as both asynchronous (`async def`) or
synchronous (`def`) function. Asynchronous implementation
runs blazingly fast in the main event loop of the main
thread. You must be careful with blocking calls though. You
can offload blocking operations to a thread in such cases.
Synchronous implementation always runs in a worker thread
which comes with a price of extra overhead.
Optional.
Args:
root: Root resolver object. Typically `None`.
info: The value of `info.context` is a Channels
websocket context with all the connection
information.
args, kwds: Values of the GraphQL subscription inputs.
Returns:
The list or tuple of subscription group names this
subscription instance belongs to. Later the subscription
will trigger on publishes to any of that groups. If
method returns None (default behavior) then the
subscription is only put to the default group (the one
which corresponds to the `Subscription` subclass).
[async] unsubscribed(root, info, *args, **kwds):
Called when client unsubscribes. Define this to be notified
when client unsubscribes.
Can be implemented as both asynchronous (`async def`) or
synchronous (`def`) function. Asynchronous implementation
runs blazingly fast in the main event loop of the main
thread. You must be careful with blocking calls though. You
can offload blocking operations to a thread in such cases.
Synchronous implementation always runs in a worker thread
which comes with a price of extra overhead.
Args:
root: Always `None`.
info: The value of `info.context` is a Channels
websocket context with all the connection
information.
args, kwds: Values of the GraphQL subscription inputs.
    The methods listed above receive the "standard" set of GraphQL
resolver arguments. The `info` field has `context` which can be used
to transmit some useful payload between these methods. For example
if `subscribe` sets `info.context.zen=42` then `publish` will have
access to this value as `info.context.zen`.
Static methods of subscription subclass:
broadcast(): Call this to notify all subscriptions in the group.
unsubscribe(): Call this to stop all subscriptions in the group.
NOTE: If you call any of these methods from the asynchronous context
then `await` the result of the call.
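    A minimal illustrative sketch of the above (the names
    `OnNewChatMessage`, `chat_id` and `text` are hypothetical, not part
    of this module):

        class OnNewChatMessage(Subscription):
            # Field delivered to subscribers with each notification.
            message = graphene.String()

            class Arguments:
                chat_id = graphene.ID()

            @staticmethod
            def subscribe(root, info, chat_id):
                # Put this subscription into a per-chat group.
                return [f"chat-{chat_id}"]

            @staticmethod
            def publish(payload, info, chat_id):
                # `payload` is whatever was passed to `broadcast`.
                return OnNewChatMessage(message=payload["text"])

        # Somewhere in application code (sync or async context):
        OnNewChatMessage.broadcast(group="chat-42", payload={"text": "hi"})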
"""
# ----------------------------------------------------------------------- PUBLIC API
# Subscription notifications queue limit. Set this to control the
# amount of notifications server keeps in the queue when
# notifications come faster than server processes them. Setting this
# to 1 drops all notifications in the queue except the latest one.
# Useful to skip intermediate notifications, e.g. progress reports.
notification_queue_limit: Optional[int] = None
@classmethod
def broadcast(cls, *, group=None, payload=None):
"""Call this method to notify all subscriptions in the group.
Can be called from both synchronous and asynchronous contexts.
It is necessary to `await` if called from the async context.
Args:
            group: Name of the subscription group whose members must be
                notified. `None` means that all subscriptions of this
                type will be triggered.
payload: The payload delivered to the `publish` handler.
NOTE: The `payload` is serialized before sending to the
subscription group.
"""
try:
event_loop = asyncio.get_event_loop()
except RuntimeError:
pass
else:
if event_loop.is_running():
return event_loop.create_task(
cls.broadcast_async(group=group, payload=payload)
)
return cls.broadcast_sync(group=group, payload=payload)
@classmethod
async def broadcast_async(cls, *, group=None, payload=None):
"""Broadcast, asynchronous version."""
# Manually serialize the `payload` to allow transfer of Django
# models inside `payload`, auto serialization does not do this.
serialized_payload = await channels.db.database_sync_to_async(
Serializer.serialize, thread_sensitive=False
)(payload)
# Send the message to the Channels group.
group = cls._group_name(group)
group_send = cls._channel_layer().group_send
# Will result in a call of `GraphqlWsConsumer.broadcast`.
await group_send(
group=group,
message={
"type": "broadcast",
"group": group,
"payload": serialized_payload,
},
)
@classmethod
def broadcast_sync(cls, *, group=None, payload=None):
"""Broadcast, synchronous version."""
# Manually serialize the `payload` to allow transfer of Django
# models inside the `payload`.
serialized_payload = Serializer.serialize(payload)
group = cls._group_name(group)
sync_channel_layer_group_send = asgiref.sync.async_to_sync(
cls._channel_layer().group_send
)
# Will result in a call of `GraphqlWsConsumer.broadcast`.
sync_channel_layer_group_send(
group=group,
message={
"type": "broadcast",
"group": group,
"payload": serialized_payload,
},
)
@classmethod
def unsubscribe(cls, *, group=None):
"""Call this method to stop all subscriptions in the group.
This method can be called from both synchronous and asynchronous
contexts. If you call it from the asynchronous context then you
have to `await`.
Args:
            group: Name of the subscription group whose members must be
                unsubscribed. `None` means that all clients of the
                subscription will be unsubscribed.
"""
try:
event_loop = asyncio.get_event_loop()
except RuntimeError:
pass
else:
if event_loop.is_running():
return asyncio.create_task(cls.unsubscribe_async(group=group))
return cls.unsubscribe_sync(group=group)
@classmethod
async def unsubscribe_async(cls, *, group=None):
"""Unsubscribe, asynchronous version."""
# Send the 'unsubscribe' message to the Channels group.
group = cls._group_name(group)
await cls._channel_layer().group_send(
group=group, message={"type": "unsubscribe", "group": group}
)
@classmethod
def unsubscribe_sync(cls, *, group=None):
"""Unsubscribe, synchronous version."""
# Send the message to the Channels group.
group = cls._group_name(group)
sync_channel_layer_group_send = asgiref.sync.async_to_sync(
cls._channel_layer().group_send
)
sync_channel_layer_group_send(
group=group,
message={
"type": "unsubscribe",
"group": group,
},
)
@classmethod
def Field( # pylint: disable=invalid-name
cls, name=None, description=None, deprecation_reason=None, required=False
):
"""Represent subscription as a field to mount it to the schema.
Typical usage:
class Subscription(graphene.ObjectType):
on_new_chat_message = OnNewChatMessage.Field()
"""
return graphene.Field(
cls._meta.output,
args=cls._meta.arguments,
resolver=cls._meta.publish,
name=name,
description=description,
deprecation_reason=deprecation_reason,
required=required,
)
# ------------------------------------------------------------------- IMPLEMENTATION
@classmethod
def __init_subclass_with_meta__(
cls,
subscribe=None,
publish=None,
unsubscribed=None,
output=None,
arguments=None,
_meta=None,
**options,
): # pylint: disable=arguments-renamed
"""Prepare subscription on subclass creation.
        This method is invoked by the superclass `__init_subclass__`.
        It is needed to process class fields, `Meta` and inheritance
        parameters. This is the genuine Graphene approach, inherited/cloned
        from the original Mutation class implementation.
"""
if not _meta:
_meta = SubscriptionOptions(cls)
output = output or getattr(cls, "Output", None)
# Collect fields if output class is not explicitly defined.
fields: dict = {}
if not output:
fields = collections.OrderedDict()
for base in reversed(cls.__mro__):
fields.update(
graphene.types.utils.yank_fields_from_attrs(
base.__dict__, _as=graphene.Field
)
)
output = cls
if not arguments:
input_class = getattr(cls, "Arguments", None)
if input_class:
arguments = graphene.utils.props.props(input_class)
else:
arguments = {}
# Get `publish`, `subscribe`, and `unsubscribe` handlers.
subscribe = subscribe or getattr(cls, "subscribe", None)
publish = publish or getattr(cls, "publish", None)
unsubscribed = unsubscribed or getattr(cls, "unsubscribed", None)
assert publish is not None, (
f"Subscription '{cls.__qualname__}' does not define a"
" method 'publish'! All subscriptions must define"
" 'publish' which processes GraphQL queries!"
)
if _meta.fields:
_meta.fields.update(fields)
else:
_meta.fields = fields
# Auxiliary alias.
graphene_get_function = graphene.utils.get_unbound_function.get_unbound_function
# pylint: disable=attribute-defined-outside-init
_meta.arguments = arguments
_meta.output = output
_meta.publish = graphene_get_function(publish)
_meta.subscribe = graphene_get_function(subscribe)
_meta.unsubscribed = graphene_get_function(unsubscribed)
super().__init_subclass_with_meta__(_meta=_meta, **options)
@classmethod
def _group_name(cls, group=None):
"""Group name based on the name of the subscription class."""
suffix = f"{cls.__module__}.{cls.__qualname__}"
if group is not None:
suffix += "-" + group
# Wrap the suffix into SHA256 to guarantee that the length of
        # the group name is limited. Otherwise Channels will complain that
        # the group name is wrong (actually, that it is too long).
suffix_sha256 = hashlib.sha256()
suffix_sha256.update(suffix.encode("utf-8"))
return f"{GraphqlWsConsumer.group_name_prefix}-{suffix_sha256.hexdigest()}"
@classmethod
def _channel_layer(cls):
"""Channel layer."""
# We cannot simply check existence of channel layer in the
# consumer constructor, so we added this property.
channel_layer = channels.layers.get_channel_layer()
assert channel_layer is not None, "Channel layer is not configured!"
return channel_layer
class SubscriptionOptions(graphene.types.objecttype.ObjectTypeOptions):
"""Options stored in the Subscription's `_meta` field."""
arguments = None
output = None
subscribe = None
publish = None
unsubscribed = None
|
299b3b310891b61fe7a6380f61c21529c64a40ae
|
069c2295076c482afadfe6351da5ae02be8e18e6
|
/django/contrib/messages/storage/cookie.py
|
2008b31843597f3b9e16d24e87e07822a8c7cc42
|
[
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"GPL-1.0-or-later",
"Python-2.0.1",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-permissive",
"Python-2.0"
] |
permissive
|
django/django
|
5eb557f57053631cd4f566f451e43197309dbeeb
|
c74a6fad5475495756a5bdb18b2cab2b68d429bc
|
refs/heads/main
| 2023-09-01T03:43:44.033530
| 2023-08-31T08:27:32
| 2023-08-31T08:27:32
| 4,164,482
| 73,530
| 38,187
|
BSD-3-Clause
| 2023-09-14T20:03:48
| 2012-04-28T02:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 8,678
|
py
|
cookie.py
|
import binascii
import json
from django.conf import settings
from django.contrib.messages.storage.base import BaseStorage, Message
from django.core import signing
from django.http import SimpleCookie
from django.utils.safestring import SafeData, mark_safe
class MessageEncoder(json.JSONEncoder):
"""
Compactly serialize instances of the ``Message`` class as JSON.
"""
message_key = "__json_message"
def default(self, obj):
if isinstance(obj, Message):
# Using 0/1 here instead of False/True to produce more compact json
is_safedata = 1 if isinstance(obj.message, SafeData) else 0
message = [self.message_key, is_safedata, obj.level, obj.message]
if obj.extra_tags is not None:
message.append(obj.extra_tags)
return message
return super().default(obj)
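# For illustration (hypothetical values): Message(level=20, message="Hello",
# extra_tags="tag") serializes to the compact JSON array
# ["__json_message", 0, 20, "Hello", "tag"], where 0 means the message was
# not SafeData; MessageDecoder below reverses this mapping.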
class MessageDecoder(json.JSONDecoder):
"""
Decode JSON that includes serialized ``Message`` instances.
"""
def process_messages(self, obj):
if isinstance(obj, list) and obj:
if obj[0] == MessageEncoder.message_key:
if obj[1]:
obj[3] = mark_safe(obj[3])
return Message(*obj[2:])
return [self.process_messages(item) for item in obj]
if isinstance(obj, dict):
return {key: self.process_messages(value) for key, value in obj.items()}
return obj
def decode(self, s, **kwargs):
decoded = super().decode(s, **kwargs)
return self.process_messages(decoded)
class MessagePartSerializer:
def dumps(self, obj):
return [
json.dumps(
o,
separators=(",", ":"),
cls=MessageEncoder,
)
for o in obj
]
class MessagePartGatherSerializer:
def dumps(self, obj):
"""
The parameter is an already serialized list of Message objects. No need
to serialize it again, only join the list together and encode it.
"""
return ("[" + ",".join(obj) + "]").encode("latin-1")
class MessageSerializer:
def loads(self, data):
return json.loads(data.decode("latin-1"), cls=MessageDecoder)
class CookieStorage(BaseStorage):
"""
Store messages in a cookie.
"""
cookie_name = "messages"
# uwsgi's default configuration enforces a maximum size of 4kb for all the
# HTTP headers. In order to leave some room for other cookies and headers,
# restrict the session cookie to 1/2 of 4kb. See #18781.
max_cookie_size = 2048
not_finished = "__messagesnotfinished__"
not_finished_json = json.dumps("__messagesnotfinished__")
key_salt = "django.contrib.messages"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.signer = signing.get_cookie_signer(salt=self.key_salt)
def _get(self, *args, **kwargs):
"""
Retrieve a list of messages from the messages cookie. If the
not_finished sentinel value is found at the end of the message list,
remove it and return a result indicating that not all messages were
retrieved by this storage.
"""
data = self.request.COOKIES.get(self.cookie_name)
messages = self._decode(data)
all_retrieved = not (messages and messages[-1] == self.not_finished)
if messages and not all_retrieved:
# remove the sentinel value
messages.pop()
return messages, all_retrieved
def _update_cookie(self, encoded_data, response):
"""
Either set the cookie with the encoded data if there is any data to
store, or delete the cookie.
"""
if encoded_data:
response.set_cookie(
self.cookie_name,
encoded_data,
domain=settings.SESSION_COOKIE_DOMAIN,
secure=settings.SESSION_COOKIE_SECURE or None,
httponly=settings.SESSION_COOKIE_HTTPONLY or None,
samesite=settings.SESSION_COOKIE_SAMESITE,
)
else:
response.delete_cookie(
self.cookie_name,
domain=settings.SESSION_COOKIE_DOMAIN,
samesite=settings.SESSION_COOKIE_SAMESITE,
)
def _store(self, messages, response, remove_oldest=True, *args, **kwargs):
"""
Store the messages to a cookie and return a list of any messages which
could not be stored.
If the encoded data is larger than ``max_cookie_size``, remove
messages until the data fits (these are the messages which are
returned), and add the not_finished sentinel value to indicate as much.
"""
unstored_messages = []
serialized_messages = MessagePartSerializer().dumps(messages)
encoded_data = self._encode_parts(serialized_messages)
if self.max_cookie_size:
# data is going to be stored eventually by SimpleCookie, which
# adds its own overhead, which we must account for.
cookie = SimpleCookie() # create outside the loop
def is_too_large_for_cookie(data):
return data and len(cookie.value_encode(data)[1]) > self.max_cookie_size
def compute_msg(some_serialized_msg):
return self._encode_parts(
some_serialized_msg + [self.not_finished_json],
encode_empty=True,
)
if is_too_large_for_cookie(encoded_data):
if remove_oldest:
idx = bisect_keep_right(
serialized_messages,
fn=lambda m: is_too_large_for_cookie(compute_msg(m)),
)
unstored_messages = messages[:idx]
encoded_data = compute_msg(serialized_messages[idx:])
else:
idx = bisect_keep_left(
serialized_messages,
fn=lambda m: is_too_large_for_cookie(compute_msg(m)),
)
unstored_messages = messages[idx:]
encoded_data = compute_msg(serialized_messages[:idx])
self._update_cookie(encoded_data, response)
return unstored_messages
def _encode_parts(self, messages, encode_empty=False):
"""
Return an encoded version of the serialized messages list which can be
stored as plain text.
Since the data will be retrieved from the client-side, the encoded data
also contains a hash to ensure that the data was not tampered with.
"""
if messages or encode_empty:
return self.signer.sign_object(
messages, serializer=MessagePartGatherSerializer, compress=True
)
def _encode(self, messages, encode_empty=False):
"""
Return an encoded version of the messages list which can be stored as
plain text.
Proxies MessagePartSerializer.dumps and _encoded_parts.
"""
serialized_messages = MessagePartSerializer().dumps(messages)
return self._encode_parts(serialized_messages, encode_empty=encode_empty)
def _decode(self, data):
"""
Safely decode an encoded text stream back into a list of messages.
If the encoded text stream contained an invalid hash or was in an
invalid format, return None.
"""
if not data:
return None
try:
return self.signer.unsign_object(data, serializer=MessageSerializer)
except (signing.BadSignature, binascii.Error, json.JSONDecodeError):
pass
# Mark the data as used (so it gets removed) since something was wrong
# with the data.
self.used = True
return None
def bisect_keep_left(a, fn):
"""
Find the index of the first element from the start of the array that
verifies the given condition.
The function is applied from the start of the array to the pivot.
"""
lo = 0
hi = len(a)
while lo < hi:
mid = (lo + hi) // 2
if fn(a[: mid + 1]):
hi = mid
else:
lo = mid + 1
return lo
def bisect_keep_right(a, fn):
"""
Find the index of the first element from the end of the array that verifies
the given condition.
    The function is applied from the pivot to the end of the array.
"""
lo = 0
hi = len(a)
while lo < hi:
mid = (lo + hi) // 2
if fn(a[mid:]):
lo = mid + 1
else:
hi = mid
return lo
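# Worked example for the two helpers above (hypothetical values): with
# a = [1, 2, 3, 4] and fn = lambda chunk: sum(chunk) > 6,
# bisect_keep_left(a, fn) == 3 (keep the prefix [1, 2, 3], whose sum of 6
# is still within the limit), while bisect_keep_right(a, fn) == 3
# (drop a[:3] and keep the suffix [4]).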
|
cb28a06de509df9aaa86e32cf7a37ce49577dd58
|
e27f9f1f8bef8b1f4676df84ee3e753974d21a1c
|
/examples/transformers/utils.py
|
8f49066335bb949469440409e9352d81790da0be
|
[
"BSD-3-Clause"
] |
permissive
|
pytorch/ignite
|
8fb275638e94e702762eec932b21dc8df7a54cb0
|
34a707e53785cf8a524589f33a570a7516fe064e
|
refs/heads/master
| 2023-09-02T00:27:22.485479
| 2023-08-31T15:10:14
| 2023-08-31T15:10:14
| 111,835,796
| 4,613
| 788
|
BSD-3-Clause
| 2023-09-13T07:46:41
| 2017-11-23T17:31:21
|
Python
|
UTF-8
|
Python
| false
| false
| 1,522
|
py
|
utils.py
|
import torch
from dataset import TransformerDataset
from datasets import load_dataset
from model import TransformerModel
from transformers import AutoTokenizer
from ignite.handlers import DiskSaver
def get_tokenizer(tokenizer_name, tokenizer_dir):
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, cache_dir=tokenizer_dir, do_lower_case=True)
return tokenizer
def get_model(model_name, model_dir, drop_out, n_fc, num_classes):
model = TransformerModel(model_name, model_dir, drop_out, n_fc, num_classes)
return model
def get_dataset(cache_dir, tokenizer_name, tokenizer_dir, max_length):
train_dataset, test_dataset = load_dataset("imdb", split=["train", "test"], cache_dir=cache_dir)
tokenizer = get_tokenizer(tokenizer_name, tokenizer_dir)
train_texts, train_labels = train_dataset["text"], train_dataset["label"]
test_texts, test_labels = test_dataset["text"], test_dataset["label"]
train_dataset = TransformerDataset(train_texts, train_labels, tokenizer, max_length)
test_dataset = TransformerDataset(test_texts, test_labels, tokenizer, max_length)
return train_dataset, test_dataset
def thresholded_output_transform(output):
y_pred, y = output
return torch.round(torch.sigmoid(y_pred)), y
def get_save_handler(config):
if config["with_clearml"]:
from ignite.contrib.handlers.clearml_logger import ClearMLSaver
return ClearMLSaver(dirname=config["output_dir"])
return DiskSaver(config["output_dir"], require_empty=False)
|
9239390ce704d5fb667a4f86175d377c057bf847
|
0933f9ecf49ed89db35cee051a64648886f13e40
|
/fs/lrucache.py
|
8ae26de5cac1d6e6d8ed0f8aad26f60367482f79
|
[
"MIT"
] |
permissive
|
PyFilesystem/pyfilesystem2
|
63da155692594d0405dd237db7d66be243658249
|
8ed9dc495d8ba2f83fbb2a1145d34d92e13644be
|
refs/heads/master
| 2023-09-01T17:05:54.176292
| 2022-10-18T10:59:07
| 2022-10-18T10:59:07
| 70,920,962
| 1,956
| 254
|
MIT
| 2023-08-24T20:00:22
| 2016-10-14T15:05:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,303
|
py
|
lrucache.py
|
"""Least Recently Used cache mapping.
"""
from __future__ import absolute_import, unicode_literals
import typing
from collections import OrderedDict
_K = typing.TypeVar("_K")
_V = typing.TypeVar("_V")
class LRUCache(OrderedDict, typing.Generic[_K, _V]):
"""A dictionary-like container that stores a given maximum items.
If an additional item is added when the LRUCache is full, the least
recently used key is discarded to make room for the new item.
"""
def __init__(self, cache_size):
# type: (int) -> None
"""Create a new LRUCache with the given size."""
self.cache_size = cache_size
super(LRUCache, self).__init__()
def __setitem__(self, key, value):
# type: (_K, _V) -> None
"""Store a new views, potentially discarding an old value."""
if key not in self:
if len(self) >= self.cache_size:
self.popitem(last=False)
OrderedDict.__setitem__(self, key, value)
def __getitem__(self, key):
# type: (_K) -> _V
"""Get the item, but also makes it most recent."""
_super = typing.cast(OrderedDict, super(LRUCache, self))
value = _super.__getitem__(key)
_super.__delitem__(key)
_super.__setitem__(key, value)
return value
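# Illustrative usage (hypothetical values):
#
#     cache = LRUCache(cache_size=2)  # type: LRUCache[str, int]
#     cache["a"] = 1
#     cache["b"] = 2
#     _ = cache["a"]   # touching "a" makes it the most recently used key
#     cache["c"] = 3   # evicts "b", the least recently used key
#     assert "a" in cache and "b" not in cache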
|
1041ba6d9223f3f1abc57f1e9e38d6ddbf300bb7
|
518bf342bc4138982af3e2724e75f1d9ca3ba56c
|
/solutions/0780. Reaching Points/0780.py
|
131b6302accc457b58279e8778c01989d31e9b7a
|
[
"MIT"
] |
permissive
|
walkccc/LeetCode
|
dae85af7cc689882a84ee5011f0a13a19ad97f18
|
a27be41c174565d365cbfe785f0633f634a01b2a
|
refs/heads/main
| 2023-08-28T01:32:43.384999
| 2023-08-20T19:00:45
| 2023-08-20T19:00:45
| 172,231,974
| 692
| 302
|
MIT
| 2023-08-13T14:48:42
| 2019-02-23T15:46:23
|
C++
|
UTF-8
|
Python
| false
| false
| 268
|
py
|
0780.py
|
class Solution:
def reachingPoints(self, sx: int, sy: int, tx: int, ty: int) -> bool:
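    # Work backwards from (tx, ty): a point's unique predecessor is found by
    # reducing the larger coordinate modulo the smaller. The simultaneous
    # assignment below is safe because x % y == x whenever x < y, so only
    # the larger coordinate actually changes on each iteration.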
while sx < tx and sy < ty:
tx, ty = tx % ty, ty % tx
return sx == tx and sy <= ty and (ty - sy) % tx == 0 or \
sy == ty and sx <= tx and (tx - sx) % ty == 0
|
1950057ca1b8453015c23471010c16197c76dd52
|
487b3bfc69dd211c83861f79818b90a5142824cd
|
/ahk/_async/window.py
|
5de120ad6bf0eb13247065b20c7d3d5616631a97
|
[
"MIT"
] |
permissive
|
spyoungtech/ahk
|
bf39a7fe671ca2da8c7d04fc3c47df238c55b28a
|
7615f9389a831f7b601a1edfe7f3d25cfbe036dc
|
refs/heads/main
| 2023-08-31T04:44:09.616179
| 2023-08-22T22:35:50
| 2023-08-22T22:35:50
| 158,650,053
| 746
| 75
|
MIT
| 2023-09-14T18:10:54
| 2018-11-22T06:07:12
|
Python
|
UTF-8
|
Python
| false
| false
| 29,128
|
py
|
window.py
|
from __future__ import annotations
import sys
import warnings
from typing import Any
from typing import Coroutine
from typing import Literal
from typing import Optional
from typing import overload
from typing import Sequence
from typing import Tuple
from typing import TYPE_CHECKING
from typing import Union
from ahk.message import Position
if sys.version_info < (3, 10):
from typing_extensions import TypeAlias
else:
from typing import TypeAlias
if TYPE_CHECKING:
from .engine import AsyncAHK
from .transport import AsyncFutureResult
class WindowNotFoundException(Exception):
...
AsyncPropertyReturnStr: TypeAlias = Coroutine[None, None, str] # unasync: remove
SyncPropertyReturnStr: TypeAlias = str
AsyncPropertyReturnInt: TypeAlias = Coroutine[None, None, int] # unasync: remove
SyncPropertyReturnInt: TypeAlias = int
AsyncPropertyReturnTupleIntInt: TypeAlias = Coroutine[None, None, Tuple[int, int]] # unasync: remove
SyncPropertyReturnTupleIntInt: TypeAlias = Tuple[int, int]
AsyncPropertyReturnBool: TypeAlias = Coroutine[None, None, bool] # unasync: remove
SyncPropertyReturnBool: TypeAlias = bool
_PROPERTY_DEPRECATION_WARNING_MESSAGE = 'Use of the {0} property is not recommended (in the async API only) and may be removed in a future version. Use the get_{0} method instead.'
_SETTERS_REMOVED_ERROR_MESSAGE = (
    'Use of the {0} property setter is not supported in the async API. Use the set_{0} method instead.'
)
class AsyncWindow:
def __init__(self, engine: AsyncAHK, ahk_id: str):
self._engine: AsyncAHK = engine
if not ahk_id:
raise ValueError(f'Invalid ahk_id: {ahk_id!r}')
self._ahk_id: str = ahk_id
def __repr__(self) -> str:
return f'<{self.__class__.__qualname__} ahk_id={self._ahk_id}>'
def __eq__(self, other: object) -> bool:
if not isinstance(other, AsyncWindow):
return NotImplemented
return self._ahk_id == other._ahk_id
def __hash__(self) -> int:
return hash(self._ahk_id)
async def close(self) -> None:
await self._engine.win_close(
title=f'ahk_id {self._ahk_id}', detect_hidden_windows=True, title_match_mode=(1, 'Fast')
)
return None
async def kill(self) -> None:
await self._engine.win_kill(
title=f'ahk_id {self._ahk_id}', detect_hidden_windows=True, title_match_mode=(1, 'Fast')
)
async def exists(self) -> bool:
return await self._engine.win_exists(
title=f'ahk_id {self._ahk_id}', detect_hidden_windows=True, title_match_mode=(1, 'Fast')
)
@property
def id(self) -> str:
return self._ahk_id
@property
def exist(self) -> AsyncPropertyReturnBool:
warnings.warn( # unasync: remove
_PROPERTY_DEPRECATION_WARNING_MESSAGE.format('exist'), category=DeprecationWarning, stacklevel=2
)
return self.exists()
async def get_pid(self) -> int:
pid = await self._engine.win_get_pid(
title=f'ahk_id {self._ahk_id}', detect_hidden_windows=True, title_match_mode=(1, 'Fast')
)
if pid is None:
raise WindowNotFoundException(
f'Error when trying to get PID of window {self._ahk_id!r}. The window may have been closed before the operation could be completed'
)
return pid
@property
def pid(self) -> AsyncPropertyReturnInt:
warnings.warn( # unasync: remove
_PROPERTY_DEPRECATION_WARNING_MESSAGE.format('pid'), category=DeprecationWarning, stacklevel=2
)
return self.get_pid()
async def get_process_name(self) -> str:
name = await self._engine.win_get_process_name(
title=f'ahk_id {self._ahk_id}', detect_hidden_windows=True, title_match_mode=(1, 'Fast')
)
if name is None:
raise WindowNotFoundException(
f'Error when trying to get process name of window {self._ahk_id!r}. The window may have been closed before the operation could be completed'
)
return name
@property
def process_name(self) -> AsyncPropertyReturnStr:
warnings.warn( # unasync: remove
_PROPERTY_DEPRECATION_WARNING_MESSAGE.format('process_name'), category=DeprecationWarning, stacklevel=2
)
return self.get_process_name()
async def get_process_path(self) -> str:
path = await self._engine.win_get_process_path(
title=f'ahk_id {self._ahk_id}', detect_hidden_windows=True, title_match_mode=(1, 'Fast')
)
if path is None:
raise WindowNotFoundException(
f'Error when trying to get process path of window {self._ahk_id!r}. The window may have been closed before the operation could be completed'
)
return path
@property
def process_path(self) -> AsyncPropertyReturnStr:
warnings.warn( # unasync: remove
_PROPERTY_DEPRECATION_WARNING_MESSAGE.format('process_path'), category=DeprecationWarning, stacklevel=2
)
return self.get_process_path()
async def get_minmax(self) -> int:
minmax = await self._engine.win_get_minmax(
title=f'ahk_id {self._ahk_id}', detect_hidden_windows=True, title_match_mode=(1, 'Fast')
)
if minmax is None:
raise WindowNotFoundException(
f'Error when trying to get minmax state of window {self._ahk_id}. The window may have been closed before the operation could be completed'
)
return minmax
async def get_title(self) -> str:
title = await self._engine.win_get_title(
title=f'ahk_id {self._ahk_id}', detect_hidden_windows=True, title_match_mode=(1, 'Fast')
)
return title
@property
def title(self) -> AsyncPropertyReturnStr:
warnings.warn( # unasync: remove
_PROPERTY_DEPRECATION_WARNING_MESSAGE.format('title'), category=DeprecationWarning, stacklevel=2
)
return self.get_title()
@title.setter
def title(self, value: str) -> Any:
        raise RuntimeError(_SETTERS_REMOVED_ERROR_MESSAGE.format('title'))  # unasync: remove
self.set_title(value)
async def set_title(self, new_title: str) -> None:
await self._engine.win_set_title(
title=f'ahk_id {self._ahk_id}',
detect_hidden_windows=True,
new_title=new_title,
title_match_mode=(1, 'Fast'),
)
return None
async def list_controls(self) -> Sequence['AsyncControl']:
controls = await self._engine.win_get_control_list(
title=f'ahk_id {self._ahk_id}', detect_hidden_windows=True, title_match_mode=(1, 'Fast')
)
if controls is None:
raise WindowNotFoundException(
f'Error when trying to enumerate controls for window {self._ahk_id}. The window may have been closed before the operation could be completed'
)
return controls
# fmt: off
@overload
async def minimize(self) -> None: ...
@overload
async def minimize(self, blocking: Literal[True]) -> None: ...
@overload
async def minimize(self, blocking: Literal[False]) -> AsyncFutureResult[None]: ...
@overload
async def minimize(self, blocking: bool = True) -> Optional[AsyncFutureResult[None]]: ...
# fmt: on
async def minimize(self, blocking: bool = True) -> Optional[AsyncFutureResult[None]]:
return await self._engine.win_minimize(
title=f'ahk_id {self._ahk_id}', title_match_mode=(1, 'Fast'), detect_hidden_windows=True, blocking=blocking
)
# fmt: off
@overload
async def maximize(self) -> None: ...
@overload
async def maximize(self, blocking: Literal[True]) -> None: ...
@overload
async def maximize(self, blocking: Literal[False]) -> AsyncFutureResult[None]: ...
@overload
async def maximize(self, blocking: bool = True) -> Optional[AsyncFutureResult[None]]: ...
# fmt: on
async def maximize(self, blocking: bool = True) -> Optional[AsyncFutureResult[None]]:
return await self._engine.win_maximize(
title=f'ahk_id {self._ahk_id}', title_match_mode=(1, 'Fast'), detect_hidden_windows=True, blocking=blocking
)
# fmt: off
@overload
async def restore(self) -> None: ...
@overload
async def restore(self, blocking: Literal[True]) -> None: ...
@overload
async def restore(self, blocking: Literal[False]) -> AsyncFutureResult[None]: ...
@overload
async def restore(self, blocking: bool = True) -> Optional[AsyncFutureResult[None]]: ...
# fmt: on
async def restore(self, blocking: bool = True) -> Optional[AsyncFutureResult[None]]:
return await self._engine.win_restore(
title=f'ahk_id {self._ahk_id}', title_match_mode=(1, 'Fast'), detect_hidden_windows=True, blocking=blocking
)
# fmt: off
@overload
async def get_class(self) -> str: ...
@overload
async def get_class(self, blocking: Literal[True]) -> str: ...
@overload
async def get_class(self, blocking: Literal[False]) -> AsyncFutureResult[str]: ...
@overload
async def get_class(self, blocking: bool = True) -> Union[str, AsyncFutureResult[str]]: ...
# fmt: on
async def get_class(self, blocking: bool = True) -> Union[str, AsyncFutureResult[str]]:
return await self._engine.win_get_class(
title=f'ahk_id {self._ahk_id}', detect_hidden_windows=True, title_match_mode=(1, 'Fast'), blocking=blocking
)
# fmt: off
@overload
async def set_always_on_top(self, toggle: Literal['On', 'Off', 'Toggle', 1, -1, 0]) -> None: ...
@overload
async def set_always_on_top(self, toggle: Literal['On', 'Off', 'Toggle', 1, -1, 0], *, blocking: Literal[False]) -> AsyncFutureResult[None]: ...
@overload
async def set_always_on_top(self, toggle: Literal['On', 'Off', 'Toggle', 1, -1, 0], *, blocking: Literal[True]) -> None: ...
@overload
async def set_always_on_top(self, toggle: Literal['On', 'Off', 'Toggle', 1, -1, 0], *, blocking: bool = True) -> Union[None, AsyncFutureResult[None]]: ...
# fmt: on
async def set_always_on_top(
self, toggle: Literal['On', 'Off', 'Toggle', 1, -1, 0], *, blocking: bool = True
) -> Union[None, AsyncFutureResult[None]]:
return await self._engine.win_set_always_on_top(
toggle=toggle,
title=f'ahk_id {self._ahk_id}',
blocking=blocking,
detect_hidden_windows=True,
title_match_mode=(1, 'Fast'),
)
# fmt: off
@overload
async def is_always_on_top(self) -> bool: ...
@overload
async def is_always_on_top(self, *, blocking: Literal[False]) -> AsyncFutureResult[Optional[bool]]: ...
@overload
async def is_always_on_top(self, *, blocking: Literal[True]) -> bool: ...
@overload
async def is_always_on_top(self, *, blocking: bool = True) -> Union[bool, AsyncFutureResult[Optional[bool]]]: ...
# fmt: on
async def is_always_on_top(self, *, blocking: bool = True) -> Union[bool, AsyncFutureResult[Optional[bool]]]:
args = [f'ahk_id {self._ahk_id}']
resp = await self._engine._transport.function_call(
'AHKWinIsAlwaysOnTop', args, blocking=blocking
) # XXX: maybe shouldn't access transport directly?
if resp is None:
raise WindowNotFoundException(
f'Error when trying to get always on top style for window {self._ahk_id}. The window may have been closed before the operation could be completed'
)
return resp
@property
def always_on_top(self) -> AsyncPropertyReturnBool:
return self.is_always_on_top()
@always_on_top.setter
def always_on_top(self, toggle: Literal['On', 'Off', 'Toggle', 1, -1, 0]) -> Any:
        raise RuntimeError(_SETTERS_REMOVED_ERROR_MESSAGE.format('always_on_top'))  # unasync: remove
self.set_always_on_top(toggle)
# fmt: off
@overload
async def send(self, keys: str) -> None: ...
@overload
async def send(self, keys: str, *, blocking: Literal[False]) -> AsyncFutureResult[None]: ...
@overload
async def send(self, keys: str, *, blocking: Literal[True]) -> None: ...
@overload
async def send(self, keys: str, *, blocking: bool = True) -> Union[None, AsyncFutureResult[None]]: ...
# fmt: on
async def send(self, keys: str, *, blocking: bool = True) -> Union[None, AsyncFutureResult[None]]:
return await self._engine.control_send(
keys=keys,
title=f'ahk_id {self._ahk_id}',
blocking=blocking,
detect_hidden_windows=True,
title_match_mode=(1, 'Fast'),
)
# fmt: off
@overload
async def click(self, x: int = 0, y: int = 0, *, button: Literal['L', 'R', 'M', 'LEFT', 'RIGHT', 'MIDDLE'] = 'L', click_count: int = 1, options: str = '') -> None: ...
@overload
async def click(self, x: int = 0, y: int = 0, *, button: Literal['L', 'R', 'M', 'LEFT', 'RIGHT', 'MIDDLE'] = 'L', click_count: int = 1, options: str = '', blocking: Literal[False]) -> AsyncFutureResult[None]: ...
@overload
async def click(self, x: int = 0, y: int = 0, *, button: Literal['L', 'R', 'M', 'LEFT', 'RIGHT', 'MIDDLE'] = 'L', click_count: int = 1, options: str = '', blocking: Literal[True]) -> None: ...
@overload
async def click(self, x: int = 0, y: int = 0, *, button: Literal['L', 'R', 'M', 'LEFT', 'RIGHT', 'MIDDLE'] = 'L', click_count: int = 1, options: str = '', blocking: bool = True) -> Union[None, AsyncFutureResult[None]]: ...
# fmt: on
async def click(
self,
x: int = 0,
y: int = 0,
*,
button: Literal['L', 'R', 'M', 'LEFT', 'RIGHT', 'MIDDLE'] = 'L',
click_count: int = 1,
options: str = '',
blocking: bool = True,
) -> Union[None, AsyncFutureResult[None]]:
pos = f'X{x} Y{y}'
return await self._engine.control_click(
control=pos,
title=f'ahk_id {self._ahk_id}',
button=button,
click_count=click_count,
options=options,
blocking=blocking,
detect_hidden_windows=True,
title_match_mode=(1, 'Fast'),
)
# fmt: off
@overload
async def get_text(self) -> str: ...
@overload
async def get_text(self, *, blocking: Literal[False]) -> AsyncFutureResult[str]: ...
@overload
async def get_text(self, *, blocking: Literal[True]) -> str: ...
@overload
async def get_text(self, *, blocking: bool = True) -> Union[str, AsyncFutureResult[str]]: ...
# fmt: on
async def get_text(self, *, blocking: bool = True) -> Union[str, AsyncFutureResult[str]]:
return await self._engine.win_get_text(
title=f'ahk_id {self._ahk_id}', blocking=blocking, detect_hidden_windows=True, title_match_mode=(1, 'Fast')
)
@property
def text(self) -> AsyncPropertyReturnStr:
return self.get_text()
# fmt: off
@overload
async def get_position(self) -> Position: ...
@overload
async def get_position(self, *, blocking: Literal[False]) -> AsyncFutureResult[Optional[Position]]: ...
@overload
async def get_position(self, *, blocking: Literal[True]) -> Position: ...
@overload
async def get_position(self, *, blocking: bool = True) -> Union[Position, AsyncFutureResult[Optional[Position]]]: ...
# fmt: on
async def get_position(self, *, blocking: bool = True) -> Union[Position, AsyncFutureResult[Optional[Position]]]:
resp = await self._engine.win_get_position(
title=f'ahk_id {self._ahk_id}',
blocking=blocking,
detect_hidden_windows=True,
title_match_mode=(1, 'Fast'),
)
if resp is None:
raise WindowNotFoundException(
f'Error when trying to get position for window {self._ahk_id}. The window may have been closed before the operation could be completed'
)
return resp
# fmt: off
@overload
async def activate(self) -> None: ...
@overload
async def activate(self, *, blocking: Literal[False]) -> AsyncFutureResult[None]: ...
@overload
async def activate(self, *, blocking: Literal[True]) -> None: ...
@overload
async def activate(self, *, blocking: bool = True) -> Union[None, AsyncFutureResult[None]]: ...
# fmt: on
async def activate(self, *, blocking: bool = True) -> Union[None, AsyncFutureResult[None]]:
resp = await self._engine.win_activate(
title=f'ahk_id {self._ahk_id}',
blocking=blocking,
detect_hidden_windows=True,
title_match_mode=(1, 'Fast'),
)
return resp
# fmt: off
@overload
async def to_bottom(self, *, blocking: Literal[True]) -> None: ...
@overload
async def to_bottom(self, *, blocking: Literal[False]) -> AsyncFutureResult[None]: ...
@overload
async def to_bottom(self) -> None: ...
# fmt: on
async def to_bottom(self, *, blocking: bool = True) -> Union[None, AsyncFutureResult[None]]:
return await self._engine.win_set_bottom(
title=f'ahk_id {self._ahk_id}',
blocking=blocking,
detect_hidden_windows=True,
title_match_mode=(1, 'Fast'),
)
# fmt: off
@overload
async def to_top(self, *, blocking: Literal[True]) -> None: ...
@overload
async def to_top(self, *, blocking: Literal[False]) -> AsyncFutureResult[None]: ...
@overload
async def to_top(self) -> None: ...
# fmt: on
async def to_top(self, *, blocking: bool = True) -> Union[None, AsyncFutureResult[None]]:
return await self._engine.win_set_top(
title=f'ahk_id {self._ahk_id}',
blocking=blocking,
detect_hidden_windows=True,
title_match_mode=(1, 'Fast'),
)
# fmt: off
@overload
async def show(self, *, blocking: Literal[True]) -> None: ...
@overload
async def show(self, *, blocking: Literal[False]) -> AsyncFutureResult[None]: ...
@overload
async def show(self) -> None: ...
# fmt: on
async def show(self, *, blocking: bool = True) -> Union[None, AsyncFutureResult[None]]:
return await self._engine.win_show(
title=f'ahk_id {self._ahk_id}',
blocking=blocking,
detect_hidden_windows=True,
title_match_mode=(1, 'Fast'),
)
# fmt: off
@overload
async def hide(self, *, blocking: Literal[True]) -> None: ...
@overload
async def hide(self, *, blocking: Literal[False]) -> AsyncFutureResult[None]: ...
@overload
async def hide(self) -> None: ...
# fmt: on
async def hide(self, *, blocking: bool = True) -> Union[None, AsyncFutureResult[None]]:
return await self._engine.win_hide(
title=f'ahk_id {self._ahk_id}',
blocking=blocking,
detect_hidden_windows=True,
title_match_mode=(1, 'Fast'),
)
# fmt: off
@overload
async def disable(self, *, blocking: Literal[True]) -> None: ...
@overload
async def disable(self, *, blocking: Literal[False]) -> AsyncFutureResult[None]: ...
@overload
async def disable(self) -> None: ...
# fmt: on
async def disable(self, *, blocking: bool = True) -> Union[None, AsyncFutureResult[None]]:
return await self._engine.win_set_disable(
title=f'ahk_id {self._ahk_id}',
blocking=blocking,
detect_hidden_windows=True,
title_match_mode=(1, 'Fast'),
)
# fmt: off
@overload
async def enable(self, *, blocking: Literal[True]) -> None: ...
@overload
async def enable(self, *, blocking: Literal[False]) -> AsyncFutureResult[None]: ...
@overload
async def enable(self) -> None: ...
# fmt: on
async def enable(self, *, blocking: bool = True) -> Union[None, AsyncFutureResult[None]]:
return await self._engine.win_set_enable(
title=f'ahk_id {self._ahk_id}',
blocking=blocking,
detect_hidden_windows=True,
title_match_mode=(1, 'Fast'),
)
# fmt: off
@overload
async def redraw(self, *, blocking: Literal[True]) -> None: ...
@overload
async def redraw(self, *, blocking: Literal[False]) -> AsyncFutureResult[None]: ...
@overload
async def redraw(self) -> None: ...
@overload
async def redraw(self, *, blocking: bool = True) -> Union[None, AsyncFutureResult[None]]: ...
# fmt: on
async def redraw(self, *, blocking: bool = True) -> Union[None, AsyncFutureResult[None]]:
return await self._engine.win_set_redraw(
title=f'ahk_id {self._ahk_id}',
blocking=blocking,
detect_hidden_windows=True,
title_match_mode=(1, 'Fast'),
)
@overload
async def set_style(self, style: str) -> bool:
...
@overload
async def set_style(self, style: str, *, blocking: Literal[True]) -> bool:
...
@overload
async def set_style(self, style: str, *, blocking: Literal[False]) -> AsyncFutureResult[bool]:
...
@overload
async def set_style(self, style: str, *, blocking: bool = True) -> Union[bool, AsyncFutureResult[bool]]:
...
async def set_style(self, style: str, *, blocking: bool = True) -> Union[bool, AsyncFutureResult[bool]]:
return await self._engine.win_set_style(
style=style,
title=f'ahk_id {self._ahk_id}',
detect_hidden_windows=True,
title_match_mode=(1, 'Fast'),
blocking=blocking,
)
@overload
async def set_ex_style(self, style: str) -> bool:
...
@overload
async def set_ex_style(self, style: str, *, blocking: Literal[False]) -> AsyncFutureResult[bool]:
...
@overload
async def set_ex_style(self, style: str, *, blocking: Literal[True]) -> bool:
...
@overload
async def set_ex_style(self, style: str, *, blocking: bool = True) -> Union[bool, AsyncFutureResult[bool]]:
...
async def set_ex_style(self, style: str, *, blocking: bool = True) -> Union[bool, AsyncFutureResult[bool]]:
return await self._engine.win_set_ex_style(
style=style,
title=f'ahk_id {self._ahk_id}',
detect_hidden_windows=True,
title_match_mode=(1, 'Fast'),
blocking=blocking,
)
@overload
async def set_region(self, options: str) -> bool:
...
@overload
async def set_region(self, options: str, *, blocking: Literal[True]) -> bool:
...
@overload
async def set_region(self, options: str, *, blocking: Literal[False]) -> AsyncFutureResult[bool]:
...
@overload
async def set_region(self, options: str, *, blocking: bool = True) -> Union[bool, AsyncFutureResult[bool]]:
...
async def set_region(self, options: str, *, blocking: bool = True) -> Union[bool, AsyncFutureResult[bool]]:
return await self._engine.win_set_region(
options=options,
title=f'ahk_id {self._ahk_id}',
detect_hidden_windows=True,
title_match_mode=(1, 'Fast'),
blocking=blocking,
)
async def set_transparent(
self, transparency: Union[int, Literal['Off']], *, blocking: bool = True
) -> Union[None, AsyncFutureResult[None]]:
return await self._engine.win_set_transparent(
transparency=transparency,
title=f'ahk_id {self._ahk_id}',
detect_hidden_windows=True,
title_match_mode=(1, 'Fast'),
blocking=blocking,
)
async def set_trans_color(
self, color: Union[int, str], *, blocking: bool = True
) -> Union[None, AsyncFutureResult[None]]:
return await self._engine.win_set_trans_color(
color=color,
title=f'ahk_id {self._ahk_id}',
detect_hidden_windows=True,
title_match_mode=(1, 'Fast'),
blocking=blocking,
)
@property
def active(self) -> AsyncPropertyReturnBool:
return self.is_active()
async def is_active(self) -> bool:
return await self._engine.win_is_active(
title=f'ahk_id {self._ahk_id}',
detect_hidden_windows=True,
title_match_mode=(1, 'Fast'),
)
async def move(
self, x: int, y: int, *, width: Optional[int] = None, height: Optional[int] = None, blocking: bool = True
) -> Union[None, AsyncFutureResult[None]]:
return await self._engine.win_move(
x=x,
y=y,
width=width,
height=height,
title=f'ahk_id {self._ahk_id}',
detect_hidden_windows=True,
title_match_mode=(1, 'Fast'),
blocking=blocking,
)
@classmethod
async def from_pid(cls, engine: AsyncAHK, pid: int) -> Optional[AsyncWindow]:
return await engine.win_get(title=f'ahk_pid {pid}')
@classmethod
async def from_mouse_position(cls, engine: AsyncAHK) -> Optional[AsyncWindow]:
return await engine.win_get_from_mouse_position()
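# Illustrative usage sketch (assumes a running `AsyncAHK` engine named `ahk`;
# 'Notepad' is a hypothetical window title):
#
#     win = await ahk.win_get(title='Notepad')
#     if win is not None and await win.exists():
#         await win.activate()
#         await win.send('Hello{Enter}')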
class AsyncControl:
def __init__(self, window: AsyncWindow, hwnd: str, control_class: str):
self.window: AsyncWindow = window
self.hwnd: str = hwnd
self.control_class: str = control_class
self._engine = window._engine
# fmt: off
@overload
async def click(self, *, button: Literal['L', 'R', 'M', 'LEFT', 'RIGHT', 'MIDDLE'] = 'L', click_count: int = 1, options: str = '') -> None: ...
@overload
async def click(self, *, button: Literal['L', 'R', 'M', 'LEFT', 'RIGHT', 'MIDDLE'] = 'L', click_count: int = 1, options: str = '', blocking: Literal[False]) -> AsyncFutureResult[None]: ...
@overload
async def click(self, *, button: Literal['L', 'R', 'M', 'LEFT', 'RIGHT', 'MIDDLE'] = 'L', click_count: int = 1, options: str = '', blocking: Literal[True]) -> None: ...
@overload
async def click(self, *, button: Literal['L', 'R', 'M', 'LEFT', 'RIGHT', 'MIDDLE'] = 'L', click_count: int = 1, options: str = '', blocking: bool = True) -> Union[None, AsyncFutureResult[None]]: ...
# fmt: on
async def click(
self,
*,
button: Literal['L', 'R', 'M', 'LEFT', 'RIGHT', 'MIDDLE'] = 'L',
click_count: int = 1,
options: str = '',
blocking: bool = True,
) -> Union[None, AsyncFutureResult[None]]:
return await self._engine.control_click(
button=button,
control=self.control_class,
click_count=click_count,
options=options,
title=f'ahk_id {self.window._ahk_id}',
title_match_mode=(1, 'Fast'),
detect_hidden_windows=True,
blocking=blocking,
)
# fmt: off
@overload
async def send(self, keys: str) -> None: ...
@overload
async def send(self, keys: str, *, blocking: Literal[False]) -> AsyncFutureResult[None]: ...
@overload
async def send(self, keys: str, *, blocking: Literal[True]) -> None: ...
@overload
async def send(self, keys: str, *, blocking: bool = True) -> Union[None, AsyncFutureResult[None]]: ...
# fmt: on
async def send(self, keys: str, *, blocking: bool = True) -> Union[None, AsyncFutureResult[None]]:
return await self._engine.control_send(
keys=keys,
control=self.control_class,
title=f'ahk_id {self.window._ahk_id}',
blocking=blocking,
detect_hidden_windows=True,
title_match_mode=(1, 'Fast'),
)
async def get_text(self, blocking: bool = True) -> Union[str, AsyncFutureResult[str]]:
return await self._engine.control_get_text(
control=self.control_class,
title=f'ahk_id {self.window._ahk_id}',
blocking=blocking,
detect_hidden_windows=True,
title_match_mode=(1, 'Fast'),
)
# fmt: off
@overload
async def get_position(self) -> Position: ...
@overload
async def get_position(self, blocking: Literal[False]) -> AsyncFutureResult[Position]: ...
@overload
async def get_position(self, blocking: Literal[True]) -> Position: ...
@overload
async def get_position(self, blocking: bool = True) -> Union[Position, AsyncFutureResult[Position]]: ...
# fmt: on
async def get_position(self, blocking: bool = True) -> Union[Position, AsyncFutureResult[Position]]:
return await self._engine.control_get_position(
control=self.control_class,
title=f'ahk_id {self.window._ahk_id}',
blocking=blocking,
detect_hidden_windows=True,
title_match_mode=(1, 'Fast'),
)
def __repr__(self) -> str:
return f'<{self.__class__.__name__} window={self.window!r}, control_hwnd={self.hwnd!r}, control_class={self.control_class!r}>'
|
2cdbc4faae1e64299f23bb69337b50a01588da47
|
5b2fddc5fbe199735a7cfd8de62bd890381955ba
|
/tensorflow_quantum/core/ops/math_ops/simulate_mps_test.py
|
114706d64d9ddb276b0fc37439ba9904d147f2d4
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/quantum
|
d4c17cb8821c832dcf0b194658a44caf6405422e
|
f56257bceb988b743790e1e480eac76fd036d4ff
|
refs/heads/master
| 2023-08-19T03:07:10.092102
| 2023-07-05T22:01:24
| 2023-07-05T22:01:24
| 238,772,762
| 1,799
| 564
|
Apache-2.0
| 2023-07-13T12:37:47
| 2020-02-06T19:58:35
|
Python
|
UTF-8
|
Python
| false
| false
| 46,614
|
py
|
simulate_mps_test.py
|
# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that specifically target simulate_mps."""
# Remove PYTHONPATH collisions for protobuf.
# pylint: disable=wrong-import-position
import sys
NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x]
sys.path = NEW_PATH
# pylint: enable=wrong-import-position
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
import cirq
import cirq_google
import sympy
from scipy import stats
from tensorflow_quantum.core.ops import batch_util
from tensorflow_quantum.core.ops.math_ops import simulate_mps
from tensorflow_quantum.python import util
def _make_1d_circuit(qubits, depth):
"""Create a 1d ladder circuit."""
even_pairs = list(zip(qubits[::2], qubits[1::2]))
odd_pairs = list(zip(qubits[1::2], qubits[2::2]))
ret = cirq.Circuit()
for _ in range(depth):
# return ret
ret += [(cirq.Y(q)**np.random.random()) for q in qubits]
ret += [
cirq_google.SycamoreGate()(q0, q1)**np.random.random()
for q0, q1 in even_pairs
]
ret += [(cirq.Y(q)**np.random.random()) for q in qubits]
ret += [
cirq_google.SycamoreGate()(q1, q0)**np.random.random()
for q0, q1 in odd_pairs
]
return ret
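# Note (added): the layers above form a brickwork ladder: Y rotations on
# every qubit, Sycamore gates on even pairs (0-1, 2-3, ...), then Y rotations
# again and Sycamore gates on odd pairs (1-2, 3-4, ...). Entanglement thus
# spreads only between neighbours, which keeps a modest MPS bond dimension
# adequate for the comparisons below.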
class SimulateMPS1DExpectationTest(tf.test.TestCase):
"""Tests mps_1d_expectation."""
def test_simulate_mps_1d_expectation_inputs(self):
"""Makes sure that the op fails gracefully on bad inputs."""
n_qubits = 5
batch_size = 5
symbol_names = ['alpha']
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch = [
cirq.Circuit(
cirq.X(qubits[0])**sympy.Symbol(symbol_names[0]),
cirq.Z(qubits[1]),
cirq.CNOT(qubits[2], qubits[3]),
cirq.Y(qubits[4])**sympy.Symbol(symbol_names[0]),
) for _ in range(batch_size)
]
resolver_batch = [{symbol_names[0]: 0.123} for _ in range(batch_size)]
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch])
pauli_sums = util.random_pauli_sums(qubits, 3, batch_size)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'programs must be rank 1'):
# Circuit tensor has too many dimensions.
simulate_mps.mps_1d_expectation(
util.convert_to_tensor([circuit_batch]), symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]))
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'symbol_names must be rank 1.'):
# symbol_names tensor has too many dimensions.
simulate_mps.mps_1d_expectation(
util.convert_to_tensor(circuit_batch), np.array([symbol_names]),
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]))
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'symbol_values must be rank 2.'):
# symbol_values_array tensor has too many dimensions.
simulate_mps.mps_1d_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
np.array([symbol_values_array]),
util.convert_to_tensor([[x] for x in pauli_sums]))
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'symbol_values must be rank 2.'):
# symbol_values_array tensor has too few dimensions.
simulate_mps.mps_1d_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array[0],
util.convert_to_tensor([[x] for x in pauli_sums]))
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'pauli_sums must be rank 2.'):
# pauli_sums tensor has too few dimensions.
simulate_mps.mps_1d_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array, util.convert_to_tensor(list(pauli_sums)))
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'pauli_sums must be rank 2.'):
# pauli_sums tensor has too many dimensions.
simulate_mps.mps_1d_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor([[[x]] for x in pauli_sums]))
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'Unparseable proto'):
# circuit tensor has the right type but invalid values.
simulate_mps.mps_1d_expectation(
['junk'] * batch_size, symbol_names, symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]))
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'Could not find symbol in parameter map'):
# symbol_names tensor has the right type but invalid values.
simulate_mps.mps_1d_expectation(
util.convert_to_tensor(circuit_batch), ['junk'],
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]))
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'qubits not found in circuit'):
# pauli_sums tensor has the right type but invalid values.
new_qubits = [cirq.GridQubit(5, 5), cirq.GridQubit(9, 9)]
new_pauli_sums = util.random_pauli_sums(new_qubits, 2, batch_size)
simulate_mps.mps_1d_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in new_pauli_sums]))
with self.assertRaisesRegex(TypeError, 'Cannot convert'):
# circuits tensor has the wrong type.
simulate_mps.mps_1d_expectation(
[1.0] * batch_size, symbol_names, symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]))
with self.assertRaisesRegex(TypeError, 'Cannot convert'):
# symbol_names tensor has the wrong type.
simulate_mps.mps_1d_expectation(
util.convert_to_tensor(circuit_batch), [0.1234],
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]))
with self.assertRaisesRegex(tf.errors.UnimplementedError, ''):
# symbol_values tensor has the wrong type.
simulate_mps.mps_1d_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
[['junk']] * batch_size,
util.convert_to_tensor([[x] for x in pauli_sums]))
with self.assertRaisesRegex(TypeError, 'Cannot convert'):
# pauli_sums tensor has the wrong type.
simulate_mps.mps_1d_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array, [[1.0]] * batch_size)
with self.assertRaisesRegex(TypeError, 'missing'):
# we are missing an argument.
# pylint: disable=no-value-for-parameter
simulate_mps.mps_1d_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array)
# pylint: enable=no-value-for-parameter
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'at least minimum 4'):
# pylint: disable=too-many-function-args
simulate_mps.mps_1d_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), 1)
with self.assertRaisesRegex(TypeError, 'Expected int'):
# bond_dim should be int.
simulate_mps.mps_1d_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), [])
with self.assertRaisesRegex(TypeError, 'positional arguments'):
# pylint: disable=too-many-function-args
simulate_mps.mps_1d_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), 1, [])
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
expected_regex='do not match'):
# wrong op size.
simulate_mps.mps_1d_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums
][:int(batch_size * 0.5)]))
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
expected_regex='do not match'):
# wrong symbol_values size.
simulate_mps.mps_1d_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array[:int(batch_size * 0.5)],
util.convert_to_tensor([[x] for x in pauli_sums]))
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
expected_regex='cirq.Channel'):
# attempting to use noisy circuit.
noisy_circuit = cirq.Circuit(cirq.depolarize(0.3).on_each(*qubits))
simulate_mps.mps_1d_expectation(
util.convert_to_tensor([noisy_circuit for _ in pauli_sums]),
symbol_names, symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]))
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
expected_regex='not in 1D topology'):
# attempting to use a circuit not in 1D topology
# 0--1--2--3
# \-4
circuit_not_1d = cirq.Circuit(
cirq.X(qubits[0])**sympy.Symbol(symbol_names[0]),
cirq.Z(qubits[1])**sympy.Symbol(symbol_names[0]),
cirq.CNOT(qubits[2], qubits[3]),
cirq.CNOT(qubits[2], qubits[4]),
)
simulate_mps.mps_1d_expectation(
util.convert_to_tensor([circuit_not_1d for _ in pauli_sums]),
symbol_names, symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]))
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
expected_regex='not in 1D topology'):
# attempting to use a circuit in 1D topology, which looks in 2D.
# 0--1
# \-2-\
# 3--4 == 1--0--2--4--3
circuit_not_1d = cirq.Circuit(
cirq.CNOT(qubits[0], qubits[1]),
cirq.CNOT(qubits[0], qubits[2]),
cirq.CNOT(qubits[2], qubits[4]),
cirq.CNOT(qubits[3], qubits[4]),
)
simulate_mps.mps_1d_expectation(
util.convert_to_tensor([circuit_not_1d for _ in pauli_sums]),
symbol_names, symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]))
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
expected_regex='Found: 3 qubit gate'):
# attempting to use 3 qubit gate
three_qb_circuit = cirq.Circuit(
cirq.ISWAP(qubits[0], qubits[1]).controlled_by(qubits[2]),
cirq.X.on_each(*qubits))
simulate_mps.mps_1d_expectation(
util.convert_to_tensor([three_qb_circuit for _ in pauli_sums]),
symbol_names, symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]))
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
expected_regex='minimum 3 qubits'):
# too few qubits.
circuit_small = cirq.Circuit(cirq.X(qubits[0]), cirq.X(qubits[1]),
cirq.X(qubits[2]))
small_pauli = cirq.Z(qubits[0])
simulate_mps.mps_1d_expectation(
util.convert_to_tensor([circuit_small for _ in pauli_sums]),
symbol_names, symbol_values_array,
util.convert_to_tensor([[small_pauli] for _ in pauli_sums]))
def test_simulate_mps_1d_expectation_simple(self):
"""Makes sure that the op shows the same result with Cirq."""
n_qubits = 5
batch_size = 5
symbol_names = ['alpha']
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch = [
cirq.Circuit(
cirq.X(qubits[0])**sympy.Symbol(symbol_names[0]),
cirq.Z(qubits[1]),
cirq.CNOT(qubits[2], qubits[3]),
cirq.Y(qubits[4])**sympy.Symbol(symbol_names[0]),
) for _ in range(batch_size)
]
resolver_batch = [{symbol_names[0]: 0.123} for _ in range(batch_size)]
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch])
pauli_sums = [
cirq.Z(qubits[0]) * cirq.X(qubits[4]) for _ in range(batch_size)
]
cirq_result = [
cirq.Simulator().simulate_expectation_values(c, p, r)
for c, p, r in zip(circuit_batch, pauli_sums, resolver_batch)
]
# Default bond_dim=4
mps_result = simulate_mps.mps_1d_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]))
# Expected value of 0.349...
self.assertAllClose(mps_result, cirq_result)
def test_complex_equality(self):
"""Check moderate sized 1d random circuits."""
batch_size = 10
qubits = cirq.GridQubit.rect(1, 8)
circuit_batch = [_make_1d_circuit(qubits, 3) for _ in range(batch_size)]
pauli_sums = [[
cirq.Z(qubits[0]),
cirq.Z(qubits[-1]),
cirq.Z(qubits[0]) * cirq.Z(qubits[-1]),
cirq.Z(qubits[0]) + cirq.Z(qubits[-1])
] for _ in range(batch_size)]
symbol_names = []
resolver_batch = [{} for _ in range(batch_size)]
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch])
cirq_result = [
cirq.Simulator().simulate_expectation_values(c, p, r)
for c, p, r in zip(circuit_batch, pauli_sums, resolver_batch)
]
mps_result = simulate_mps.mps_1d_expectation(
util.convert_to_tensor(circuit_batch),
symbol_names,
symbol_values_array,
util.convert_to_tensor(pauli_sums),
bond_dim=32)
self.assertAllClose(mps_result, cirq_result, atol=1e-4)
def test_correctness_empty(self):
"""Tests the mps op with empty circuits."""
empty_circuit = tf.raw_ops.Empty(shape=(0,), dtype=tf.string)
empty_symbols = tf.raw_ops.Empty(shape=(0,), dtype=tf.string)
empty_values = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32)
empty_paulis = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.string)
out = simulate_mps.mps_1d_expectation(empty_circuit, empty_symbols,
empty_values, empty_paulis, 32)
self.assertShapeEqual(np.zeros((0, 0)), out)
class SimulateMPS1DSamplesTest(tf.test.TestCase, parameterized.TestCase):
"""Tests tfq_simulate_mps1d_samples."""
def test_simulate_mps1d_samples_inputs(self):
"""Make sure the sample op fails gracefully on bad inputs."""
n_qubits = 5
num_samples = 10
batch_size = 5
symbol_names = ['alpha']
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch = [
cirq.Circuit(
cirq.X(qubits[0])**sympy.Symbol(symbol_names[0]),
cirq.Z(qubits[1]),
cirq.CNOT(qubits[2], qubits[3]),
cirq.Y(qubits[4])**sympy.Symbol(symbol_names[0]),
) for _ in range(batch_size)
]
resolver_batch = [{symbol_names[0]: 0.123} for _ in range(batch_size)]
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch])
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'rank 1. Got rank 2'):
# programs tensor has the wrong shape.
simulate_mps.mps_1d_sample(util.convert_to_tensor([circuit_batch]),
symbol_names, symbol_values_array,
[num_samples])
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'rank 1. Got rank 2'):
# symbol_names tensor has the wrong shape.
simulate_mps.mps_1d_sample(util.convert_to_tensor(circuit_batch),
np.array([symbol_names]),
symbol_values_array, [num_samples])
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'rank 2. Got rank 3'):
# symbol_values tensor has the wrong shape.
simulate_mps.mps_1d_sample(util.convert_to_tensor(circuit_batch),
symbol_names,
np.array([symbol_values_array]),
[num_samples])
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'rank 2. Got rank 1'):
# symbol_values tensor has the wrong shape 2.
simulate_mps.mps_1d_sample(util.convert_to_tensor(circuit_batch),
symbol_names, symbol_values_array[0],
[num_samples])
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'rank 1. Got rank 2'):
# num_samples tensor has the wrong shape.
simulate_mps.mps_1d_sample(util.convert_to_tensor(circuit_batch),
symbol_names, symbol_values_array,
[[num_samples]])
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'Could not find symbol in parameter map'):
# symbol_names tensor has the right type, but invalid value.
simulate_mps.mps_1d_sample(util.convert_to_tensor(circuit_batch),
['junk'], symbol_values_array,
[num_samples])
with self.assertRaisesRegex(TypeError, 'Cannot convert'):
# programs tensor has the wrong type.
simulate_mps.mps_1d_sample([1] * batch_size, symbol_names,
symbol_values_array, [num_samples])
with self.assertRaisesRegex(TypeError, 'Cannot convert'):
            # symbol_names tensor has the wrong type.
simulate_mps.mps_1d_sample(util.convert_to_tensor(circuit_batch),
[1], symbol_values_array, [num_samples])
with self.assertRaisesRegex(tf.errors.UnimplementedError,
'Cast string to float is not supported'):
            # symbol_values tensor has the wrong type.
simulate_mps.mps_1d_sample(util.convert_to_tensor(circuit_batch),
symbol_names, [['junk']] * batch_size,
[num_samples])
with self.assertRaisesRegex(Exception, 'junk'):
            # num_samples tensor has the wrong type.
simulate_mps.mps_1d_sample(util.convert_to_tensor(circuit_batch),
symbol_names, symbol_values_array,
['junk'])
with self.assertRaisesRegex(TypeError, 'missing'):
# too few tensors.
# pylint: disable=no-value-for-parameter
simulate_mps.mps_1d_sample(util.convert_to_tensor(circuit_batch),
symbol_names, symbol_values_array)
# pylint: enable=no-value-for-parameter
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
expected_regex='do not match'):
# wrong symbol_values size.
simulate_mps.mps_1d_sample(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array[:int(batch_size * 0.5)], num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
expected_regex='cirq.Channel'):
# attempting to use noisy circuit.
noisy_circuit = cirq.Circuit(cirq.depolarize(0.3).on_each(*qubits))
simulate_mps.mps_1d_sample(
util.convert_to_tensor([noisy_circuit for _ in circuit_batch]),
symbol_names, symbol_values_array, [num_samples])
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'at least minimum 4'):
# pylint: disable=too-many-function-args
simulate_mps.mps_1d_sample(util.convert_to_tensor(circuit_batch),
symbol_names, symbol_values_array,
[num_samples], 1)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
expected_regex='not in 1D topology'):
# attempting to use a circuit not in 1D topology
# 0--1--2--3
# \-4
circuit_not_1d = cirq.Circuit(
cirq.X(qubits[0])**sympy.Symbol(symbol_names[0]),
cirq.Z(qubits[1])**sympy.Symbol(symbol_names[0]),
cirq.CNOT(qubits[2], qubits[3]),
cirq.CNOT(qubits[2], qubits[4]),
)
simulate_mps.mps_1d_sample(
util.convert_to_tensor([circuit_not_1d for _ in circuit_batch]),
symbol_names, symbol_values_array, [num_samples])
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
expected_regex='not in 1D topology'):
# attempting to use a circuit in 1D topology, which looks in 2D.
# 0--1
# \-2-\
# 3--4 == 1--0--2--4--3
circuit_not_1d = cirq.Circuit(
cirq.CNOT(qubits[0], qubits[1]),
cirq.CNOT(qubits[0], qubits[2]),
cirq.CNOT(qubits[2], qubits[4]),
cirq.CNOT(qubits[3], qubits[4]),
)
simulate_mps.mps_1d_sample(
util.convert_to_tensor([circuit_not_1d for _ in circuit_batch]),
symbol_names, symbol_values_array, [num_samples])
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
expected_regex='minimum 3 qubits'):
# too few qubits.
circuit_small = cirq.Circuit(cirq.X(qubits[0]), cirq.X(qubits[1]),
cirq.X(qubits[2]))
simulate_mps.mps_1d_sample(
util.convert_to_tensor([circuit_small for _ in circuit_batch]),
symbol_names, symbol_values_array, [num_samples])
@parameterized.parameters([
{
'all_n_qubits': [4, 5],
'n_samples': 10
},
{
'all_n_qubits': [4, 5, 8],
'n_samples': 10
},
])
def test_sampling_output_padding(self, all_n_qubits, n_samples):
"""Check that the sampling ops pad outputs correctly"""
op = simulate_mps.mps_1d_sample
circuits = []
expected_outputs = []
for n_qubits in all_n_qubits:
expected_outputs.append(np.ones((n_samples, n_qubits)))
circuits.append(
cirq.Circuit(cirq.X.on_each(*cirq.GridQubit.rect(1, n_qubits))))
results = op(util.convert_to_tensor(circuits), [], [[]] * len(circuits),
[n_samples]).to_list()
for a, b in zip(expected_outputs, results):
self.assertAllClose(a, b)
def test_ghz_state(self):
"""Test a simple GHZ-like state."""
op = simulate_mps.mps_1d_sample
qubits = cirq.GridQubit.rect(1, 6)
circuit = cirq.Circuit(cirq.I.on_each(*qubits))
circuit += [
cirq.X(qubits[0]),
cirq.H(qubits[1]),
cirq.CNOT(qubits[1], qubits[2])
]
circuit_batch = [circuit]
resolver_batch = [cirq.ParamResolver({})]
n_samples = 1000
cirq_samples = batch_util.batch_sample(circuit_batch, resolver_batch,
n_samples, cirq.Simulator())
op_samples = np.array(
op(util.convert_to_tensor(circuit_batch), [], [[]], [n_samples],
bond_dim=16).to_list())
self.assertAllClose(np.mean(op_samples, axis=1),
np.mean(cirq_samples, axis=1),
atol=1e-1)
def test_sampling_fuzz(self):
"""Compare sampling with tfq ops and Cirq."""
op = simulate_mps.mps_1d_sample
batch_size = 10
n_qubits = 6
qubits = cirq.GridQubit.rect(1, n_qubits)
symbol_names = []
n_samples = 10_000
circuit_batch = [_make_1d_circuit(qubits, 1) for _ in range(batch_size)]
resolver_batch = [cirq.ParamResolver({}) for _ in range(batch_size)]
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch])
op_samples = np.array(
op(util.convert_to_tensor(circuit_batch),
symbol_names,
symbol_values_array, [n_samples],
bond_dim=16).to_list())
op_histograms = [
np.histogram(
sample.dot(1 << np.arange(sample.shape[-1] - 1, -1, -1)),
range=(0, 2**len(qubits)),
bins=2**len(qubits))[0] for sample in op_samples
]
cirq_samples = batch_util.batch_sample(circuit_batch, resolver_batch,
n_samples, cirq.Simulator())
cirq_histograms = [
np.histogram(
sample.dot(1 << np.arange(sample.shape[-1] - 1, -1, -1)),
range=(0, 2**len(qubits)),
bins=2**len(qubits))[0] for sample in cirq_samples
]
for a, b in zip(op_histograms, cirq_histograms):
self.assertLess(stats.entropy(a + 1e-8, b + 1e-8), 0.05)
class SimulateMPS1DSampledExpectationTest(tf.test.TestCase):
"""Tests tfq_simulate_mps1d_sampled_expectation."""
def test_simulate_mps1d_sampled_expectation_inputs(self):
"""Make sure sampled expectation op fails gracefully on bad inputs."""
n_qubits = 5
batch_size = 5
symbol_names = ['alpha']
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch = [
cirq.Circuit(
cirq.X(qubits[0])**sympy.Symbol(symbol_names[0]),
cirq.Z(qubits[1]),
cirq.CNOT(qubits[2], qubits[3]),
cirq.Y(qubits[4])**sympy.Symbol(symbol_names[0]),
) for _ in range(batch_size)
]
resolver_batch = [{symbol_names[0]: 0.123} for _ in range(batch_size)]
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch])
pauli_sums = util.random_pauli_sums(qubits, 3, batch_size)
num_samples = [[10]] * batch_size
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'programs must be rank 1'):
# Circuit tensor has too many dimensions.
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor([circuit_batch]), symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'symbol_names must be rank 1.'):
# symbol_names tensor has too many dimensions.
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), np.array([symbol_names]),
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'symbol_values must be rank 2.'):
# symbol_values_array tensor has too many dimensions.
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
np.array([symbol_values_array]),
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'symbol_values must be rank 2.'):
# symbol_values_array tensor has too few dimensions.
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array[0],
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'pauli_sums must be rank 2.'):
# pauli_sums tensor has too few dimensions.
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch),
symbol_names, symbol_values_array,
util.convert_to_tensor(list(pauli_sums)), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'pauli_sums must be rank 2.'):
# pauli_sums tensor has too many dimensions.
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
[util.convert_to_tensor([[x] for x in pauli_sums])],
num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'num_samples must be rank 2'):
# num_samples tensor has the wrong shape.
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]),
[num_samples])
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'num_samples must be rank 2'):
# num_samples tensor has the wrong shape.
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]),
num_samples[0])
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'Unparseable proto'):
# circuit tensor has the right type but invalid values.
simulate_mps.mps_1d_sampled_expectation(
['junk'] * batch_size, symbol_names, symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'Could not find symbol in parameter map'):
# symbol_names tensor has the right type but invalid values.
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), ['junk'],
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'qubits not found in circuit'):
# pauli_sums tensor has the right type but invalid values.
new_qubits = [cirq.GridQubit(5, 5), cirq.GridQubit(9, 9)]
new_pauli_sums = util.random_pauli_sums(new_qubits, 2, batch_size)
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in new_pauli_sums]),
num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'Unparseable proto'):
# pauli_sums tensor has the right type but invalid values 2.
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array, [['junk']] * batch_size, num_samples)
with self.assertRaisesRegex(TypeError, 'Cannot convert'):
# circuits tensor has the wrong type.
simulate_mps.mps_1d_sampled_expectation(
[1.0] * batch_size, symbol_names, symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(TypeError, 'Cannot convert'):
# symbol_names tensor has the wrong type.
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), [0.1234],
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.UnimplementedError, ''):
# symbol_values tensor has the wrong type.
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
[['junk']] * batch_size,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(TypeError, 'Cannot convert'):
# pauli_sums tensor has the wrong type.
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array, [[1.0]] * batch_size, num_samples)
with self.assertRaisesRegex(TypeError, 'missing'):
# we are missing an argument.
# pylint: disable=no-value-for-parameter
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array, num_samples)
# pylint: enable=no-value-for-parameter
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
expected_regex='do not match'):
# wrong op size.
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor([cirq.Circuit()]), symbol_names,
symbol_values_array.astype(np.float64),
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'minimum 4'):
# pylint: disable=too-many-function-args
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch),
symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]),
num_samples,
bond_dim=-10)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
expected_regex='do not match'):
# wrong symbol_values size.
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array[:int(batch_size * 0.5)],
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
expected_regex='cirq.Channel'):
# attempting to use noisy circuit.
noisy_circuit = cirq.Circuit(cirq.depolarize(0.3).on_each(*qubits))
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor([noisy_circuit for _ in pauli_sums]),
symbol_names, symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'at least minimum 4'):
# pylint: disable=too-many-function-args
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples,
1)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
expected_regex='not in 1D topology'):
# attempting to use a circuit not in 1D topology
# 0--1--2--3
# \-4
circuit_not_1d = cirq.Circuit(
cirq.X(qubits[0])**sympy.Symbol(symbol_names[0]),
cirq.Z(qubits[1])**sympy.Symbol(symbol_names[0]),
cirq.CNOT(qubits[2], qubits[3]),
cirq.CNOT(qubits[2], qubits[4]),
)
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor([circuit_not_1d for _ in pauli_sums]),
symbol_names, symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
expected_regex='not in 1D topology'):
# attempting to use a circuit in 1D topology, which looks in 2D.
# 0--1
# \-2-\
# 3--4 == 1--0--2--4--3
circuit_not_1d = cirq.Circuit(
cirq.CNOT(qubits[0], qubits[1]),
cirq.CNOT(qubits[0], qubits[2]),
cirq.CNOT(qubits[2], qubits[4]),
cirq.CNOT(qubits[3], qubits[4]),
)
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor([circuit_not_1d for _ in pauli_sums]),
symbol_names, symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
expected_regex='minimum 3 qubits'):
# too few qubits.
circuit_small = cirq.Circuit(cirq.X(qubits[0]), cirq.X(qubits[1]),
cirq.X(qubits[2]))
small_pauli = cirq.Z(qubits[0])
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor([circuit_small for _ in pauli_sums]),
symbol_names, symbol_values_array,
util.convert_to_tensor([[small_pauli] for _ in pauli_sums]),
num_samples)
def test_simulate_sampled_mps_1d_expectation_simple(self):
"""Makes sure that the op shows the same result with Cirq."""
n_qubits = 5
batch_size = 5
symbol_names = ['alpha']
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch = [
cirq.Circuit(
cirq.X(qubits[0])**sympy.Symbol(symbol_names[0]),
cirq.Z(qubits[1]),
cirq.CNOT(qubits[2], qubits[3]),
cirq.Y(qubits[4])**sympy.Symbol(symbol_names[0]),
) for _ in range(batch_size)
]
resolver_batch = [{symbol_names[0]: 0.123} for _ in range(batch_size)]
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch])
pauli_sums = [
cirq.Z(qubits[0]) * cirq.X(qubits[4]) for _ in range(batch_size)
]
num_samples = np.ones(shape=(len(pauli_sums), 1)) * 10000
cirq_result = [
cirq.Simulator().simulate_expectation_values(c, p, r)
for c, p, r in zip(circuit_batch, pauli_sums, resolver_batch)
]
# Default bond_dim=4
mps_result = simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
# Expected value of 0.349...
self.assertAllClose(mps_result, cirq_result, atol=5e-2)
def test_complex_equality(self):
"""Check moderate sized 1d random circuits."""
batch_size = 10
qubits = cirq.GridQubit.rect(1, 8)
circuit_batch = [_make_1d_circuit(qubits, 3) for _ in range(batch_size)]
pauli_sums = [[
cirq.Z(qubits[0]),
cirq.Z(qubits[-1]),
cirq.Z(qubits[0]) * cirq.Z(qubits[-1]),
cirq.Z(qubits[0]) + cirq.Z(qubits[-1])
] for _ in range(batch_size)]
symbol_names = []
resolver_batch = [{} for _ in range(batch_size)]
        # Because `pauli_sums` has an inhomogeneous shape due to the varying
        # number of terms, `np.ones_like` fails on `pauli_sums` directly.
        pauli_sums_len = [[len(x) for x in y] for y in pauli_sums]
        num_samples = np.ones_like(pauli_sums_len, dtype=int) * 1000
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch])
cirq_result = [
cirq.Simulator().simulate_expectation_values(c, p, r)
for c, p, r in zip(circuit_batch, pauli_sums, resolver_batch)
]
mps_result = simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch),
symbol_names,
symbol_values_array,
util.convert_to_tensor(pauli_sums),
num_samples,
bond_dim=32)
self.assertAllClose(mps_result, cirq_result, atol=2e-1)
def test_correctness_empty(self):
"""Tests the mps op with empty circuits."""
empty_circuit = tf.raw_ops.Empty(shape=(0,), dtype=tf.string)
empty_symbols = tf.raw_ops.Empty(shape=(0,), dtype=tf.string)
empty_values = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32)
empty_paulis = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.string)
num_samples = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.int32)
out = simulate_mps.mps_1d_sampled_expectation(empty_circuit,
empty_symbols,
empty_values,
empty_paulis, num_samples,
32)
self.assertShapeEqual(np.zeros((0, 0)), out)
class InputTypesTest(tf.test.TestCase, parameterized.TestCase):
"""Tests that different inputs types work for all of the ops. """
@parameterized.parameters([
{
'symbol_type': tf.float32
},
{
'symbol_type': tf.float64
},
{
'symbol_type': tf.int32
},
{
'symbol_type': tf.int64
},
{
'symbol_type': tf.complex64
},
])
def test_symbol_values_type(self, symbol_type):
"""Tests all three ops for the different types. """
qubits = cirq.GridQubit.rect(1, 5)
circuits = util.convert_to_tensor(
[cirq.Circuit(cirq.H.on_each(*qubits))])
symbol_names = ['symbol']
symbol_values = tf.convert_to_tensor([[1]], dtype=symbol_type)
pauli_sums = util.random_pauli_sums(qubits, 3, 1)
pauli_sums = util.convert_to_tensor([[x] for x in pauli_sums])
result = simulate_mps.mps_1d_expectation(circuits, symbol_names,
symbol_values, pauli_sums)
self.assertDTypeEqual(result, np.float32)
result = simulate_mps.mps_1d_sample(circuits, symbol_names,
symbol_values, [100])
self.assertDTypeEqual(result.numpy(), np.int8)
result = simulate_mps.mps_1d_sampled_expectation(
circuits, symbol_names, symbol_values, pauli_sums, [[100]])
self.assertDTypeEqual(result, np.float32)
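# --- Illustrative usage sketch (added; not part of the original tests). ---
# A minimal, hedged example of calling the analytic expectation op directly,
# mirroring test_simulate_mps_1d_expectation_simple above; the circuit,
# symbol value and bond_dim=8 are arbitrary illustrative choices.
def _example_mps_expectation():
    qubits = cirq.GridQubit.rect(1, 3)
    circuit = cirq.Circuit(
        cirq.X(qubits[0])**sympy.Symbol('alpha'),
        cirq.CNOT(qubits[1], qubits[2]))
    op = cirq.Z(qubits[0]) * cirq.Z(qubits[2])
    # Shapes: programs (1,), symbol_names (1,), symbol_values (1, 1),
    # pauli_sums (1, 1); returns a (1, 1) float32 tensor of expectations.
    return simulate_mps.mps_1d_expectation(
        util.convert_to_tensor([circuit]), ['alpha'], np.array([[0.5]]),
        util.convert_to_tensor([[op]]), bond_dim=8)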
if __name__ == "__main__":
tf.test.main()
|
fe7447b40ebb83002542e07261f3720e480d6464
|
0e92203844a29b8c36d2c289ef0658204f80c127
|
/utils/Base_Logging.py
|
7a64668576b349824c8c0c03d6b89f648117d674
|
[
"MIT"
] |
permissive
|
qxf2/qxf2-page-object-model
|
b61ae2bd77cb2e6b97db991707945779c6254224
|
17b9d6095b881c6e9f25f8a467d90fc4bb6cef91
|
refs/heads/master
| 2023-08-08T08:30:10.739019
| 2023-07-27T14:15:07
| 2023-07-27T14:15:07
| 77,039,202
| 263
| 197
|
MIT
| 2023-07-27T14:15:08
| 2016-12-21T09:50:23
|
Python
|
UTF-8
|
Python
| false
| false
| 4,351
|
py
|
Base_Logging.py
|
"""
Qxf2 Services: A plug-n-play class for logging.
This class wraps around Python's loguru module.
"""
import os, inspect
import logging
from loguru import logger
from pytest_reportportal import RPLogger, RPLogHandler
class Base_Logging():
"A plug-n-play class for logging"
def __init__(self,log_file_name=None,level="DEBUG",format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {module} | {message}"):
"Constructor for the logging class"
self.log_file_name=log_file_name
self.log_file_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','log'))
self.level=level
self.format=format
self.set_log(self.log_file_name,self.level,self.format)
        self.rp_logger = None
        self.exceptions = []  # collects setup errors (see setup_rp_logging)
def set_log(self,log_file_name,level,format,test_module_name=None):
"Add an handler sending log messages to a sink"
if test_module_name is None:
test_module_name = self.get_calling_module()
if not os.path.exists(self.log_file_dir):
os.makedirs(self.log_file_dir)
if log_file_name is None:
log_file_name = self.log_file_dir + os.sep + test_module_name + '.log'
else:
log_file_name = self.log_file_dir + os.sep + log_file_name
logger.add(log_file_name,level=level,format=format,
rotation="30 days", filter=None, colorize=None, serialize=False, backtrace=True, enqueue=False, catch=True)
def get_calling_module(self):
"Get the name of the calling module"
calling_file = inspect.stack()[-1][1]
if 'runpy' in calling_file:
calling_file = inspect.stack()[4][1]
calling_filename = calling_file.split(os.sep)
        #This logic brought to you by windows + cygwin + git bash
        if len(calling_filename) == 1: #Needed for Windows-style paths
calling_filename = calling_file.split('/')
self.calling_module = calling_filename[-1].split('.')[0]
return self.calling_module
def setup_rp_logging(self, rp_pytest_service):
"Setup reportportal logging"
try:
# Setting up a logging.
logging.setLoggerClass(RPLogger)
self.rp_logger = logging.getLogger(__name__)
self.rp_logger.setLevel(logging.INFO)
            # Create handler for Report Portal and attach it to the logger.
            rp_handler = RPLogHandler(rp_pytest_service)
            # Set INFO level for Report Portal handler.
            rp_handler.setLevel(logging.INFO)
            self.rp_logger.addHandler(rp_handler)
return self.rp_logger
except Exception as e:
self.write("Exception when trying to set rplogger")
self.write(str(e))
self.exceptions.append("Error when setting up the reportportal logger")
def write(self,msg,level='info'):
"Write out a message"
        #fname = inspect.stack()[2][3] #Maybe use an entry-exit decorator instead
all_stack_frames = inspect.stack()
for stack_frame in all_stack_frames[1:]:
if 'Base_Page' not in stack_frame[1]:
break
fname = stack_frame[3]
d = {'caller_func': fname}
if self.rp_logger:
if level.lower()== 'debug':
self.rp_logger.debug(msg=msg)
elif level.lower()== 'info':
self.rp_logger.info(msg)
elif level.lower()== 'warn' or level.lower()=='warning':
self.rp_logger.warning(msg)
elif level.lower()== 'error':
self.rp_logger.error(msg)
elif level.lower()== 'critical':
self.rp_logger.critical(msg)
else:
self.rp_logger.critical(msg)
return
if level.lower()== 'debug':
logger.debug("{module} | {msg}",module=d['caller_func'],msg=msg)
elif level.lower()== 'info':
logger.info("{module} | {msg}",module=d['caller_func'],msg=msg)
elif level.lower()== 'warn' or level.lower()=='warning':
logger.warning("{module} | {msg}",module=d['caller_func'],msg=msg)
elif level.lower()== 'error':
logger.error("{module} | {msg}",module=d['caller_func'],msg=msg)
elif level.lower()== 'critical':
logger.critical("{module} | {msg}",module=d['caller_func'],msg=msg)
else:
logger.critical("Unknown level passed for the msg: {}", msg)
|
73813c9f937e6cdda3d1ddaee258abbee2708065
|
6415c13547e6943f7b65337cbd2790c4e18723c8
|
/netbox/virtualization/apps.py
|
1b6b110dfb408aa7a346977671a3a70262a9065a
|
[
"Apache-2.0"
] |
permissive
|
netbox-community/netbox
|
287254a9698270d51f57b1297118e9f01536da5a
|
506884bc4dc70299db3e2a7ad577dd7fd808065e
|
refs/heads/develop
| 2023-08-24T09:11:46.685121
| 2023-08-23T18:44:14
| 2023-08-23T18:44:14
| 52,796,596
| 8,122
| 1,817
|
Apache-2.0
| 2023-09-14T18:16:01
| 2016-02-29T14:15:46
|
Python
|
UTF-8
|
Python
| false
| false
| 154
|
py
|
apps.py
|
from django.apps import AppConfig
class VirtualizationConfig(AppConfig):
name = 'virtualization'
def ready(self):
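        # Deferred import: runs once the app registry is ready, so importing
        # the module can register this app's search indexes as a side effect.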
from . import search
|
cd0418294c59fa821b63d31dac65567a4d52abcd
|
c43b5835b4499f4e6d6fa4efda9546dc67ae0767
|
/sfepy/base/goptions.py
|
4475ab7b7d74db917b64f1686ce8fdf8e6c1bc12
|
[
"BSD-3-Clause"
] |
permissive
|
sfepy/sfepy
|
4b74e7839b5e7b5e8d90e19ab6e90a068fe33df4
|
0c2d1690e764b601b2687be1e4261b82207ca366
|
refs/heads/master
| 2023-09-04T22:07:28.041123
| 2023-08-28T14:47:50
| 2023-08-28T14:47:50
| 802,525
| 651
| 188
|
BSD-3-Clause
| 2023-09-12T07:28:19
| 2010-07-28T09:14:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,614
|
py
|
goptions.py
|
"""
Various global options/parameters.
Notes
-----
Inspired by rcParams of matplotlib.
"""
from __future__ import absolute_import
import six
def validate_bool(val):
"""
    Convert val to a boolean or raise a ValueError.
"""
if type(val) is str:
val = val.lower()
if val in ('t', 'y', 'yes', 'on', 'true', '1', 1, True):
return True
elif val in ('f', 'n', 'no', 'off', 'false', '0', 0, False):
return False
else:
raise ValueError('Could not convert "%s" to boolean!' % val)
default_goptions = {
'verbose' : [True, validate_bool],
'check_term_finiteness' : [False, validate_bool],
}
class ValidatedDict(dict):
"""
A dictionary object including validation.
"""
validate = dict([(key, validator) for key, (default, validator) in \
six.iteritems(default_goptions)])
def __setitem__(self, key, val):
try:
cval = self.validate[key](val)
dict.__setitem__(self, key, cval)
except KeyError:
raise KeyError('%s is not a valid option.'
' See goptions.keys() for a list of valid'
' options.' % (key,))
    def keys(self):
        """
        Return a sorted list of keys.
        """
        return sorted(dict.keys(self))
def values(self):
"""
Return values in order of sorted keys.
"""
return [self[key] for key in self.keys()]
goptions = ValidatedDict([(key, val[0])
for key, val in six.iteritems(default_goptions)])
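# --- Illustrative usage sketch (added; not part of the original module). ---
# Assignments are validated on the way in; unknown keys raise KeyError.
if __name__ == '__main__':
    goptions['verbose'] = 'off'  # coerced to False by validate_bool()
    assert goptions['verbose'] is False
    try:
        goptions['no_such_option'] = 1
    except KeyError as exc:
        print(exc)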
|
f8d7cc3fc2cff02bbd32ddcb8e219d79334bc2bb
|
cce6cf621ee444b0e87a4c9cd86f81265ef8a21a
|
/test/test_source_dummy.py
|
0558ecbce59d0542c0bfcdd9f0511cfc122ae3d1
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
girder/large_image
|
2f2ee5ad65a4158e7a7e46bd404788a9b46c2e0d
|
955c25d7e3ba2860ad4529a287f0ef8d172b70d3
|
refs/heads/master
| 2023-08-31T05:31:31.010468
| 2023-08-30T19:25:00
| 2023-08-30T19:25:00
| 45,569,214
| 164
| 41
|
Apache-2.0
| 2023-09-14T18:54:38
| 2015-11-04T21:39:10
|
Python
|
UTF-8
|
Python
| false
| false
| 694
|
py
|
test_source_dummy.py
|
import large_image_source_dummy
import large_image
def testDummyTileSource():
source = large_image_source_dummy.open()
tileMetadata = source.getMetadata()
assert tileMetadata['tileWidth'] == 0
assert tileMetadata['tileHeight'] == 0
assert tileMetadata['sizeX'] == 0
assert tileMetadata['sizeY'] == 0
assert tileMetadata['levels'] == 0
assert tileMetadata['magnification'] is None
assert tileMetadata['mm_x'] is None
assert tileMetadata['mm_y'] is None
assert source.getTile(0, 0, 0) == b''
def testGetDummyTileSource():
source = large_image.open('large_image://dummy')
assert isinstance(source, large_image_source_dummy.DummyTileSource)
|
f4429e55523f12110b2b1748267fed4153a5a07e
|
45ba55b4fbdaf1657fde92beaeba4f173265afcd
|
/strawberry/channels/__init__.py
|
455513babb337d84e3d238b96464cbbed2516a52
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
strawberry-graphql/strawberry
|
af96afd4edd1788c59e150597a12501fbc7bf444
|
6d86d1c08c1244e00535840d9d87925431bc6a1c
|
refs/heads/main
| 2023-08-30T03:34:12.929874
| 2023-08-24T12:01:09
| 2023-08-24T12:01:09
| 162,690,887
| 3,408
| 529
|
MIT
| 2023-09-14T21:49:44
| 2018-12-21T08:56:55
|
Python
|
UTF-8
|
Python
| false
| false
| 671
|
py
|
__init__.py
|
from .handlers.base import ChannelsConsumer, ChannelsWSConsumer
from .handlers.graphql_transport_ws_handler import GraphQLTransportWSHandler
from .handlers.graphql_ws_handler import GraphQLWSHandler
from .handlers.http_handler import (
ChannelsRequest,
GraphQLHTTPConsumer,
SyncGraphQLHTTPConsumer,
)
from .handlers.ws_handler import GraphQLWSConsumer
from .router import GraphQLProtocolTypeRouter
__all__ = [
"ChannelsConsumer",
"ChannelsRequest",
"ChannelsWSConsumer",
"GraphQLProtocolTypeRouter",
"GraphQLWSHandler",
"GraphQLTransportWSHandler",
"GraphQLHTTPConsumer",
"GraphQLWSConsumer",
"SyncGraphQLHTTPConsumer",
]
|
8391278f5f7708a78cd9efaddb745f0e75994f52
|
010279e2ba272d09e9d2c4e903722e5faba2cf7a
|
/library/python/pytest/config.py
|
703e442c0b2e7f10d69d21e2067ad19ba7a46c7b
|
[
"Apache-2.0"
] |
permissive
|
catboost/catboost
|
854c1a1f439a96f1ae6b48e16644be20aa04dba2
|
f5042e35b945aded77b23470ead62d7eacefde92
|
refs/heads/master
| 2023-09-01T12:14:14.174108
| 2023-09-01T10:01:01
| 2023-09-01T10:22:12
| 97,556,265
| 8,012
| 1,425
|
Apache-2.0
| 2023-09-11T03:32:32
| 2017-07-18T05:29:04
|
Python
|
UTF-8
|
Python
| false
| false
| 116
|
py
|
config.py
|
_test_mode = [False]
def is_test_mode():
return _test_mode[0]
def set_test_mode():
_test_mode[0] = True
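# --- Illustrative usage sketch (added; not part of the original module). ---
# The flag lives in a one-element list so every importer sees the mutation.
if __name__ == '__main__':
    assert not is_test_mode()
    set_test_mode()
    assert is_test_mode()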
|
bdebadf40c3b5a771788a555a0f4f449e4c16c2d
|
8287c1677795d23856edefecaf5e878f348e99ba
|
/biostar/planet/urls.py
|
2c764eb4075b53c73ac5df6c2a7362394a9fd806
|
[
"MIT"
] |
permissive
|
ialbert/biostar-central
|
ba325593d6b3a9e2b1ebaddb6257b863b22eface
|
a051511350871dcd82bdf0b88ce5cda9fd9ef141
|
refs/heads/master
| 2023-08-30T06:07:44.892831
| 2023-07-24T15:43:02
| 2023-07-24T15:43:02
| 1,511,294
| 535
| 271
|
MIT
| 2023-02-15T18:49:10
| 2011-03-22T13:09:30
|
Python
|
UTF-8
|
Python
| false
| false
| 453
|
py
|
urls.py
|
from django.urls import path, include
from biostar.planet import views
from django.contrib import admin
planet_patterns = [
path('', views.blog_list, name="blog_list"),
path(r'view/<int:id>/', views.blog_view, name="blog_view"),
path(r'bump/<int:id>/', views.blog_bump, name="blog_bump"),
]
urlpatterns = [
    # Include the planet patterns at the site root.
path(r'', include(planet_patterns)),
# Add admin urls.
path('admin/', admin.site.urls),
]
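# --- Illustrative note (added; not part of the original module). ---
# With this urlconf mounted at the site root, the named routes reverse as
# expected, e.g. reverse("blog_view", kwargs={"id": 1}) -> "/view/1/".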
|
bbba50073006a47fd6e0ef5b1d05815ef23faa1f
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/ai/mmdetection/mmdet/models/detectors/dab_detr.py
|
b61301cf6660924f0832f4068841a4664797c585
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 5,907
|
py
|
dab_detr.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, Tuple
from mmengine.model import uniform_init
from torch import Tensor, nn
from mmdet.registry import MODELS
from ..layers import SinePositionalEncoding
from ..layers.transformer import (DABDetrTransformerDecoder,
DABDetrTransformerEncoder, inverse_sigmoid)
from .detr import DETR
@MODELS.register_module()
class DABDETR(DETR):
r"""Implementation of `DAB-DETR:
Dynamic Anchor Boxes are Better Queries for DETR.
<https://arxiv.org/abs/2201.12329>`_.
Code is modified from the `official github repo
<https://github.com/IDEA-Research/DAB-DETR>`_.
Args:
with_random_refpoints (bool): Whether to randomly initialize query
embeddings and not update them during training.
Defaults to False.
num_patterns (int): Inspired by Anchor-DETR. Defaults to 0.
"""
def __init__(self,
*args,
with_random_refpoints: bool = False,
num_patterns: int = 0,
**kwargs) -> None:
self.with_random_refpoints = with_random_refpoints
        assert isinstance(num_patterns, int), \
            f'num_patterns should be int but got {num_patterns}.'
self.num_patterns = num_patterns
super().__init__(*args, **kwargs)
def _init_layers(self) -> None:
"""Initialize layers except for backbone, neck and bbox_head."""
self.positional_encoding = SinePositionalEncoding(
**self.positional_encoding)
self.encoder = DABDetrTransformerEncoder(**self.encoder)
self.decoder = DABDetrTransformerDecoder(**self.decoder)
self.embed_dims = self.encoder.embed_dims
self.query_dim = self.decoder.query_dim
self.query_embedding = nn.Embedding(self.num_queries, self.query_dim)
if self.num_patterns > 0:
self.patterns = nn.Embedding(self.num_patterns, self.embed_dims)
num_feats = self.positional_encoding.num_feats
assert num_feats * 2 == self.embed_dims, \
f'embed_dims should be exactly 2 times of num_feats. ' \
f'Found {self.embed_dims} and {num_feats}.'
def init_weights(self) -> None:
"""Initialize weights for Transformer and other components."""
super(DABDETR, self).init_weights()
if self.with_random_refpoints:
uniform_init(self.query_embedding)
self.query_embedding.weight.data[:, :2] = \
inverse_sigmoid(self.query_embedding.weight.data[:, :2])
self.query_embedding.weight.data[:, :2].requires_grad = False
def pre_decoder(self, memory: Tensor) -> Tuple[Dict, Dict]:
"""Prepare intermediate variables before entering Transformer decoder,
such as `query`, `query_pos`.
Args:
memory (Tensor): The output embeddings of the Transformer encoder,
has shape (bs, num_feat_points, dim).
Returns:
tuple[dict, dict]: The first dict contains the inputs of decoder
and the second dict contains the inputs of the bbox_head function.
- decoder_inputs_dict (dict): The keyword args dictionary of
`self.forward_decoder()`, which includes 'query', 'query_pos',
'memory' and 'reg_branches'.
- head_inputs_dict (dict): The keyword args dictionary of the
bbox_head functions, which is usually empty, or includes
              `enc_outputs_class` and `enc_outputs_coord` when the detector
              supports 'two stage' or 'query selection' strategies.
"""
batch_size = memory.size(0)
query_pos = self.query_embedding.weight
query_pos = query_pos.unsqueeze(0).repeat(batch_size, 1, 1)
if self.num_patterns == 0:
query = query_pos.new_zeros(batch_size, self.num_queries,
self.embed_dims)
else:
query = self.patterns.weight[:, None, None, :]\
.repeat(1, self.num_queries, batch_size, 1)\
.view(-1, batch_size, self.embed_dims)\
.permute(1, 0, 2)
query_pos = query_pos.repeat(1, self.num_patterns, 1)
decoder_inputs_dict = dict(
query_pos=query_pos, query=query, memory=memory)
head_inputs_dict = dict()
return decoder_inputs_dict, head_inputs_dict
def forward_decoder(self, query: Tensor, query_pos: Tensor, memory: Tensor,
memory_mask: Tensor, memory_pos: Tensor) -> Dict:
"""Forward with Transformer decoder.
Args:
query (Tensor): The queries of decoder inputs, has shape
(bs, num_queries, dim).
query_pos (Tensor): The positional queries of decoder inputs,
has shape (bs, num_queries, dim).
memory (Tensor): The output embeddings of the Transformer encoder,
has shape (bs, num_feat_points, dim).
memory_mask (Tensor): ByteTensor, the padding mask of the memory,
has shape (bs, num_feat_points).
memory_pos (Tensor): The positional embeddings of memory, has
shape (bs, num_feat_points, dim).
Returns:
dict: The dictionary of decoder outputs, which includes the
`hidden_states` and `references` of the decoder output.
"""
hidden_states, references = self.decoder(
query=query,
key=memory,
query_pos=query_pos,
key_pos=memory_pos,
key_padding_mask=memory_mask,
reg_branches=self.bbox_head.
fc_reg # iterative refinement for anchor boxes
)
head_inputs_dict = dict(
hidden_states=hidden_states, references=references)
return head_inputs_dict
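# --- Illustrative sketch (added; not part of the original module). ---
# Standalone reproduction of the pattern/query broadcast in `pre_decoder`,
# using plain tensors so it runs without building the full detector; the
# sizes below are arbitrary illustrative choices.
if __name__ == '__main__':
    import torch
    bs, num_queries, num_patterns, dims = 2, 5, 3, 8
    patterns = torch.randn(num_patterns, dims)
    query = patterns[:, None, None, :] \
        .repeat(1, num_queries, bs, 1) \
        .view(-1, bs, dims) \
        .permute(1, 0, 2)
    # Each of the num_patterns embeddings is tiled across all queries.
    assert query.shape == (bs, num_patterns * num_queries, dims)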
|
adc36e1094ebbca42e2bee65708e6e0252750d7c
|
29dfa1deefc72493d1b1eecf1a8df62e24599a77
|
/tests/vfs/extent.py
|
07b1d61c5e9935d5dbfa952fac98e7b8ed0e59fb
|
[
"Apache-2.0"
] |
permissive
|
log2timeline/dfvfs
|
fd301eaf721a9945641a44ff722aec963158a6b3
|
28756d910e951a22c5f0b2bcf5184f055a19d544
|
refs/heads/main
| 2023-08-07T22:45:45.432668
| 2023-07-30T12:17:56
| 2023-07-30T12:17:56
| 23,820,144
| 197
| 65
|
Apache-2.0
| 2023-07-30T12:17:58
| 2014-09-09T05:06:44
|
Python
|
UTF-8
|
Python
| false
| false
| 438
|
py
|
extent.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the VFS extent."""
import unittest
from dfvfs.vfs import extent
from tests import test_lib as shared_test_lib
class ExtentTest(shared_test_lib.BaseTestCase):
"""Tests the VFS extent."""
def testInitialize(self):
"""Test the __init__ function."""
test_extent = extent.Extent()
self.assertIsNotNone(test_extent)
if __name__ == '__main__':
unittest.main()
|
ebe33e4afb705b108c487c95a304287f23bdcd8f
|
2d5f297ec3274ce93f1f5592d5b80c2605f8edc5
|
/pydiffvg_tensorflow/image.py
|
18eb1e6b66ae077b1c9d4b534a5fce250fe3958a
|
[
"Apache-2.0"
] |
permissive
|
BachiLi/diffvg
|
9ec3e3e7b3674c82ca42b18fe49c69991c076370
|
6f60468bfdef5b9fec8cc3fa47b441dc2720eefc
|
refs/heads/master
| 2023-06-21T18:49:09.604301
| 2023-06-13T17:16:46
| 2023-06-13T17:16:46
| 292,727,955
| 747
| 134
|
Apache-2.0
| 2023-06-13T17:16:47
| 2020-09-04T02:23:40
|
Python
|
UTF-8
|
Python
| false
| false
| 700
|
py
|
image.py
|
import numpy as np
import skimage
import skimage.io
import os
def imwrite(img, filename, gamma = 2.2, normalize = False):
directory = os.path.dirname(filename)
if directory != '' and not os.path.exists(directory):
os.makedirs(directory)
if not isinstance(img, np.ndarray):
img = img.numpy()
if normalize:
img_rng = np.max(img) - np.min(img)
if img_rng > 0:
img = (img - np.min(img)) / img_rng
img = np.clip(img, 0.0, 1.0)
if img.ndim==2:
#repeat along the third dimension
img=np.expand_dims(img,2)
img[:, :, :3] = np.power(img[:, :, :3], 1.0/gamma)
skimage.io.imsave(filename, (img * 255).astype(np.uint8))
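# --- Illustrative usage sketch (added; not part of the original module). ---
# Writes a horizontal grayscale ramp as RGB; the path is an arbitrary choice.
if __name__ == '__main__':
    ramp = np.tile(np.linspace(0.0, 1.0, 256), (64, 1))
    rgb = np.stack([ramp, ramp, ramp], axis=-1)
    imwrite(rgb, 'out/ramp.png')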
|
d90ff704d333c3ec9fea045ed8d972b9595786cb
|
b310854c11812430cb434eaa34643c7758f4f8a1
|
/ulid/codec.py
|
e6afec80e18cbf99fbad9f9f8e5f1883b3e59285
|
[
"Apache-2.0"
] |
permissive
|
ahawker/ulid
|
90ca46d2a729cda7d15b50210c0246c3cc703167
|
06289583e9de4286b4d80b4ad000d137816502ca
|
refs/heads/master
| 2023-04-02T02:19:25.846467
| 2021-09-06T18:46:12
| 2021-09-06T18:54:40
| 93,703,838
| 560
| 35
|
Apache-2.0
| 2023-03-14T11:26:18
| 2017-06-08T03:30:51
|
Python
|
UTF-8
|
Python
| false
| false
| 4,268
|
py
|
codec.py
|
"""
ulid/codec
~~~~~~~~~~
Defines encoding/decoding functions for ULID data representations.
"""
import datetime
import typing
from . import base32, hints, ulid
#: Type hint that defines multiple primitive types that can represent
#: a Unix timestamp in seconds.
TimestampPrimitive = typing.Union[hints.Primitive, # pylint: disable=invalid-name
datetime.datetime, ulid.Timestamp, ulid.ULID]
#: Type hint that defines multiple primitive types that can represent randomness.
RandomnessPrimitive = typing.Union[hints.Primitive, ulid.Randomness, ulid.ULID] # pylint: disable=invalid-name
def decode_timestamp(timestamp: TimestampPrimitive) -> ulid.Timestamp:
"""
    Create a new :class:`~ulid.ulid.Timestamp` instance using a timestamp value of a supported type.
The following types are supported for timestamp values:
* :class:`~datetime.datetime`
* :class:`~int`
* :class:`~float`
* :class:`~str`
* :class:`~memoryview`
* :class:`~ulid.ulid.Timestamp`
* :class:`~ulid.ulid.ULID`
* :class:`~bytes`
* :class:`~bytearray`
:param timestamp: Unix timestamp in seconds
:type timestamp: See docstring for types
    :return: Timestamp decoded from the given value
    :rtype: :class:`~ulid.ulid.Timestamp`
:raises ValueError: when the value is an unsupported type
:raises ValueError: when the value is a string and cannot be Base32 decoded
    :raises ValueError: when the value is or was converted to something other than 48 bits
"""
if isinstance(timestamp, datetime.datetime):
timestamp = timestamp.timestamp()
if isinstance(timestamp, (int, float)):
timestamp = int(timestamp * 1000.0).to_bytes(6, byteorder='big')
elif isinstance(timestamp, str):
timestamp = base32.decode_timestamp(timestamp)
elif isinstance(timestamp, memoryview):
timestamp = timestamp.tobytes()
elif isinstance(timestamp, ulid.Timestamp):
timestamp = timestamp.bytes
elif isinstance(timestamp, ulid.ULID):
timestamp = timestamp.timestamp().bytes
if not isinstance(timestamp, (bytes, bytearray)):
raise ValueError('Expected datetime, int, float, str, memoryview, Timestamp, ULID, '
'bytes, or bytearray; got {}'.format(type(timestamp).__name__))
length = len(timestamp)
if length != 6:
raise ValueError('Expects timestamp to be 48 bits; got {} bytes'.format(length))
return ulid.Timestamp(timestamp)
def decode_randomness(randomness: RandomnessPrimitive) -> ulid.Randomness:
"""
Create a new :class:`~ulid.ulid.Randomness` instance using the given randomness value of a supported type.
The following types are supported for randomness values:
* :class:`~int`
* :class:`~float`
* :class:`~str`
* :class:`~memoryview`
* :class:`~ulid.ulid.Randomness`
* :class:`~ulid.ulid.ULID`
* :class:`~bytes`
* :class:`~bytearray`
:param randomness: Random bytes
:type randomness: See docstring for types
    :return: Randomness decoded from the given value
    :rtype: :class:`~ulid.ulid.Randomness`
:raises ValueError: when the value is an unsupported type
:raises ValueError: when the value is a string and cannot be Base32 decoded
    :raises ValueError: when the value is or was converted to something other than 80 bits
"""
if isinstance(randomness, (int, float)):
randomness = int(randomness).to_bytes(10, byteorder='big')
elif isinstance(randomness, str):
randomness = base32.decode_randomness(randomness)
elif isinstance(randomness, memoryview):
randomness = randomness.tobytes()
elif isinstance(randomness, ulid.Randomness):
randomness = randomness.bytes
elif isinstance(randomness, ulid.ULID):
randomness = randomness.randomness().bytes
if not isinstance(randomness, (bytes, bytearray)):
raise ValueError('Expected int, float, str, memoryview, Randomness, ULID, '
'bytes, or bytearray; got {}'.format(type(randomness).__name__))
length = len(randomness)
if length != 10:
raise ValueError('Expects randomness to be 80 bits; got {} bytes'.format(length))
return ulid.Randomness(randomness)
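# Usage sketch for the randomness side (an aside, not part of the original
# module; `os.urandom` is just one way to obtain 80 bits):
#
#     >>> import os
#     >>> raw = os.urandom(10)
#     >>> decode_randomness(raw).bytes == raw
#     True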
|
ae2de339a5f2d94eb50c90348959b1266e2a9930
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/sa/models/action.py
|
0dcd17ad065a460fe68d0086a399bafef9171374
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 7,363
|
py
|
action.py
|
# ----------------------------------------------------------------------
# Action
# ----------------------------------------------------------------------
# Copyright (C) 2007-2021 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
import re
import threading
import operator
from typing import Any, Dict
# Third-party modules
from mongoengine.document import Document, EmbeddedDocument
from mongoengine.fields import (
StringField,
UUIDField,
IntField,
BooleanField,
ListField,
EmbeddedDocumentField,
)
import jinja2
import cachetools
# NOC modules
from noc.core.text import quote_safe_path
from noc.core.prettyjson import to_json
from noc.core.ip import IP
from noc.core.model.decorator import on_delete_check
id_lock = threading.Lock()
class ActionParameter(EmbeddedDocument):
name = StringField()
type = StringField(
choices=[
("int", "int"),
("float", "float"),
("str", "str"),
("interface", "interface"),
("ip", "ip"),
("vrf", "vrf"),
]
)
description = StringField()
is_required = BooleanField(default=True)
default = StringField()
def __str__(self):
return self.name
@property
def json_data(self) -> Dict[str, Any]:
r = {
"name": self.name,
"type": self.type,
"description": self.description,
"is_required": self.is_required,
}
if self.default is not None:
r["default"] = self.default
return r
@on_delete_check(
check=[
("sa.ActionCommands", "action"),
("fm.AlarmDiagnosticConfig", "on_clear_action"),
("fm.AlarmDiagnosticConfig", "periodic_action"),
("fm.AlarmDiagnosticConfig", "on_raise_action"),
]
)
class Action(Document):
meta = {
"collection": "noc.actions",
"strict": False,
"auto_create_index": False,
"json_collection": "sa.actions",
}
uuid = UUIDField(unique=True)
name = StringField(unique=True)
label = StringField()
description = StringField()
access_level = IntField(default=15)
# Optional handler for non-sa actions
handler = StringField()
#
params = ListField(EmbeddedDocumentField(ActionParameter))
_id_cache = cachetools.TTLCache(1000, ttl=60)
def __str__(self):
return self.name
@classmethod
@cachetools.cachedmethod(operator.attrgetter("_id_cache"), lock=lambda _: id_lock)
def get_by_id(cls, id):
return Action.objects.filter(id=id).first()
def get_json_path(self) -> str:
return "%s.json" % quote_safe_path(self.name)
@property
def json_data(self) -> Dict[str, Any]:
r = {
"name": self.name,
"$collection": self._meta["json_collection"],
"uuid": self.uuid,
"label": self.label,
"description": self.description,
"access_level": self.access_level,
}
if self.handler:
r["handler"] = self.handler
r["params"] = [c.json_data for c in self.params]
return r
def to_json(self) -> str:
return to_json(
self.json_data,
order=[
"name",
"$collection",
"uuid",
"label",
"description",
"access_level",
"handler",
"params",
],
)
def get_commands(self, obj):
"""
Returns ActionCommands instance or None
:param obj: Managed Object
"""
from .actioncommands import ActionCommands
for ac in ActionCommands.objects.filter(action=self, profile=obj.profile.id).order_by(
"preference"
):
if not ac.match:
return ac
for m in ac.match:
if (
not m.platform_re
or (obj.platform and re.search(m.platform_re, obj.platform.name))
) and (
not m.version_re
or (obj.version and re.search(m.version_re, obj.version.version))
):
return ac
return None
def expand_ex(self, obj, **kwargs):
ac = self.get_commands(obj)
if not ac:
return None, None
# Render template
loader = jinja2.DictLoader({"tpl": ac.commands})
env = jinja2.Environment(loader=loader)
template = env.get_template("tpl")
return ac, template.render(**self.clean_args(obj, **kwargs))
def expand(self, obj, **kwargs):
return self.expand_ex(obj, **kwargs)[1]
def execute(self, obj, **kwargs):
"""
Execute commands
"""
ac, commands = self.expand_ex(obj, **kwargs)
if commands is None:
return None
# Execute rendered commands
if ac.config_mode:
return obj.scripts.configure(commands=commands)
else:
return obj.scripts.commands(commands=commands)
def clean_args(self, obj, **kwargs):
args = {}
for p in self.params:
if p.name not in kwargs and p.is_required and not p.default:
                raise ValueError("Required parameter '%s' is missing" % p.name)
v = kwargs.get(p.name, p.default)
if v is None:
continue
if p.type == "int":
# Integer type
try:
v = int(v)
except ValueError:
raise ValueError("Invalid integer in parameter '%s': '%s'" % (p.name, v))
elif p.type == "float":
# Float type
try:
v = float(v)
except ValueError:
raise ValueError("Invalid float in parameter '%s': '%s'" % (p.name, v))
elif p.type == "interface":
# Interface
try:
v = obj.get_profile().convert_interface_name(v)
except Exception:
raise ValueError("Invalid interface name in parameter '%s': '%s'" % (p.name, v))
elif p.type == "ip":
# IP address
try:
v = IP.prefix(v)
except ValueError:
raise ValueError("Invalid ip in parameter '%s': '%s'" % (p.name, v))
elif p.type == "vrf":
if isinstance(v, VRF):
pass
elif isinstance(v, int):
try:
v = VRF.objects.get(id=v)
except VRF.DoesNotExist:
raise ValueError("Unknown VRF in parameter '%s': '%s'" % (p.name, v))
elif isinstance(v, str):
try:
v = VRF.objects.get(name=v)
except VRF.DoesNotExist:
raise ValueError("Unknown VRF in parameter '%s': '%s'" % (p.name, v))
else:
raise ValueError("Unknown VRF in parameter '%s': '%s'" % (p.name, v))
args[str(p.name)] = v
return args
# Imported at the end of the module (VRF is only used inside clean_args;
# presumably a top-of-module import would create a circular import with the ip models)
from noc.ip.models.vrf import VRF
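# Usage sketch (an aside, not from the original file; `mo` stands for a
# hypothetical ManagedObject with a matching ActionCommands template):
#
#     action = Action.objects.filter(name="ping").first()
#     commands = action.expand(mo, address="10.0.0.1")  # rendered jinja2 template
#     result = action.execute(mo, address="10.0.0.1")   # runs it via mo.scripts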
|
18a13f23b8aba1b725c5c89372c26293ce604fda
|
ffdc77394c5b5532b243cf3c33bd584cbdc65cb7
|
/tests/st/ops/test_ops_maxpool_with_argmax_v2.py
|
8d90562132875e6fbab8d311318e78f8ee006b90
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MPL-1.0",
"OpenSSL",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause-Open-MPI",
"MIT",
"MPL-2.0-no-copyleft-exception",
"NTP",
"BSD-3-Clause",
"GPL-1.0-or-later",
"0BSD",
"MPL-2.0",
"LicenseRef-scancode-free-unknown",
"AGPL-3.0-only",
"Libpng",
"MPL-1.1",
"IJG",
"GPL-2.0-only",
"BSL-1.0",
"Zlib",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-python-cwi",
"BSD-2-Clause",
"LicenseRef-scancode-gary-s-brown",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"LicenseRef-scancode-mit-nagy",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] |
permissive
|
mindspore-ai/mindspore
|
ca7d5bb51a3451c2705ff2e583a740589d80393b
|
54acb15d435533c815ee1bd9f6dc0b56b4d4cf83
|
refs/heads/master
| 2023-07-29T09:17:11.051569
| 2023-07-17T13:14:15
| 2023-07-17T13:14:15
| 239,714,835
| 4,178
| 768
|
Apache-2.0
| 2023-07-26T22:31:11
| 2020-02-11T08:43:48
|
C++
|
UTF-8
|
Python
| false
| false
| 5,622
|
py
|
test_ops_maxpool_with_argmax_v2.py
|
# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore as ms
import mindspore.ops.operations as P
from mindspore import context, Tensor
from mindspore.nn import Cell
from mindspore import dtype as mstype
class MaxPoolWithArgmaxV2Net(Cell):
def __init__(self, kernel_size, strides, pads, dilation, ceil_mode, argmax_type=mstype.int64):
super(MaxPoolWithArgmaxV2Net, self).__init__()
self.maxpool_with_argmax_v2 = P.MaxPoolWithArgmaxV2(kernel_size, strides, pads, dilation, ceil_mode,
argmax_type)
def construct(self, input_data):
output, argmax = self.maxpool_with_argmax_v2(input_data)
return output, argmax
class DynamicShapeMaxPoolWithArgmaxV2Net(Cell):
def __init__(self, net, axis=0):
super(DynamicShapeMaxPoolWithArgmaxV2Net, self).__init__()
self.net = net
self.unique = P.Unique()
self.gather = P.Gather()
self.axis = axis
def construct(self, x, indices):
unique_indices, _ = self.unique(indices)
x = self.gather(x, unique_indices, self.axis)
return self.net(x)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_maxpool_with_argmax_v2_float16(mode):
"""
Feature: Test MaxPoolWithArgmaxV2.
Description: Test MaxPoolWithArgmaxV2 with float16 inputs.
Expectation: success.
"""
ms.set_context(mode=mode)
attributes = {'kernel_size': (3, 2), 'strides': (2, 1), 'pads': 0, 'dilation': 1,
'ceil_mode': False, 'argmax_type': mstype.int64}
x = Tensor(np.arange(20 * 16 * 50 * 32).reshape((20, 16, 50, 32)), mstype.float16)
net = MaxPoolWithArgmaxV2Net(**attributes)
output, argmax = net(x)
assert output.shape == (20, 16, 24, 31)
assert argmax.shape == (20, 16, 24, 31)
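    # Shape check for the assertions above (standard pooling arithmetic with
    # pads=0, dilation=1, floor rounding):
    #   H_out = floor((50 - (3 - 1) - 1) / 2) + 1 = 24
    #   W_out = floor((32 - (2 - 1) - 1) / 1) + 1 = 31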
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_dynamic_maxpool_with_argmax_v2(mode):
"""
Feature: Test MaxPoolWithArgmaxV2.
Description: Test MaxPoolWithArgmaxV2 following Unique and gather ops.
Expectation: success.
"""
ms.set_context(mode=mode)
attributes = {'kernel_size': (3, 2), 'strides': (2, 1), 'pads': 0, 'dilation': 1,
'ceil_mode': False, 'argmax_type': mstype.int64}
x = Tensor(np.arange(20 * 16 * 50 * 32).reshape((20, 16, 50, 32)), mstype.float16)
indices = Tensor(np.array([0, 1, 2, 0]).astype(np.int32))
net = MaxPoolWithArgmaxV2Net(**attributes)
dy_net = DynamicShapeMaxPoolWithArgmaxV2Net(net)
output, argmax = dy_net(x, indices)
assert output.shape == (3, 16, 24, 31)
assert argmax.shape == (3, 16, 24, 31)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_maxpool_with_argmax_v2_dynamic_shape(mode):
"""
Feature: Test MaxPoolWithArgmaxV2.
Description: Test MaxPoolWithArgmaxV2 with dynamic shape.
Expectation: success.
"""
ms.set_context(mode=mode)
attributes = {'kernel_size': (3, 2), 'strides': (2, 1), 'pads': 0, 'dilation': 1,
'ceil_mode': False, 'argmax_type': mstype.int64}
x = Tensor(np.arange(20 * 16 * 50 * 32).reshape((20, 16, 50, 32)), mstype.float16)
x_dyn = Tensor(shape=[None for _ in x.shape], dtype=mstype.float16)
net = MaxPoolWithArgmaxV2Net(**attributes)
net.set_inputs(x_dyn)
output, argmax = net(x)
assert output.shape == (20, 16, 24, 31)
assert argmax.shape == (20, 16, 24, 31)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_maxpool_with_argmax_v2_ceil_mode_true(mode):
"""
Feature: Test MaxPoolWithArgmaxV2.
    Description: Test MaxPoolWithArgmaxV2 with `ceil_mode` set to True.
Expectation: success.
"""
ms.set_context(mode=mode)
attributes = {'kernel_size': (3, 2), 'strides': (2, 1), 'pads': 0, 'dilation': 1,
'ceil_mode': True, 'argmax_type': mstype.int64}
x = Tensor(np.arange(20 * 16 * 50 * 32).reshape((20, 16, 50, 32)), mstype.float16)
net = MaxPoolWithArgmaxV2Net(**attributes)
output, argmax = net(x)
assert output.shape == (20, 16, 25, 31)
assert argmax.shape == (20, 16, 25, 31)
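    # With ceil_mode=True only the strided height dimension grows, since the
    # width stride is 1:
    #   H_out = ceil((50 - (3 - 1) - 1) / 2) + 1 = 25   (floor mode gives 24)
    #   W_out = floor((32 - (2 - 1) - 1) / 1) + 1 = 31  (unchanged)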
|
fbb409686e4cab8b88ae3a450d9b06b7fa1ce7d4
|
15f0514701a78e12750f68ba09d68095172493ee
|
/Python3/183.py
|
5902e2b47594d578946ecf4da992a21c232e4630
|
[
"MIT"
] |
permissive
|
strengthen/LeetCode
|
5e38c8c9d3e8f27109b9124ae17ef8a4139a1518
|
3ffa6dcbeb787a6128641402081a4ff70093bb61
|
refs/heads/master
| 2022-12-04T21:35:17.872212
| 2022-11-30T06:23:24
| 2022-11-30T06:23:24
| 155,958,163
| 936
| 365
|
MIT
| 2021-11-15T04:02:45
| 2018-11-03T06:47:38
| null |
UTF-8
|
Python
| false
| false
| 623
|
py
|
183.py
|
__________________________________________________________________________________________________
-- Approach 1: NOT IN subquery
SELECT Name AS Customers FROM Customers
WHERE Id NOT IN (SELECT CustomerId FROM Orders);
__________________________________________________________________________________________________
-- Approach 2: LEFT JOIN and keep the unmatched rows
SELECT Name AS Customers FROM Customers
LEFT JOIN Orders ON Customers.Id = Orders.CustomerId
WHERE Orders.CustomerId IS NULL;
__________________________________________________________________________________________________
-- Approach 3: correlated NOT EXISTS
SELECT Name AS Customers FROM Customers c
WHERE NOT EXISTS (SELECT * FROM Orders o WHERE o.CustomerId = c.Id);
|
95f20943d916a1e80e365b35bcb4c0a4fe64f3d4
|
bfb55f5cd85a8516510ad00a3c5f298afadecad3
|
/sc2/unit_command.py
|
00a47406bb53d18ce5d1ac182db7c336526a30af
|
[
"MIT"
] |
permissive
|
BurnySc2/python-sc2
|
b53429a4bc733446e0676b96892577a18c604f00
|
76e4a435732d4359e5bd9e15b6283a0498e212ca
|
refs/heads/develop
| 2023-08-31T12:56:47.734503
| 2023-08-03T11:05:47
| 2023-08-03T11:05:47
| 188,820,422
| 409
| 188
|
MIT
| 2023-08-03T11:04:11
| 2019-05-27T10:13:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,404
|
py
|
unit_command.py
|
from __future__ import annotations
from typing import TYPE_CHECKING, Tuple, Union
from sc2.constants import COMBINEABLE_ABILITIES
from sc2.ids.ability_id import AbilityId
from sc2.position import Point2
if TYPE_CHECKING:
from sc2.unit import Unit
class UnitCommand:
def __init__(self, ability: AbilityId, unit: Unit, target: Union[Unit, Point2] = None, queue: bool = False):
"""
        :param ability: Ability to be executed
        :param unit: Unit that will execute the ability
        :param target: Optional target Unit or Point2 of the ability
        :param queue: If True, queue the command behind the unit's current orders
"""
assert ability in AbilityId, f"ability {ability} is not in AbilityId"
assert unit.__class__.__name__ == "Unit", f"unit {unit} is of type {type(unit)}"
assert any(
[
target is None,
isinstance(target, Point2),
                target.__class__.__name__ == "Unit",
]
), f"target {target} is of type {type(target)}"
assert isinstance(queue, bool), f"queue flag {queue} is of type {type(queue)}"
self.ability = ability
self.unit = unit
self.target = target
self.queue = queue
@property
def combining_tuple(self) -> Tuple[AbilityId, Union[Unit, Point2], bool, bool]:
return self.ability, self.target, self.queue, self.ability in COMBINEABLE_ABILITIES
def __repr__(self):
return f"UnitCommand({self.ability}, {self.unit}, {self.target}, {self.queue})"
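# Construction sketch (an aside; `marine` is a hypothetical Unit instance -- in
# python-sc2, commands are normally created by calling `unit(ability, target)`
# rather than by instantiating UnitCommand directly):
#
#     cmd = UnitCommand(AbilityId.ATTACK, marine, target=Point2((20, 30)))
#     cmd.combining_tuple  # (ability, target, queue, ability in COMBINEABLE_ABILITIES)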
|
3d2337c780d3704512a860cf292f6e32e8fe3604
|
ca98f0332b773f9b1982118daf94ba75ffeaa90d
|
/src/mygrad/nnet/initializers/__init__.py
|
6c43295347684993430f67e744c55f852dac16b2
|
[
"MIT"
] |
permissive
|
rsokl/MyGrad
|
f5b745b26a01ddda4ff6ce279746c47cb2f021cf
|
133072b526966e235d70bbfcf9eb86d43d0fcfa1
|
refs/heads/master
| 2023-07-09T01:20:42.314017
| 2023-07-03T19:13:30
| 2023-07-03T19:13:30
| 97,431,804
| 186
| 28
|
MIT
| 2023-07-03T19:13:32
| 2017-07-17T03:31:24
|
Python
|
UTF-8
|
Python
| false
| false
| 482
|
py
|
__init__.py
|
from mygrad.tensor_creation.funcs import identity
from .constant import constant
from .dirac import dirac
from .glorot_normal import glorot_normal
from .glorot_uniform import glorot_uniform
from .he_normal import he_normal
from .he_uniform import he_uniform
from .normal import normal
from .uniform import uniform
__all__ = [
"constant",
"dirac",
"glorot_normal",
"glorot_uniform",
"he_normal",
"he_uniform",
"identity",
"normal",
"uniform",
]
|
45f2f1ddeea7bfbe7fdda26893006d103ce52127
|
8d5df43c1611a709ddf19d8b23b8763eb37b4e8f
|
/tests/unit/blocking_connection_tests.py
|
7222223e890f93790bfdb58e04a98d4e020b4d25
|
[
"BSD-3-Clause"
] |
permissive
|
pika/pika
|
86ed56bec6aa813ffd8a7037bbef756a9388533e
|
f4d8f8ff02a4da4653749c86161b7d52e53f73fe
|
refs/heads/main
| 2023-09-03T18:19:30.231575
| 2023-07-28T23:01:02
| 2023-07-29T21:16:38
| 342,869
| 3,040
| 919
|
BSD-3-Clause
| 2023-08-03T21:20:50
| 2009-10-19T23:22:02
|
Python
|
UTF-8
|
Python
| false
| false
| 15,288
|
py
|
blocking_connection_tests.py
|
# -*- coding: utf-8 -*-
"""
Tests for pika.adapters.blocking_connection.BlockingConnection
"""
import sys
import unittest
from unittest import mock
from unittest.mock import patch
import pika
from pika.adapters import blocking_connection
from pika.adapters.utils import nbio_interface
import pika.channel
import pika.exceptions
# Disable protected-access
# pylint: disable=W0212
# Disable missing-docstring
# pylint: disable=C0111
# Disable invalid-name
# pylint: disable=C0103
# Disable no-self-use
# pylint: disable=R0201
class BlockingConnectionMockTemplate(blocking_connection.BlockingConnection):
pass
class SelectConnectionTemplate(
blocking_connection.select_connection.SelectConnection):
is_closed = None
is_closing = None
is_open = None
_channels = None
ioloop = None
_transport = None
_get_write_buffer_size = None
class BlockingConnectionTests(unittest.TestCase):
"""TODO: test properties"""
@patch.object(
blocking_connection.select_connection,
'SelectConnection',
spec_set=SelectConnectionTemplate)
def test_constructor(self, _select_connection_class_mock):
with mock.patch.object(blocking_connection.BlockingConnection,
'_create_connection') as _create_connection_mock:
connection = blocking_connection.BlockingConnection('params')
_create_connection_mock.assert_called_once_with('params', None)
connection._impl.add_on_close_callback.assert_called_once_with(
connection._closed_result.set_value_once)
@patch.object(
blocking_connection.select_connection,
'SelectConnection',
spec_set=SelectConnectionTemplate)
def test_process_io_for_connection_setup(self,
select_connection_class_mock):
with mock.patch.object(blocking_connection.BlockingConnection,
'_create_connection'):
connection = blocking_connection.BlockingConnection('params')
mock_connection = select_connection_class_mock.return_value
with mock.patch.object(select_connection_class_mock,
'create_connection',
side_effect=
lambda configs,
on_done,
custom_ioloop: [on_done(mock_connection),
custom_ioloop.close(),
None][2]):
result = connection._create_connection(
None,
select_connection_class_mock)
self.assertIs(result, mock_connection)
@patch.object(
blocking_connection.select_connection,
'SelectConnection',
spec_set=SelectConnectionTemplate)
def test_process_io_for_connection_setup_fails_with_open_error(
self, select_connection_class_mock):
with mock.patch.object(blocking_connection.BlockingConnection,
'_create_connection'):
connection = blocking_connection.BlockingConnection('params')
exc_value = pika.exceptions.AMQPConnectionError('failed')
with mock.patch.object(select_connection_class_mock,
'create_connection',
side_effect=
lambda configs,
on_done,
custom_ioloop: on_done(exc_value)):
with self.assertRaises(pika.exceptions.AMQPConnectionError) as cm:
connection._create_connection(None,
select_connection_class_mock)
self.assertIs(cm.exception, exc_value)
@patch.object(
blocking_connection.select_connection,
'SelectConnection',
spec_set=SelectConnectionTemplate,
is_closed=False)
def test_flush_output(self, select_connection_class_mock):
impl_mock = select_connection_class_mock.return_value
with mock.patch.object(blocking_connection.BlockingConnection,
'_create_connection',
return_value=impl_mock):
connection = blocking_connection.BlockingConnection('params')
get_buffer_size_mock = mock.Mock(
name='_get_write_buffer_size',
side_effect=[100, 50, 0],
spec=nbio_interface.AbstractStreamTransport.get_write_buffer_size)
transport_mock = mock.NonCallableMock(
spec_set=nbio_interface.AbstractStreamTransport)
connection._impl._transport = transport_mock
connection._impl._get_write_buffer_size = get_buffer_size_mock
connection._flush_output(lambda: False, lambda: True)
@patch.object(
blocking_connection.select_connection,
'SelectConnection',
spec_set=SelectConnectionTemplate,
is_closed=False)
def test_flush_output_user_initiated_close(self,
select_connection_class_mock):
impl_mock = select_connection_class_mock.return_value
with mock.patch.object(blocking_connection.BlockingConnection,
'_create_connection',
return_value=impl_mock):
connection = blocking_connection.BlockingConnection('params')
original_exc = pika.exceptions.ConnectionClosedByClient(200, 'success')
connection._closed_result.set_value_once(
impl_mock, original_exc)
connection._flush_output(lambda: False, lambda: True)
self.assertEqual(connection._impl.ioloop.close.call_count, 1)
@patch.object(
blocking_connection.select_connection,
'SelectConnection',
spec_set=SelectConnectionTemplate,
is_closed=False)
def test_flush_output_server_initiated_error_close(
self, select_connection_class_mock):
impl_mock = select_connection_class_mock.return_value
with mock.patch.object(blocking_connection.BlockingConnection,
'_create_connection',
return_value=impl_mock):
connection = blocking_connection.BlockingConnection('params')
original_exc = pika.exceptions.ConnectionClosedByBroker(404,
'not found')
connection._closed_result.set_value_once(
impl_mock, original_exc)
with self.assertRaises(pika.exceptions.ConnectionClosedByBroker) as cm:
connection._flush_output(lambda: False, lambda: True)
self.assertSequenceEqual(cm.exception.args, (404, 'not found'))
self.assertEqual(connection._impl.ioloop.close.call_count, 1)
@patch.object(
blocking_connection.select_connection,
'SelectConnection',
spec_set=SelectConnectionTemplate,
is_closed=False)
def test_flush_output_server_initiated_no_error_close(
self,
select_connection_class_mock):
impl_mock = select_connection_class_mock.return_value
with mock.patch.object(blocking_connection.BlockingConnection,
'_create_connection',
return_value=impl_mock):
connection = blocking_connection.BlockingConnection('params')
original_exc = pika.exceptions.ConnectionClosedByBroker(200, 'ok')
connection._closed_result.set_value_once(
impl_mock,
original_exc)
impl_mock.is_closed = False
with self.assertRaises(pika.exceptions.ConnectionClosed) as cm:
connection._flush_output(lambda: False, lambda: True)
self.assertSequenceEqual(cm.exception.args, (200, 'ok'))
self.assertEqual(connection._impl.ioloop.close.call_count, 1)
@patch.object(
blocking_connection.select_connection,
'SelectConnection',
spec_set=SelectConnectionTemplate)
def test_close(self, select_connection_class_mock):
impl_mock = select_connection_class_mock.return_value
impl_mock.is_closed = False
with mock.patch.object(blocking_connection.BlockingConnection,
'_create_connection',
return_value=impl_mock):
connection = blocking_connection.BlockingConnection('params')
impl_channel_mock = mock.Mock()
connection._impl._channels = {1: impl_channel_mock}
with mock.patch.object(blocking_connection.BlockingConnection,
'_flush_output',
spec_set=connection._flush_output):
connection._closed_result.signal_once()
connection.close(200, 'text')
impl_channel_mock._get_cookie.return_value.close.assert_called_once_with(
200, 'text')
select_connection_class_mock.return_value.close.assert_called_once_with(
200, 'text')
@patch.object(
blocking_connection.select_connection,
'SelectConnection',
spec_set=SelectConnectionTemplate)
def test_close_with_channel_closed_exception(self,
select_connection_class_mock):
impl_mock = select_connection_class_mock.return_value
impl_mock.is_closed = False
with mock.patch.object(blocking_connection.BlockingConnection,
'_create_connection',
return_value=impl_mock):
connection = blocking_connection.BlockingConnection('params')
channel1_mock = mock.Mock(
is_open=True,
close=mock.Mock(
side_effect=pika.exceptions.ChannelClosed(-1, 'Just because'),
spec_set=pika.channel.Channel.close),
spec_set=blocking_connection.BlockingChannel)
channel2_mock = mock.Mock(
is_open=True, spec_set=blocking_connection.BlockingChannel)
connection._impl._channels = {
1:
mock.Mock(
_get_cookie=mock.Mock(
return_value=channel1_mock,
spec_set=pika.channel.Channel._get_cookie),
spec_set=pika.channel.Channel),
2:
mock.Mock(
_get_cookie=mock.Mock(
return_value=channel2_mock,
spec_set=pika.channel.Channel._get_cookie),
spec_set=pika.channel.Channel)
}
with mock.patch.object(blocking_connection.BlockingConnection,
'_flush_output',
spec_set=connection._flush_output):
connection._closed_result.signal_once()
connection.close(200, 'text')
channel1_mock.close.assert_called_once_with(200, 'text')
channel2_mock.close.assert_called_once_with(200, 'text')
impl_mock.close.assert_called_once_with(200, 'text')
@unittest.skipIf(sys.version_info < (3, 8), "mock args differ on 3.7 and earlier")
@patch.object(
blocking_connection.select_connection,
'SelectConnection',
spec_set=SelectConnectionTemplate)
def test_update_secret(self, select_connection_class_mock):
impl_mock = select_connection_class_mock.return_value
impl_mock.is_closed = False
impl_mock.is_open = True
impl_mock._transport = None
with mock.patch.object(blocking_connection.BlockingConnection,
'_create_connection',
return_value=impl_mock):
connection = blocking_connection.BlockingConnection('params')
with mock.patch.object(blocking_connection._CallbackResult,
'is_ready',
return_value=True):
connection.update_secret("new_secret", "reason")
if sys.version_info < (3, 8):
args_0 = select_connection_class_mock.return_value.update_secret.call_args[0]
args_1 = select_connection_class_mock.return_value.update_secret.call_args[1]
args_len = len(select_connection_class_mock.return_value.update_secret.call_args)
else:
args_0 = select_connection_class_mock.return_value.update_secret.call_args.args[0]
args_1 = select_connection_class_mock.return_value.update_secret.call_args.args[1]
args_len = len(select_connection_class_mock.return_value.update_secret.call_args.args)
self.assertEqual(args_0, "new_secret")
self.assertEqual(args_1, "reason")
self.assertEqual(args_len, 3)
@patch.object(
blocking_connection.select_connection,
'SelectConnection',
spec_set=SelectConnectionTemplate)
@patch.object(
blocking_connection,
'BlockingChannel',
spec_set=blocking_connection.BlockingChannel)
def test_channel(
self,
blocking_channel_class_mock, # pylint: disable=W0613
select_connection_class_mock): # pylint: disable=W0613
impl_mock = select_connection_class_mock.return_value
with mock.patch.object(blocking_connection.BlockingConnection,
'_create_connection',
return_value=impl_mock):
connection = blocking_connection.BlockingConnection('params')
with mock.patch.object(blocking_connection.BlockingConnection,
'_flush_output',
spec_set=connection._flush_output):
connection.channel()
@patch.object(
blocking_connection.select_connection,
'SelectConnection',
spec_set=SelectConnectionTemplate)
def test_sleep(self, select_connection_class_mock): # pylint: disable=W0613
with mock.patch.object(blocking_connection.BlockingConnection,
'_create_connection'):
connection = blocking_connection.BlockingConnection('params')
with mock.patch.object(blocking_connection.BlockingConnection,
'_flush_output',
spec_set=connection._flush_output):
connection.sleep(0.00001)
def test_connection_blocked_evt(self):
blocked_buffer = []
frame = pika.frame.Method(0, pika.spec.Connection.Blocked('reason'))
evt = blocking_connection._ConnectionBlockedEvt(
blocked_buffer.append,
frame)
repr(evt)
evt.dispatch()
self.assertEqual(len(blocked_buffer), 1)
self.assertIs(blocked_buffer[0], frame)
def test_connection_unblocked_evt(self):
unblocked_buffer = []
frame = pika.frame.Method(0, pika.spec.Connection.Unblocked())
evt = blocking_connection._ConnectionUnblockedEvt(
unblocked_buffer.append,
frame)
repr(evt)
evt.dispatch()
self.assertEqual(len(unblocked_buffer), 1)
self.assertIs(unblocked_buffer[0], frame)
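# A note on the pattern used throughout this file: every connection test
# patches select_connection.SelectConnection with a spec_set template (so
# attribute typos fail fast) and patches BlockingConnection._create_connection
# (so no real socket I/O happens), then drives the blocking API against the mocks.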
|
a000eaf1ccae2804554e729ac056907321bafc38
|
9f48878caa37ac5f2ccf938fc476efa47c89c644
|
/src/NumericalAlgorithms/Interpolation/Python/__init__.py
|
0c5191335f7fb7165b3451690f046932386b356f
|
[
"MIT"
] |
permissive
|
sxs-collaboration/spectre
|
34f7733ab4c75dbca2f432028145fed110c9ef24
|
96f573cf158201f712da2bfb3378edf497a35a0d
|
refs/heads/develop
| 2023-08-19T11:18:18.465609
| 2023-08-19T04:24:25
| 2023-08-19T04:24:25
| 87,570,510
| 149
| 190
|
NOASSERTION
| 2023-09-14T20:10:35
| 2017-04-07T17:28:20
|
C++
|
UTF-8
|
Python
| false
| false
| 227
|
py
|
__init__.py
|
# Distributed under the MIT License.
# See LICENSE.txt for details.
from ._Pybindings import *
Irregular = {1: Irregular1D, 2: Irregular2D, 3: Irregular3D}
RegularGrid = {1: RegularGrid1D, 2: RegularGrid2D, 3: RegularGrid3D}
|
3651d7b786063bedae92a6d7cc96dafb70172271
|
8294fbe3389bebe56c42f958c87e0482c412887d
|
/2017/May/3_data/rjmessibarca.py
|
c95c76a45fdda798063163e1eb0d9674b034cd56
|
[] |
no_license
|
py-study-group/challenges
|
8717376d1f44c4846d05052e8446b647f84c26f6
|
9129f48fb7c61fb5610c16e8da427149f0a9ed0a
|
refs/heads/master
| 2021-01-20T08:37:14.224261
| 2018-02-28T16:27:52
| 2018-02-28T16:27:52
| 90,166,445
| 133
| 70
| null | 2018-02-28T10:22:04
| 2017-05-03T15:48:15
|
Python
|
UTF-8
|
Python
| false
| false
| 4,040
|
py
|
rjmessibarca.py
|
"""
The subquestions have been answered in various functions. To get the answer of a question, simply type the function in
last line
The various available functions are:
1) plotting() : This function plots all the data of the crime rates
2) unusual_developments_in_crime() : This function finds the type of crime with the largest difference
between its maximum and minimum values over the years
3) regression_murder(year): This function applies linear regression on crimes of the type "murder" and can be used
to predict the expected murder rates in the "year" parameter of the function, which you give as an input.
It also shows a plot of the linear regression line and prints the expected murder rate in the year mentioned on
the screen.
"""
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
style.use('bmh')
def get_data_frames():
l = [0, 1, 2]
for i in range(24, 38):
l.append(i)
df = pd.read_excel('crime_table.xls', skiprows=l).iloc[:20]
    df.at[7, 'Year'] = 2001  # DataFrame.set_value() was removed in pandas 1.0; .at is the modern equivalent
    df.at[18, 'Year'] = 2012
df.set_index('Year', inplace=True)
df.drop(df.columns[[19, 20, 21, 22]], axis=1,
inplace=True) # axis=1 means we are referring to a col not a row
x = []
df.rename(columns={'Population1': 'Population'}, inplace=True)
crime_rate_df = df.copy(deep=True)
population_df = df['Population']
for i in range(1, 10):
x.append(2 * i)
crime_rate_df.drop(crime_rate_df.columns[x], axis=1, inplace=True)
crime_rate_df.drop(crime_rate_df.columns[[0]], axis=1, inplace=True)
indexes = list(crime_rate_df.index.values)
for i in range(len(indexes)):
crime_rate_df.iloc[i] = (crime_rate_df.iloc[i] / population_df.iloc[i]) * 100000 # the crime rate is wrt
# 100,000 as base population
total_crime_df = crime_rate_df.mean(axis=1) # axis =1 means row wise and axis = 0 means col wise
perc_change = crime_rate_df.copy(deep=True)
perc_change = (perc_change / perc_change.iloc[0] - 1) * 100
total_crime_perc_change = perc_change.mean(axis=1)
return df, crime_rate_df, population_df, perc_change, total_crime_perc_change, total_crime_df
def plotting(): # plots the various crime rates
fig = plt.figure()
ax1 = plt.subplot2grid((1, 1), (0, 0))
perc_change.plot(ax=ax1, linewidth=3)
total_crime_perc_change.plot(ax=ax1, linewidth=10, color='black', label='mean crime rate')
ax1.legend(loc="lower left")
plt.title("Percentage change in crime rates compared to starting year")
plt.show()
def unusual_developments_in_crime():
max_min = pd.DataFrame()
max_min['max'] = crime_rate_df.max(axis=0)
max_min['min'] = crime_rate_df.min(axis=0)
max_min['abs'] = max_min['max'] - max_min['min']
max_abs = max_min['abs'].max(axis=0)
crime_type_max_change = max_min[max_min['abs'] == max_abs].index.tolist()
    print("%s has the largest difference between its maximum and minimum crime rate: %s" % (
        crime_type_max_change[0], max_abs))
def regression_murder(year): # applies linear regression on murder rates
murder = pd.DataFrame()
dates = crime_rate_df.index.values.tolist()
murder['label'] = crime_rate_df['Murder and\nnonnegligent \nmanslaughter']
prediction_size = int(0.1 * len(murder))
X = np.array(dates)
y = np.array(murder['label'])
y_train = y[:-prediction_size]
X_train = X[:-prediction_size]
clf = LinearRegression()
clf.fit(X_train.reshape(-1, 1), y_train)
regression_line = [clf.predict(X_train[i].reshape(1, -1)) for i in range(len(X_train))]
    print(clf.predict(np.array([[year]])))  # scikit-learn expects a 2D array of samples
plt.scatter(X_train, y_train)
plt.plot(X_train, regression_line)
plt.show()
df, crime_rate_df, population_df, perc_change, total_crime_perc_change, total_crime_df = get_data_frames()
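# As the module docstring says, answer a subquestion by calling one of the
# functions on the last line, e.g.:
# plotting()
# unusual_developments_in_crime()
# regression_murder(2017)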
|
773f9c1b0c1ec2f90a88e6818661bddfaf7c6ef2
|
ffdc77394c5b5532b243cf3c33bd584cbdc65cb7
|
/tests/ut/python/mindrecord/skip_test_mindrecord_shard.py
|
775dbd1d0d644491a157d5ada0d03626e73944ba
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MPL-1.0",
"OpenSSL",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause-Open-MPI",
"MIT",
"MPL-2.0-no-copyleft-exception",
"NTP",
"BSD-3-Clause",
"GPL-1.0-or-later",
"0BSD",
"MPL-2.0",
"LicenseRef-scancode-free-unknown",
"AGPL-3.0-only",
"Libpng",
"MPL-1.1",
"IJG",
"GPL-2.0-only",
"BSL-1.0",
"Zlib",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-python-cwi",
"BSD-2-Clause",
"LicenseRef-scancode-gary-s-brown",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"LicenseRef-scancode-mit-nagy",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] |
permissive
|
mindspore-ai/mindspore
|
ca7d5bb51a3451c2705ff2e583a740589d80393b
|
54acb15d435533c815ee1bd9f6dc0b56b4d4cf83
|
refs/heads/master
| 2023-07-29T09:17:11.051569
| 2023-07-17T13:14:15
| 2023-07-17T13:14:15
| 239,714,835
| 4,178
| 768
|
Apache-2.0
| 2023-07-26T22:31:11
| 2020-02-11T08:43:48
|
C++
|
UTF-8
|
Python
| false
| false
| 13,578
|
py
|
skip_test_mindrecord_shard.py
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test internal shard api"""
import os
import random
from utils import get_data, get_nlp_data, get_mkv_data
from mindspore import log as logger
from mindspore.mindrecord import ShardHeader, SUCCESS
from mindspore.mindrecord import ShardWriter, ShardIndexGenerator, ShardReader, ShardSegment
FILES_NUM = 4
CV_FILE_NAME = "./imagenet.mindrecord"
NLP_FILE_NAME = "./aclImdb.mindrecord"
MKV_FILE_NAME = "./vehPer.mindrecord"
def test_nlp_file_writer():
"""test nlp file writer using shard api"""
schema_json = {"id": {"type": "string"}, "label": {"type": "number"},
"rating": {"type": "number"},
"input_ids": {"type": "array",
"items": {"type": "number"}},
"input_mask": {"type": "array",
"items": {"type": "number"}},
"segment_ids": {"type": "array",
"items": {"type": "number"}}
}
data = list(get_nlp_data("../data/mindrecord/testAclImdbData/pos",
"../data/mindrecord/testAclImdbData/vocab.txt",
10))
header = ShardHeader()
schema = header.build_schema(schema_json, ["segment_ids"], "nlp_schema")
schema_id = header.add_schema(schema)
assert schema_id == 0, 'failed on adding schema'
index_fields_list = ["id", "rating"]
ret = header.add_index_fields(index_fields_list)
assert ret == SUCCESS, 'failed on adding index fields.'
writer = ShardWriter()
paths = ["{}{}".format(NLP_FILE_NAME, x) for x in range(FILES_NUM)]
ret = writer.open(paths)
assert ret == SUCCESS, 'failed on opening files.'
writer.set_header_size(1 << 14)
writer.set_page_size(1 << 15)
ret = writer.set_shard_header(header)
assert ret == SUCCESS, 'failed on setting header.'
ret = writer.write_raw_nlp_data({schema_id: data})
assert ret == SUCCESS, 'failed on writing raw data.'
ret = writer.commit()
assert ret == SUCCESS, 'failed on committing.'
generator = ShardIndexGenerator(os.path.realpath(paths[0]))
generator.build()
generator.write_to_db()
def test_nlp_file_reader():
"""test nlp file reader using shard api"""
dataset = ShardReader()
dataset.open(NLP_FILE_NAME + "0")
dataset.launch()
index = 0
iterator = dataset.get_next()
while iterator:
for _, raw in iterator:
logger.info("#item{}: {}".format(index, raw))
index += 1
iterator = dataset.get_next()
dataset.finish()
dataset.close()
def test_nlp_page_reader():
"""test nlp page reader using shard api"""
reader = ShardSegment()
reader.open(NLP_FILE_NAME + "0")
fields = reader.candidate_fields
logger.info("fields: {}".format(fields))
reader.category_field = "rating"
info = reader.read_category_info()
logger.info("category info: {}".format(info))
img1 = reader.read_at_page_by_id(0, 0, 1)
logger.info("img1 len: {}, img1[0] len: {}, img1[0]: {}".format(len(img1), len(img1[0]), img1[0]))
img2 = reader.read_at_page_by_name("7", 0, 1)
logger.info("img2 len: {}, img2[0] len: {}, img2[0]: {}".format(len(img2), len(img2[0]), img2[0]))
paths = ["{}{}".format(NLP_FILE_NAME, str(x).rjust(1, '0'))
for x in range(FILES_NUM)]
for x in paths:
os.remove("{}".format(x))
os.remove("{}.db".format(x))
def test_cv_file_writer():
"""test cv file reader using shard api"""
img_schema_json = {"file_name": {"type": "string"},
"label": {"type": "number"}}
data = get_data("../data/mindrecord/testImageNetData/")
header = ShardHeader()
img_schema = header.build_schema(img_schema_json, ["data"], "img_schema")
schema_id = header.add_schema(img_schema)
assert schema_id == 0, 'failed on building schema.'
index_fields_list = ["file_name", "label"]
ret = header.add_index_fields(index_fields_list)
assert ret == SUCCESS, 'failed on adding index fields.'
writer = ShardWriter()
paths = ["{}{}".format(CV_FILE_NAME, x) for x in range(FILES_NUM)]
ret = writer.open(paths)
assert ret == SUCCESS, 'failed on opening files.'
writer.set_header_size(1 << 24)
writer.set_page_size(1 << 25)
ret = writer.set_shard_header(header)
assert ret == SUCCESS, 'failed on setting header.'
ret = writer.write_raw_cv_data({schema_id: data})
assert ret == SUCCESS, 'failed on writing raw data.'
ret = writer.commit()
assert ret == SUCCESS, 'failed on committing.'
# ShardIndexGenerator
generator = ShardIndexGenerator(os.path.abspath(paths[0]))
generator.build()
generator.write_to_db()
def test_cv_file_reader():
"""test cv file reader using shard api"""
dataset = ShardReader()
dataset.open(CV_FILE_NAME + "0")
dataset.launch()
index = 0
_, blob_fields = dataset.get_blob_fields()
iterator = dataset.get_next()
while iterator:
for blob, raw in iterator:
raw[blob_fields[0]] = bytes(blob)
logger.info("#item{}: {}".format(index, raw))
index += 1
iterator = dataset.get_next()
dataset.finish()
dataset.close()
def test_cv_page_reader():
"""test cv page reader using shard api"""
reader = ShardSegment()
reader.open(CV_FILE_NAME + "0")
fields = reader.candidate_fields
logger.info("fields: {}".format(fields))
reader.category_field = "label"
info = reader.read_category_info()
logger.info("category info: {}".format(info))
img1 = reader.read_at_page_by_id(0, 0, 1)
logger.info("img1 len: {}, img1[0] len: {}".format(len(img1), len(img1[0])))
img2 = reader.read_at_page_by_name("822", 0, 1)
logger.info("img2 len: {}, img2[0] len: {}".format(len(img2), len(img2[0])))
paths = ["{}{}".format(CV_FILE_NAME, str(x).rjust(1, '0'))
for x in range(FILES_NUM)]
for x in paths:
os.remove("{}".format(x))
os.remove("{}.db".format(x))
def test_mkv_file_writer():
"""test mkv file writer using shard api"""
data = get_mkv_data("../data/mindrecord/testVehPerData/")
schema_json = {"file_name": {"type": "string"}, "id": {"type": "number"},
"prelabel": {"type": "string"}}
header = ShardHeader()
img_schema = header.build_schema(schema_json, ["data"], "img_schema")
schema_id = header.add_schema(img_schema)
assert schema_id == 0, 'failed on building schema.'
index_fields_list = ["id", "file_name"]
ret = header.add_index_fields(index_fields_list)
assert ret == SUCCESS, 'failed on adding index fields.'
writer = ShardWriter()
paths = ["{}{}".format(MKV_FILE_NAME, x) for x in range(FILES_NUM)]
ret = writer.open(paths)
assert ret == SUCCESS, 'failed on opening files.'
writer.set_header_size(1 << 24)
writer.set_page_size(1 << 25)
ret = writer.set_shard_header(header)
assert ret == SUCCESS, 'failed on setting header.'
ret = writer.write_raw_cv_data({schema_id: data})
assert ret == SUCCESS, 'failed on writing raw data.'
ret = writer.commit()
assert ret == SUCCESS, 'failed on committing.'
generator = ShardIndexGenerator(os.path.realpath(paths[0]))
generator.build()
generator.write_to_db()
def test_mkv_page_reader():
"""test mkv page reader using shard api"""
reader = ShardSegment()
reader.open(MKV_FILE_NAME + "0")
fields = reader.candidate_fields
logger.info("fields: {}".format(fields))
reader.category_field = "id"
info = reader.read_category_info()
logger.info("category info: {}".format(info))
img1 = reader.read_at_page_by_id(0, 0, 1)
logger.info("img1 len: {}, img1[0] len: {}, img1[0]: {}".format(len(img1), len(img1[0]), img1[0]))
img2 = reader.read_at_page_by_name("2", 0, 1)
logger.info("img2 len: {}, img2[0] len: {}, img2[0]: {}".format(len(img2), len(img2[0]), img2[0]))
def test_mkv_page_reader_random():
"""test mkv page random reader using shard api"""
reader = ShardSegment()
reader.open(MKV_FILE_NAME + "0")
fields = reader.candidate_fields
logger.info("fields: {}".format(fields))
reader.category_field = "id"
names = random.sample(range(1, 6), 5)
for name in names:
img2 = reader.read_at_page_by_name(str(name), 0, 2)
logger.info("name: {}, img2[0] len: {}".format(str(name), len(img2[0])))
paths = ["{}{}".format(MKV_FILE_NAME, str(x).rjust(1, '0'))
for x in range(FILES_NUM)]
for x in paths:
os.remove("{}".format(x))
os.remove("{}.db".format(x))
def test_mkv_file_writer_with_exactly_schema():
"""test mkv file writer using shard api"""
header = ShardHeader()
img_schema_json = {"annotation_name": {"type": "array",
"items": {"type": "string"}},
"annotation_pose": {"type": "array",
"items": {"type": "string"}},
"annotation_truncated": {"type": "array",
"items": {"type": "string"}},
"annotation_difficult": {"type": "array",
"items": {"type": "string"}},
"annotation_xmin": {"type": "array",
"items": {"type": "number"}},
"annotation_ymin": {"type": "array",
"items": {"type": "number"}},
"annotation_xmax": {"type": "array",
"items": {"type": "number"}},
"annotation_ymax": {"type": "array",
"items": {"type": "number"}},
"metadata_width": {"type": "number"},
"metadata_height": {"type": "number"},
"metadata_depth": {"type": "number"},
"img_path": {"type": "string"},
"annotation_path": {"type": "string"}}
img_schema = header.build_schema(img_schema_json, ["data"], "image_schema")
schema_id = header.add_schema(img_schema)
assert schema_id == 0, 'failed on building schema.'
writer = ShardWriter()
paths = ["{}{}".format(MKV_FILE_NAME, x) for x in range(1)]
ret = writer.open(paths)
assert ret == SUCCESS, 'failed on opening files.'
writer.set_header_size(1 << 24)
writer.set_page_size(1 << 25)
    image_bytes = bytes("it's an image picture", encoding="utf8")
data = []
data.append({"annotation_name": ["xxxxxxxxxx.jpg"],
"annotation_pose": ["hahahahah"],
"annotation_truncated": ["1"], "annotation_difficult": ["0"],
"annotation_xmin": [100], "annotation_ymin": [200],
"annotation_xmax": [300], "annotation_ymax": [400],
"metadata_width": 333, "metadata_height": 222,
"metadata_depth": 3,
"img_path": "/tmp/", "annotation_path": "/tmp/annotation",
"data": image_bytes})
data.append({"annotation_name": ["xxxxxxxxxx.jpg"],
"annotation_pose": ["hahahahah"],
"annotation_truncated": ["1"], "annotation_difficult": ["0"],
"annotation_xmin": [100], "annotation_ymin": [200],
"annotation_xmax": [300], "annotation_ymax": [400],
"metadata_width": 333, "metadata_height": 222,
"metadata_depth": 3,
"img_path": "/tmp/", "annotation_path": "/tmp/annotation",
"data": image_bytes})
ret = writer.set_shard_header(header)
assert ret == SUCCESS, 'failed on setting header.'
ret = writer.write_raw_cv_data({schema_id: data})
assert ret == SUCCESS, 'failed on writing raw data.'
ret = writer.commit()
assert ret == SUCCESS, 'failed on committing.'
generator = ShardIndexGenerator(os.path.realpath(paths[0]))
generator.build()
generator.write_to_db()
def test_mkv_file_reader_with_exactly_schema():
"""test mkv file reader using shard api"""
dataset = ShardReader()
dataset.open(MKV_FILE_NAME + "0")
dataset.launch()
index = 0
_, blob_fields = dataset.get_blob_fields()
iterator = dataset.get_next()
while iterator:
for blob, raw in iterator:
raw[blob_fields[0]] = bytes(blob)
logger.info("#item{}: {}".format(index, raw))
index += 1
iterator = dataset.get_next()
dataset.finish()
dataset.close()
paths = ["{}{}".format(MKV_FILE_NAME, str(x).rjust(1, '0'))
for x in range(1)]
for x in paths:
os.remove("{}".format(x))
os.remove("{}.db".format(x))
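# All of the writer tests above follow the same shard-writing recipe:
#   ShardHeader -> build_schema / add_schema -> add_index_fields
#   -> ShardWriter.open(paths) -> set_shard_header -> write_raw_*_data -> commit
#   -> ShardIndexGenerator(path).build() and write_to_db()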
|
dbb5e5f7cdef0ef22617f004005a82a53581fd89
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-as/huaweicloudsdkas/v1/model/list_all_scaling_v2_policies_request.py
|
b629d99a08106ac0a0cf301a87c037521715e15a
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 18,933
|
py
|
list_all_scaling_v2_policies_request.py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListAllScalingV2PoliciesRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'scaling_resource_id': 'str',
'scaling_resource_type': 'str',
'scaling_policy_name': 'str',
'scaling_policy_type': 'str',
'scaling_policy_id': 'str',
'start_number': 'int',
'limit': 'int',
'sort_by': 'str',
'order': 'str',
'enterprise_project_id': 'str',
'alarm_id': 'str'
}
attribute_map = {
'scaling_resource_id': 'scaling_resource_id',
'scaling_resource_type': 'scaling_resource_type',
'scaling_policy_name': 'scaling_policy_name',
'scaling_policy_type': 'scaling_policy_type',
'scaling_policy_id': 'scaling_policy_id',
'start_number': 'start_number',
'limit': 'limit',
'sort_by': 'sort_by',
'order': 'order',
'enterprise_project_id': 'enterprise_project_id',
'alarm_id': 'alarm_id'
}
def __init__(self, scaling_resource_id=None, scaling_resource_type=None, scaling_policy_name=None, scaling_policy_type=None, scaling_policy_id=None, start_number=None, limit=None, sort_by=None, order=None, enterprise_project_id=None, alarm_id=None):
"""ListAllScalingV2PoliciesRequest
The model defined in huaweicloud sdk
        :param scaling_resource_id: Scaling group ID.
        :type scaling_resource_id: str
        :param scaling_resource_type: Scaling resource type. Scaling group: SCALING_GROUP; bandwidth: BANDWIDTH
        :type scaling_resource_type: str
        :param scaling_policy_name: Scaling policy name.
        :type scaling_policy_name: str
        :param scaling_policy_type: Policy type. Alarm policy: ALARM; scheduled policy: SCHEDULED; periodic policy: RECURRENCE
        :type scaling_policy_type: str
        :param scaling_policy_id: Scaling policy ID.
        :type scaling_policy_id: str
        :param start_number: Start row number of the query. Default: 0.
        :type start_number: int
        :param limit: Number of records to query. Default: 20; maximum: 100.
        :type limit: int
        :param sort_by: Sorting method. POLICY_NAME: sort by policy name; TRIGGER_CONDITION: sort by trigger condition (in ascending order, alarm policies come first and the rest are sorted by their last trigger time in ascending order); CREATE_TIME: sort by policy creation time.
        :type sort_by: str
        :param order: Sort order. ASC: ascending; DESC: descending
        :type order: str
        :param enterprise_project_id: Enterprise project ID. When scaling_resource_type is SCALING_GROUP and all_granted_eps is passed: a Huawei Cloud account or an IAM user with global permissions can query the scaling policies of all of its scaling groups, and an IAM user granted only some enterprise projects can query the scaling policies of the scaling groups under all of its authorized enterprise projects. Note: if such a user has more than 100 enterprise projects, only the scaling policies of the scaling groups under the first 100 authorized enterprise projects are returned. When scaling_resource_type is BANDWIDTH and all_granted_eps is passed: a Huawei Cloud account or an IAM user with global permissions can query the scaling policies of all of its bandwidths, and an IAM user granted only some enterprise projects can query the scaling policies of the bandwidths under all of its authorized enterprise projects; for the policy returned for bandwidths in the all_granted_eps scenario, see [Querying Bandwidths in the EIP API Reference](https://support.huaweicloud.com/api-eip/eip_apiBandwidth_0002.html). When scaling_resource_type is not specified and all_granted_eps is passed: a Huawei Cloud account or an IAM user with global permissions can query the scaling policies of all of its scaling groups and bandwidths, and an IAM user granted only some enterprise projects can query the scaling policies of the scaling groups and bandwidths under all of its authorized enterprise projects. Note: if such a user has more than 100 enterprise projects, only the scaling policies of the scaling groups under the first 100 authorized enterprise projects are returned; for the policy returned for bandwidths in the all_granted_eps scenario, see [Querying Bandwidths in the EIP API Reference](https://support.huaweicloud.com/api-eip/eip_apiBandwidth_0002.html).
        :type enterprise_project_id: str
        :param alarm_id: Alarm ID, i.e. the ID of the alarm rule.
        :type alarm_id: str
"""
self._scaling_resource_id = None
self._scaling_resource_type = None
self._scaling_policy_name = None
self._scaling_policy_type = None
self._scaling_policy_id = None
self._start_number = None
self._limit = None
self._sort_by = None
self._order = None
self._enterprise_project_id = None
self._alarm_id = None
self.discriminator = None
if scaling_resource_id is not None:
self.scaling_resource_id = scaling_resource_id
if scaling_resource_type is not None:
self.scaling_resource_type = scaling_resource_type
if scaling_policy_name is not None:
self.scaling_policy_name = scaling_policy_name
if scaling_policy_type is not None:
self.scaling_policy_type = scaling_policy_type
if scaling_policy_id is not None:
self.scaling_policy_id = scaling_policy_id
if start_number is not None:
self.start_number = start_number
if limit is not None:
self.limit = limit
if sort_by is not None:
self.sort_by = sort_by
if order is not None:
self.order = order
if enterprise_project_id is not None:
self.enterprise_project_id = enterprise_project_id
if alarm_id is not None:
self.alarm_id = alarm_id
@property
def scaling_resource_id(self):
"""Gets the scaling_resource_id of this ListAllScalingV2PoliciesRequest.
        Scaling group ID.
:return: The scaling_resource_id of this ListAllScalingV2PoliciesRequest.
:rtype: str
"""
return self._scaling_resource_id
@scaling_resource_id.setter
def scaling_resource_id(self, scaling_resource_id):
"""Sets the scaling_resource_id of this ListAllScalingV2PoliciesRequest.
        Scaling group ID.
:param scaling_resource_id: The scaling_resource_id of this ListAllScalingV2PoliciesRequest.
:type scaling_resource_id: str
"""
self._scaling_resource_id = scaling_resource_id
@property
def scaling_resource_type(self):
"""Gets the scaling_resource_type of this ListAllScalingV2PoliciesRequest.
        Scaling resource type. Scaling group: SCALING_GROUP; bandwidth: BANDWIDTH
:return: The scaling_resource_type of this ListAllScalingV2PoliciesRequest.
:rtype: str
"""
return self._scaling_resource_type
@scaling_resource_type.setter
def scaling_resource_type(self, scaling_resource_type):
"""Sets the scaling_resource_type of this ListAllScalingV2PoliciesRequest.
        Scaling resource type. Scaling group: SCALING_GROUP; bandwidth: BANDWIDTH
:param scaling_resource_type: The scaling_resource_type of this ListAllScalingV2PoliciesRequest.
:type scaling_resource_type: str
"""
self._scaling_resource_type = scaling_resource_type
@property
def scaling_policy_name(self):
"""Gets the scaling_policy_name of this ListAllScalingV2PoliciesRequest.
        Scaling policy name.
:return: The scaling_policy_name of this ListAllScalingV2PoliciesRequest.
:rtype: str
"""
return self._scaling_policy_name
@scaling_policy_name.setter
def scaling_policy_name(self, scaling_policy_name):
"""Sets the scaling_policy_name of this ListAllScalingV2PoliciesRequest.
        Scaling policy name.
:param scaling_policy_name: The scaling_policy_name of this ListAllScalingV2PoliciesRequest.
:type scaling_policy_name: str
"""
self._scaling_policy_name = scaling_policy_name
@property
def scaling_policy_type(self):
"""Gets the scaling_policy_type of this ListAllScalingV2PoliciesRequest.
        Policy type. Alarm policy: ALARM; scheduled policy: SCHEDULED; periodic policy: RECURRENCE
:return: The scaling_policy_type of this ListAllScalingV2PoliciesRequest.
:rtype: str
"""
return self._scaling_policy_type
@scaling_policy_type.setter
def scaling_policy_type(self, scaling_policy_type):
"""Sets the scaling_policy_type of this ListAllScalingV2PoliciesRequest.
        Policy type. Alarm policy: ALARM; scheduled policy: SCHEDULED; periodic policy: RECURRENCE
:param scaling_policy_type: The scaling_policy_type of this ListAllScalingV2PoliciesRequest.
:type scaling_policy_type: str
"""
self._scaling_policy_type = scaling_policy_type
@property
def scaling_policy_id(self):
"""Gets the scaling_policy_id of this ListAllScalingV2PoliciesRequest.
        Scaling policy ID.
:return: The scaling_policy_id of this ListAllScalingV2PoliciesRequest.
:rtype: str
"""
return self._scaling_policy_id
@scaling_policy_id.setter
def scaling_policy_id(self, scaling_policy_id):
"""Sets the scaling_policy_id of this ListAllScalingV2PoliciesRequest.
        Scaling policy ID.
:param scaling_policy_id: The scaling_policy_id of this ListAllScalingV2PoliciesRequest.
:type scaling_policy_id: str
"""
self._scaling_policy_id = scaling_policy_id
@property
def start_number(self):
"""Gets the start_number of this ListAllScalingV2PoliciesRequest.
        Start row number of the query. Default: 0.
:return: The start_number of this ListAllScalingV2PoliciesRequest.
:rtype: int
"""
return self._start_number
@start_number.setter
def start_number(self, start_number):
"""Sets the start_number of this ListAllScalingV2PoliciesRequest.
        Start row number of the query. Default: 0.
:param start_number: The start_number of this ListAllScalingV2PoliciesRequest.
:type start_number: int
"""
self._start_number = start_number
@property
def limit(self):
"""Gets the limit of this ListAllScalingV2PoliciesRequest.
        Number of records to query. Default: 20; maximum: 100.
:return: The limit of this ListAllScalingV2PoliciesRequest.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ListAllScalingV2PoliciesRequest.
        Number of records to query. Default: 20; maximum: 100.
:param limit: The limit of this ListAllScalingV2PoliciesRequest.
:type limit: int
"""
self._limit = limit
@property
def sort_by(self):
"""Gets the sort_by of this ListAllScalingV2PoliciesRequest.
        Sorting method. POLICY_NAME: sort by policy name; TRIGGER_CONDITION: sort by trigger condition (in ascending order, alarm policies come first and the rest are sorted by their last trigger time in ascending order); CREATE_TIME: sort by policy creation time.
:return: The sort_by of this ListAllScalingV2PoliciesRequest.
:rtype: str
"""
return self._sort_by
@sort_by.setter
def sort_by(self, sort_by):
"""Sets the sort_by of this ListAllScalingV2PoliciesRequest.
        Sorting method. POLICY_NAME: sort by policy name; TRIGGER_CONDITION: sort by trigger condition (in ascending order, alarm policies come first and the rest are sorted by their last trigger time in ascending order); CREATE_TIME: sort by policy creation time.
:param sort_by: The sort_by of this ListAllScalingV2PoliciesRequest.
:type sort_by: str
"""
self._sort_by = sort_by
@property
def order(self):
"""Gets the order of this ListAllScalingV2PoliciesRequest.
        Sort order. ASC: ascending; DESC: descending
:return: The order of this ListAllScalingV2PoliciesRequest.
:rtype: str
"""
return self._order
@order.setter
def order(self, order):
"""Sets the order of this ListAllScalingV2PoliciesRequest.
        Sort order. ASC: ascending; DESC: descending
:param order: The order of this ListAllScalingV2PoliciesRequest.
:type order: str
"""
self._order = order
@property
def enterprise_project_id(self):
"""Gets the enterprise_project_id of this ListAllScalingV2PoliciesRequest.
        Enterprise project ID. When scaling_resource_type is SCALING_GROUP and all_granted_eps is passed: a Huawei Cloud account or an IAM user with global permissions can query the scaling policies of all of its scaling groups, and an IAM user granted only some enterprise projects can query the scaling policies of the scaling groups under all of its authorized enterprise projects. Note: if such a user has more than 100 enterprise projects, only the scaling policies of the scaling groups under the first 100 authorized enterprise projects are returned. When scaling_resource_type is BANDWIDTH and all_granted_eps is passed: a Huawei Cloud account or an IAM user with global permissions can query the scaling policies of all of its bandwidths, and an IAM user granted only some enterprise projects can query the scaling policies of the bandwidths under all of its authorized enterprise projects; for the policy returned for bandwidths in the all_granted_eps scenario, see [Querying Bandwidths in the EIP API Reference](https://support.huaweicloud.com/api-eip/eip_apiBandwidth_0002.html). When scaling_resource_type is not specified and all_granted_eps is passed: a Huawei Cloud account or an IAM user with global permissions can query the scaling policies of all of its scaling groups and bandwidths, and an IAM user granted only some enterprise projects can query the scaling policies of the scaling groups and bandwidths under all of its authorized enterprise projects. Note: if such a user has more than 100 enterprise projects, only the scaling policies of the scaling groups under the first 100 authorized enterprise projects are returned; for the policy returned for bandwidths in the all_granted_eps scenario, see [Querying Bandwidths in the EIP API Reference](https://support.huaweicloud.com/api-eip/eip_apiBandwidth_0002.html).
:return: The enterprise_project_id of this ListAllScalingV2PoliciesRequest.
:rtype: str
"""
return self._enterprise_project_id
@enterprise_project_id.setter
def enterprise_project_id(self, enterprise_project_id):
"""Sets the enterprise_project_id of this ListAllScalingV2PoliciesRequest.
        Enterprise project ID. When scaling_resource_type is SCALING_GROUP and all_granted_eps is passed: the Huawei Cloud account and IAM users with global permissions can query the scaling policies of all of the user's scaling groups, while IAM users granted only some enterprise projects can query the scaling policies of the scaling groups under all enterprise projects they are authorized for. Note: if such an IAM user has more than 100 enterprise projects, only the scaling policies of the scaling groups under the first 100 authorized enterprise projects are returned. When scaling_resource_type is BANDWIDTH and all_granted_eps is passed: the Huawei Cloud account and IAM users with global permissions can query the scaling policies of all of the user's bandwidths, while IAM users granted only some enterprise projects can query the scaling policies of the bandwidths under all enterprise projects they are authorized for; for the policy returned for bandwidths in the all_granted_eps case, see [Querying Bandwidths in the EIP API Reference](https://support.huaweicloud.com/api-eip/eip_apiBandwidth_0002.html). When scaling_resource_type is not specified and all_granted_eps is passed: the Huawei Cloud account and IAM users with global permissions can query the scaling policies of all of the user's scaling groups and bandwidths, while IAM users granted only some enterprise projects can query the scaling policies of the scaling groups and bandwidths under all enterprise projects they are authorized for. Note: if such an IAM user has more than 100 enterprise projects, only the scaling policies of the scaling groups under the first 100 authorized enterprise projects are returned; for the policy returned for bandwidths in the all_granted_eps case, see [Querying Bandwidths in the EIP API Reference](https://support.huaweicloud.com/api-eip/eip_apiBandwidth_0002.html).
:param enterprise_project_id: The enterprise_project_id of this ListAllScalingV2PoliciesRequest.
:type enterprise_project_id: str
"""
self._enterprise_project_id = enterprise_project_id
@property
def alarm_id(self):
"""Gets the alarm_id of this ListAllScalingV2PoliciesRequest.
        Alarm ID, i.e. the ID of the alarm rule.
:return: The alarm_id of this ListAllScalingV2PoliciesRequest.
:rtype: str
"""
return self._alarm_id
@alarm_id.setter
def alarm_id(self, alarm_id):
"""Sets the alarm_id of this ListAllScalingV2PoliciesRequest.
        Alarm ID, i.e. the ID of the alarm rule.
:param alarm_id: The alarm_id of this ListAllScalingV2PoliciesRequest.
:type alarm_id: str
"""
self._alarm_id = alarm_id
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize list elements that are themselves models.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Serialize dict values that are models; plain values pass through.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask attributes flagged as sensitive instead of exposing them.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
        if six.PY2:
            # Python 2 only: force UTF-8 so non-ASCII docstring content
            # survives JSON serialization.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListAllScalingV2PoliciesRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
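As a quick usage sketch for the request model above: the snippet below fills a request via the setters defined in this file and serializes it with to_dict(). It assumes the generated model can be constructed without arguments, as is common for these SDK models; the example values are hypothetical, and the client call that would consume the request is not part of this excerpt.

    # Usage sketch: only the setters and to_dict() shown above are used.
    # Assumption: the generated constructor accepts no arguments.
    request = ListAllScalingV2PoliciesRequest()
    request.start_number = 0      # first row of the result set (default 0)
    request.limit = 50            # per the docstring: default 20, maximum 100
    request.sort_by = "CREATE_TIME"
    request.order = "DESC"
    print(request.to_dict())      # attributes in sensitive_list would print as "****"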
|
c3c825891d6c53e99e15f953af274b30b66da236
|
702347798650c4f125fcd5291a52e51e9d20c63c
|
/learn2learn/text/datasets/__init__.py
|
2f7f4a12ae26841128c1d281fce651932f3cb713
|
[
"MIT"
] |
permissive
|
learnables/learn2learn
|
8c8165297d9567450eb83f0591014de1da5f7363
|
db5cee3795490071282bbdfa81179ef732349196
|
refs/heads/master
| 2023-09-03T07:05:45.651636
| 2023-07-03T18:50:46
| 2023-07-03T18:50:46
| 201,314,421
| 2,397
| 352
|
MIT
| 2023-07-03T18:50:47
| 2019-08-08T18:22:41
|
Python
|
UTF-8
|
Python
| false
| false
| 76
|
py
|
__init__.py
|
#!/usr/bin/env python3
from .news_classification import NewsClassification
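For context on the re-export above: this one-line __init__.py is what makes the dataset class reachable from the package path, so downstream code can import it directly. The constructor arguments live in news_classification.py and are not shown in this excerpt.

    # Enabled by the re-export in __init__.py above.
    from learn2learn.text.datasets import NewsClassification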
|
0b00ff05403ea246489b5cf9812bb8934a5cd0e3
|
95b4a15808b9c412c8364db80fd619a65dd587e0
|
/src/compas/artists/exceptions.py
|
8b21c798c028e2fff8786b9e532ce54d605f2285
|
[
"MIT"
] |
permissive
|
compas-dev/compas
|
11d5c4d9afd554833297b4a5dbe6a975e6940ce3
|
486e2e9332553240bcbd80e100d26bff58071709
|
refs/heads/main
| 2023-08-31T15:49:32.430570
| 2023-08-17T10:19:52
| 2023-08-17T10:19:52
| 104,857,648
| 286
| 116
|
MIT
| 2023-09-12T13:53:36
| 2017-09-26T08:28:01
|
Python
|
UTF-8
|
Python
| false
| false
| 784
|
py
|
exceptions.py
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
class DataArtistNotRegistered(Exception):
"""Exception that is raised when no artist is registered for a given data type."""
class NoArtistContextError(Exception):
"""Exception that is raised when no artist context is assigned is registered for a given data type."""
def __init__(self):
error_message = "No context defined."
error_message += "\n\nThis usually means that the script that you are running requires"
error_message += "\na CAD environment but it is being ran as a standalone script"
error_message += "\n(ie. from the command line or code editor)."
super(NoArtistContextError, self).__init__(error_message)
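A minimal sketch of raising and handling the exceptions defined above; the guard function is hypothetical, and only the two exception classes come from this file.

    # Hypothetical guard: raise the library's context error when no CAD
    # context has been assigned.
    def ensure_artist_context(context):
        if context is None:
            raise NoArtistContextError()

    try:
        ensure_artist_context(None)
    except NoArtistContextError as exc:
        print(exc)  # prints the multi-line "No context defined." message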
|